/*
 * drivers/net/ethernet/mellanox/mlx5/core/cq.c
 *
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

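/* Per-EQ tasklet callback: drain the list of CQs queued for completion
 * processing, run each CQ's completion handler and drop the reference
 * taken when the CQ was queued. If the time budget (TASKLET_MAX_TIME
 * msecs) runs out before the list is empty, reschedule the tasklet to
 * finish the remaining CQs later.
 */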
void mlx5_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
	struct mlx5_core_cq *mcq;
	struct mlx5_core_cq *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
				 tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

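/* Default CQ completion handler: queue the CQ on its EQ's tasklet context
 * so that the real completion work runs in tasklet (softirq) context. A
 * reference is taken for each queued CQ and released by
 * mlx5_cq_tasklet_cb() once the CQ has been processed.
 */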
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
{
	unsigned long flags;
	struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* Once migrating CQs between EQs is implemented, this point will
	 * need to be synchronized: while a CQ is being migrated,
	 * completions from the old EQ could still arrive.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

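/* Completion event handler, called when a completion event for this CQ
 * arrives on its EQ: look up the CQ by number, bump its arm sequence
 * number and invoke its completion handler under a temporary reference.
 */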
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

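/* Asynchronous (e.g. error) event handler: look up the CQ by number and
 * dispatch the event to its event callback under a temporary reference.
 */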
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);

	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

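/* Create a CQ in firmware, attach it to the tasklet context of its target
 * EQ, register it in the radix tree used by the event handlers above and
 * add it to debugfs. If the radix tree insert fails after the firmware
 * object was created, the CQ is destroyed again via the err_cmd path.
 */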
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen)
{
	int err;
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_create_cq_mbox_out out;
	struct mlx5_destroy_cq_mbox_in din;
	struct mlx5_destroy_cq_mbox_out dout;
	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
			   c_eqn);
	struct mlx5_eq *eq;

	eq = mlx5_eqn2eq(dev, eqn);
	if (IS_ERR(eq))
		return PTR_ERR(eq);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
	memset(&out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
	cq->cons_index = 0;
	cq->arm_sn = 0;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	if (!cq->comp)
		cq->comp = mlx5_add_cq_to_tasklet;
	/* assuming CQ will be deleted before the EQ */
	cq->tasklet_ctx.priv = &eq->tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	cq->pid = current->pid;
	err = mlx5_debug_cq_add(dev, cq);
	if (err)
		mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
			      cq->cqn);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	din.cqn = cpu_to_be32(cq->cqn);	/* tell firmware which CQ to destroy */
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

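/* Destroy a CQ: remove it from the radix tree first so the event handlers
 * can no longer find it, destroy the firmware object, synchronize against
 * a possibly in-flight interrupt and then wait for all outstanding
 * references to be released before returning.
 */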
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_destroy_cq_mbox_in in;
	struct mlx5_destroy_cq_mbox_out out;
	struct mlx5_core_cq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	synchronize_irq(cq->irqn);

	mlx5_debug_cq_remove(dev, cq);
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

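/* Read the firmware CQ context for @cq into @out. */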
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       struct mlx5_query_cq_mbox_out *out)
{
	struct mlx5_query_cq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, sizeof(*out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_query_cq);

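/* Issue MODIFY_CQ with a caller-built mailbox. The fields that actually
 * change are selected by the field_select mask inside @in.
 */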
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
	struct mlx5_modify_cq_mbox_out out;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

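/* Convenience wrapper around mlx5_core_modify_cq() that updates only the
 * completion event moderation parameters of a CQ: the moderation period
 * and the maximum number of completions before an event is generated.
 */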
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	struct mlx5_modify_cq_mbox_in in;

	memset(&in, 0, sizeof(in));

	in.cqn = cpu_to_be32(cq->cqn);
	in.ctx.cq_period = cpu_to_be16(cq_period);
	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
	in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
				      MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
}

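/* Initialize the per-device CQ table (lock plus radix tree keyed by CQ
 * number) and create the CQ debugfs root.
 */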
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	int err;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	err = mlx5_cq_debugfs_init(dev);

	return err;
}

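/* Tear down the debugfs state created by mlx5_init_cq_table(). */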
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}