/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
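
/*
 * Per-EQ tasklet body: drain the list of CQs whose completion handlers
 * were deferred from hard-IRQ context.  Each CQ's deferred handler
 * (tasklet_ctx.comp) runs under the reference taken when the CQ was
 * queued; processing is capped at TASKLET_MAX_TIME (2 ms), and any
 * leftover work is picked up by rescheduling the tasklet.
 */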
void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (refcount_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}
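
/*
 * Default cq->comp handler installed by mlx4_cq_alloc(): rather than
 * running the completion callback in hard-IRQ context, queue the CQ on
 * its EQ's tasklet list (taking a reference so the CQ cannot be freed
 * while queued) and kick the tasklet on the empty->non-empty
 * transition.  Drivers may overwrite cq->comp after allocation to
 * dispatch directly from IRQ context instead.
 */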
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When CQ migration between EQs is implemented, this point
	 * will need synchronization: while a CQ is being migrated,
	 * completions from the old EQ may still arrive.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		refcount_inc(&cq->refcount);
		kick = list_empty(&tasklet_ctx->list);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
		if (kick)
			tasklet_schedule(&tasklet_ctx->task);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
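
/*
 * Dispatch a completion event from the EQ handler to the CQ's
 * completion callback.  The radix tree lookup only needs
 * rcu_read_lock(); see the comment in the body for why the CQ stays
 * valid after the unlock.
 */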
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}
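
/*
 * Dispatch an asynchronous event (e.g. a CQ error) to the CQ's event
 * callback.
 */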
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}
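
/*
 * Thin wrappers around the CQ firmware commands: SW2HW_CQ hands a
 * software-initialized CQ context over to the device, HW2SW_CQ takes
 * ownership back (with a NULL mailbox the context is not read back),
 * and MODIFY_CQ updates a hardware-owned CQ (opmod 0 for resize,
 * opmod 1 for moderation, as used below).
 */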
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num, u8 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
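
/*
 * Change the event-moderation parameters of an existing CQ: raise a
 * completion event only after "count" CQEs or after the "period"
 * timeout, whichever comes first (the mlx4 Ethernet driver feeds
 * ethtool's rx-frames/rx-usecs values into these fields).  Only
 * cq_max_count and cq_period are set in the mailbox; opmod 1 asks the
 * firmware to update just the moderation fields.
 *
 * A minimal caller sketch (illustrative values only):
 *
 *	err = mlx4_cq_modify(dev, cq, 44, 16);
 *	if (err)
 *		mlx4_warn(dev, "failed to set CQ moderation\n");
 */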
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period	 = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
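
/*
 * Resize an existing CQ: point the hardware at a new buffer described
 * by "mtt", with the new size encoded as ilog2(entries).  The
 * hardcoded 12 matches MLX4_ICM_PAGE_SHIFT (4 KB pages), which
 * mlx4_cq_alloc() below uses by name.  Issued as MODIFY_CQ opmod 0.
 */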
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
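
/*
 * Low-level CQN allocation: reserve a number from the bitmap and make
 * sure the ICM pages backing the CQ context and its cMPT entry are
 * mapped.  The __ variant touches the resources directly; it is also
 * invoked by the resource-tracker command wrapper on behalf of slave
 * functions.
 */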
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}
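
/*
 * On multi-function devices the allocation is delegated to the PF
 * through a wrapped ALLOC_RES command so that per-function quotas can
 * be enforced; otherwise allocate directly.
 */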
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)
{
	u32 in_modifier = RES_CQ | (((u32)usage & 3) << 30);
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}
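
/*
 * Mirror of the allocation path above: release the ICM references and
 * return the CQN to the bitmap, going through the wrapped FREE_RES
 * command on multi-function devices.
 */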
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}
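
/*
 * When the SW_CQ_INIT capability is set, the driver (not the firmware)
 * must initialize every CQE to the 0xcc pattern before SW2HW_CQ.  For
 * user-space CQs the buffer is filled through copy_to_user(), one page
 * at a time.
 */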
static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
{
	int entries_per_copy = PAGE_SIZE / cqe_size;
	void *init_ents;
	int err = 0;
	int i;

	init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!init_ents)
		return -ENOMEM;

	/* Populate a list of CQ entries to reduce the number of
	 * copy_to_user calls. 0xcc is the initialization value
	 * required by the FW.
	 */
	memset(init_ents, 0xcc, PAGE_SIZE);

	if (entries_per_copy < entries) {
		for (i = 0; i < entries / entries_per_copy; i++) {
			err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
				-EFAULT : 0;
			if (err)
				goto out;

			buf += PAGE_SIZE;
		}
	} else {
		err = copy_to_user((void __user *)buf, init_ents,
				   array_size(entries, cqe_size)) ?
			-EFAULT : 0;
	}

out:
	kfree(init_ents);

	return err;
}
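
/* Same 0xcc initialization for kernel CQ buffers, direct or paged. */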
static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
				  int entries,
				  int cqe_size)
{
	int i;

	if (buf->nbufs == 1)
		memset(buf->direct.buf, 0xcc, entries * cqe_size);
	else
		for (i = 0; i < buf->npages; i++)
			memset(buf->page_list[i].buf, 0xcc,
			       1UL << buf->page_shift);
}
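
/*
 * Create a CQ: reserve a CQN and its ICM backing, publish the CQ in
 * the radix tree (so the EQ handlers above can find it), build the CQ
 * context in a command mailbox, optionally pre-initialize the CQEs,
 * and hand the CQ to the firmware with SW2HW_CQ.  On success the CQ
 * starts with refcount 1 and dispatches completions through the
 * per-EQ tasklet by default.
 */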
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en, void *buf_addr, bool user_cq)
{
	bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	if (sw_cq_init) {
		if (user_cq) {
			err = mlx4_init_user_cqes(buf_addr, nent,
						  dev->caps.cqe_size);
			if (err)
				sw_cq_init = false;
		} else {
			mlx4_init_kernel_cqes(buf_addr, nent,
					      dev->caps.cqe_size);
		}
	}

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);

	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	refcount_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
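
/*
 * Tear down a CQ: reclaim it from the firmware, unpublish it from the
 * radix tree, synchronize against the EQ interrupt handlers that might
 * still reference it, then drop the initial reference and wait for any
 * tasklet-held references before freeing the ICM backing.
 */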
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
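
/*
 * Table setup/teardown.  Slave functions only need the radix tree and
 * its lock; the CQN bitmap is owned by the PF, which keeps the bottom
 * reserved_cqs numbers out of the allocatable range.
 */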
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
				dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}