/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

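/*
 * Overview of the command interface as implemented below: commands are built
 * as fixed-size descriptors inside a single DMA-mapped page (cmd->cmd_buf).
 * Input and output that do not fit into the descriptor's inline area travel
 * in chains of mailbox blocks.  A descriptor is handed to the HCA by setting
 * the ownership bit (CMD_OWNER_HW) and ringing the command doorbell;
 * completion is observed either by polling the ownership bit
 * (CMD_MODE_POLLING) or from the command EQ via mlx5_cmd_comp_handler()
 * (CMD_MODE_EVENTS).
 */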
enum {
	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			  MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}

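/*
 * Each command entry gets a token that is written into the descriptor and
 * into every mailbox block (and covered by the block signature), which gives
 * a cheap sanity check that a completion and its mailboxes belong to the
 * descriptor they claim to.  Tokens simply cycle through 1..255.
 */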
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}

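/*
 * Command slots are tracked with a bitmask: alloc_ent() finds a free bit and
 * clears it, free_ent() sets it again on completion.  Only max_reg_cmds slots
 * are handed out this way; the last slot of the queue is reserved for the
 * page-request path, which bypasses this allocator (see cmd_work_handler()).
 */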
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

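/*
 * Signature scheme: xor8_buf() XORs a byte range down to a single byte.
 * Each descriptor and mailbox block carries signature bytes chosen so that
 * the XOR over the signed range comes out as 0xff; calc_*_sig() computes
 * them on submission and verify_*_sig() checks them on completion, but only
 * when checksumming is enabled (cmd->checksum_disabled is clear).
 */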
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

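/*
 * In polling mode completion is detected by watching the descriptor's
 * ownership bit: the HCA clears CMD_OWNER_HW when it is done with the entry.
 * poll_timeout() sleeps 5-10ms between samples and gives up slightly after
 * the regular command timeout.
 */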
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";
	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";
	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";
	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";
	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";
	case MLX5_CMD_OP_ENABLE_HCA:
		return "MLX5_CMD_OP_ENABLE_HCA";
	case MLX5_CMD_OP_DISABLE_HCA:
		return "MLX5_CMD_OP_DISABLE_HCA";
	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";
	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";
	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";
	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";
	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";
	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";
	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";
	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";
	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";
	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";
	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";
	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";
	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";
	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";
	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";
	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";
	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";
	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";
	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";
	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";
	case MLX5_CMD_OP_RTS2SQD_QP:
		return "RTS2SQD_QP";
	case MLX5_CMD_OP_SQD2RTS_QP:
		return "SQD2RTS_QP";
	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";
	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";
	case MLX5_CMD_OP_CONF_SQP:
		return "CONF_SQP";
	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";
	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";
	case MLX5_CMD_OP_SUSPEND_QP:
		return "SUSPEND_QP";
	case MLX5_CMD_OP_UNSUSPEND_QP:
		return "UNSUSPEND_QP";
	case MLX5_CMD_OP_SQD2SQD_QP:
		return "SQD2SQD_QP";
	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
		return "ALLOC_QP_COUNTER_SET";
	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
		return "DEALLOC_QP_COUNTER_SET";
	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
		return "QUERY_QP_COUNTER_SET";
	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";
	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";
	case MLX5_CMD_OP_QUERY_PSV:
		return "QUERY_PSV";
	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
		return "QUERY_SIG_RULE_TABLE";
	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
		return "QUERY_BLOCK_SIZE_TABLE";
	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";
	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";
	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";
	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";
	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";
	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";
	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";
	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";
	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";
	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";
	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";
	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";
	case MLX5_CMD_OP_ACCESS_REG:
		return "MLX5_CMD_OP_ACCESS_REG";
	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}
}

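/*
 * cmd_work_handler() runs on the command workqueue (or is called directly
 * for page-queue commands): it takes the relevant semaphore, grabs a slot,
 * builds the descriptor (inline data plus mailbox pointers and lengths),
 * signs it when checksumming is enabled, marks it hardware-owned and rings
 * the doorbell.  In polling mode it also waits for the ownership bit to
 * clear and invokes the completion handler itself.
 */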
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = ent->ret;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}

/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
 */
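/*
 * mlx5_cmd_invoke() is the common submission path: allocate a work entry,
 * run it through cmd_work_handler() (inline for page-queue commands,
 * otherwise via the command workqueue) and, for synchronous callers, wait
 * for completion, account firmware execution time per opcode and hand back
 * the command status byte.
 */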
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

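/*
 * Debugfs command injection: the "in", "out" and "out_len" files stage a raw
 * command buffer (see data_write() and outlen_write() below); writing the
 * string "go" to the "run" file executes it through mlx5_cmd_exec(), after
 * which the result can be read back from "out" and "status".
 */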
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

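/*
 * A command message keeps the head of the caller's buffer inline in
 * msg->first.data (that is what lands in the descriptor's inline area) and
 * the remainder in a chain of MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks.  The
 * two helpers below scatter a caller buffer into that layout and gather it
 * back after completion.
 */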
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[count] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return -ENOMEM;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

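/*
 * Switching between event and polling completion modes: taking every regular
 * command semaphore plus the page semaphore guarantees no command is in
 * flight while cmd->mode is changed, and flushing the workqueue drains
 * anything that was already queued.
 */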
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

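/*
 * mlx5_cmd_comp_handler() is the single completion path: in polling mode
 * cmd_work_handler() calls it directly, and in event mode it is expected to
 * be called from command EQ handling (it is exported for that).  @vector is
 * a bitmap of completed entries; for each one the handler copies out the
 * inline output, optionally verifies the signatures, records the status,
 * releases the entry and its semaphore, and then either invokes the
 * asynchronous callback or completes the synchronous waiter.
 */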
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;
			ent->status = ent->lay->status_own >> 1;
			mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
				      ent->ret, deliv_status_to_str(ent->status), ent->status);
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);

	return msg;
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	if (msg->cache) {
		spin_lock(&msg->cache->lock);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock(&msg->cache->lock);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

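/*
 * MANAGE_PAGES is special-cased: it is routed through the dedicated
 * page-queue slot (see cmd_work_handler()) so that firmware page requests
 * can still be served when all regular command slots are busy.
 */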
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_exec);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

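/*
 * Two message caches are pre-populated at init time so that most commands do
 * not have to allocate a mailbox chain on the fly: a "med" list of
 * MED_LIST_SIZE messages and a "large" list of LONG_LIST_SIZE messages (the
 * latter apparently sized for a worst-case page-request payload, judging by
 * the LONG_LIST_SIZE formula above).  alloc_msg() picks the smallest cache
 * that fits and falls back to a fresh allocation otherwise.
 */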
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

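/*
 * mlx5_cmd_init() reads the command queue geometry (log_sz/log_stride) from
 * the initialization segment, maps one page for the descriptors, programs
 * the queue address back into cmdq_addr_h/cmdq_addr_l_sz and starts out in
 * polling mode; event mode is switched on later via mlx5_cmd_use_events().
 */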
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}