/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* two hours for the sake of bringup. Generally, commands must
	 * always complete, and this timeout value may need further tuning.
	 */
	MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
	MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 2,
};

enum {
	MLX5_EQ_VEC_PAGES = 0,
	MLX5_EQ_VEC_CMD = 1,
	MLX5_EQ_VEC_ASYNC = 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_EQ_NAME = 20
};

enum {
	MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
	MLX5_ATOMIC_MODE_CX = 2 << 16,
	MLX5_ATOMIC_MODE_8B = 3 << 16,
	MLX5_ATOMIC_MODE_16B = 4 << 16,
	MLX5_ATOMIC_MODE_32B = 5 << 16,
	MLX5_ATOMIC_MODE_64B = 6 << 16,
	MLX5_ATOMIC_MODE_128B = 7 << 16,
	MLX5_ATOMIC_MODE_256B = 8 << 16,
};

enum {
	MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
	MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
	MLX5_CMD_OP_INIT_HCA = 0x102,
	MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
	MLX5_CMD_OP_QUERY_PAGES = 0x107,
	MLX5_CMD_OP_MANAGE_PAGES = 0x108,
	MLX5_CMD_OP_SET_HCA_CAP = 0x109,

	MLX5_CMD_OP_CREATE_MKEY = 0x200,
	MLX5_CMD_OP_QUERY_MKEY = 0x201,
	MLX5_CMD_OP_DESTROY_MKEY = 0x202,
	MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,

	MLX5_CMD_OP_CREATE_EQ = 0x301,
	MLX5_CMD_OP_DESTROY_EQ = 0x302,
	MLX5_CMD_OP_QUERY_EQ = 0x303,

	MLX5_CMD_OP_CREATE_CQ = 0x400,
	MLX5_CMD_OP_DESTROY_CQ = 0x401,
	MLX5_CMD_OP_QUERY_CQ = 0x402,
	MLX5_CMD_OP_MODIFY_CQ = 0x403,

	MLX5_CMD_OP_CREATE_QP = 0x500,
	MLX5_CMD_OP_DESTROY_QP = 0x501,
	MLX5_CMD_OP_RST2INIT_QP = 0x502,
	MLX5_CMD_OP_INIT2RTR_QP = 0x503,
	MLX5_CMD_OP_RTR2RTS_QP = 0x504,
	MLX5_CMD_OP_RTS2RTS_QP = 0x505,
	MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
	MLX5_CMD_OP_2ERR_QP = 0x507,
	MLX5_CMD_OP_RTS2SQD_QP = 0x508,
	MLX5_CMD_OP_SQD2RTS_QP = 0x509,
	MLX5_CMD_OP_2RST_QP = 0x50a,
	MLX5_CMD_OP_QUERY_QP = 0x50b,
	MLX5_CMD_OP_CONF_SQP = 0x50c,
	MLX5_CMD_OP_MAD_IFC = 0x50d,
	MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
	MLX5_CMD_OP_SUSPEND_QP = 0x50f,
	MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
	MLX5_CMD_OP_SQD2SQD_QP = 0x511,
	MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
	MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
	MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,

	MLX5_CMD_OP_CREATE_PSV = 0x600,
	MLX5_CMD_OP_DESTROY_PSV = 0x601,
	MLX5_CMD_OP_QUERY_PSV = 0x602,
	MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
	MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,

	MLX5_CMD_OP_CREATE_SRQ = 0x700,
	MLX5_CMD_OP_DESTROY_SRQ = 0x701,
	MLX5_CMD_OP_QUERY_SRQ = 0x702,
	MLX5_CMD_OP_ARM_RQ = 0x703,
	MLX5_CMD_OP_RESIZE_SRQ = 0x704,

	MLX5_CMD_OP_ALLOC_PD = 0x800,
	MLX5_CMD_OP_DEALLOC_PD = 0x801,
	MLX5_CMD_OP_ALLOC_UAR = 0x802,
	MLX5_CMD_OP_DEALLOC_UAR = 0x803,

	MLX5_CMD_OP_ACCESS_REG = 0x805,
	MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
	MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,

	MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
	MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,

	MLX5_CMD_OP_MAX = 0x810,
};

enum {
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PMLP = 0, /* TBD */
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

struct mlx5_field_desc {
	struct dentry *dent;
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};

struct mlx5_uuar_info {
	struct mlx5_uar *uars;
	int num_uars;
	int num_low_latency_uuars;
	unsigned long *bitmap;
	unsigned int *count;
	struct mlx5_bf *bfs;

	/* protect uuar allocation data structs */
	struct mutex lock;
};

struct mlx5_bf {
	void __iomem *reg;
	void __iomem *regreg;
	int buf_size;
	struct mlx5_uar *uar;
	unsigned long offset;
	int need_lock;
	/* protect blue flame buffer selection when needed */
	spinlock_t lock;

	/* serialize 64 bit writes when done as two 32 bit accesses */
	spinlock_t lock32;
	int uuarn;
};

struct mlx5_cmd_first {
	__be32 data[4];
};

struct mlx5_cmd_msg {
	struct list_head list;
	struct cache_ent *cache;
	u32 len;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cache_ent {
	/* protect block chain allocations */
	spinlock_t lock;
	struct list_head head;
};

struct cmd_msg_cache {
	struct cache_ent large;
	struct cache_ent med;
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	struct dentry *root;
	struct dentry *avg;
	struct dentry *count;
	/* protect command average calculations */
	spinlock_t lock;
};

struct mlx5_cmd {
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;

	/* protect command queue allocations */
	spinlock_t alloc_lock;

	/* protect token allocations */
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int gid_table_len;
	int pkey_table_len;
};

struct mlx5_caps {
	u8 log_max_eq;
	u8 log_max_cq;
	u8 log_max_qp;
	u8 log_max_mkey;
	u8 log_max_pd;
	u8 log_max_srq;
	u32 max_cqes;
	int max_wqes;
	int max_sq_desc_sz;
	int max_rq_desc_sz;
	u64 flags;
	u16 stat_rate_support;
	int log_max_msg;
	int num_ports;
	int max_ra_res_qp;
	int max_ra_req_qp;
	int max_srq_wqes;
	int bf_reg_size;
	int bf_regs_per_page;
	struct mlx5_port_caps port[MLX5_MAX_PORTS];
	u8 ext_port_cap[MLX5_MAX_PORTS];
	int max_vf;
	u32 reserved_lkey;
	u8 local_ca_ack_delay;
	u8 log_max_mcg;
	u16 max_qp_mcg;
	int min_page_sz;
};

struct mlx5_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx5_buf {
	struct mlx5_buf_list direct;
	struct mlx5_buf_list *page_list;
	int nbufs;
	int npages;
	int page_shift;
	int size;
};

struct mlx5_eq {
	struct mlx5_core_dev *dev;
	__be32 __iomem *doorbell;
	u32 cons_index;
	struct mlx5_buf buf;
	int size;
	u8 irqn;
	u8 eqn;
	int nent;
	u64 mask;
	char name[MLX5_MAX_EQ_NAME];
	struct list_head list;
	int index;
	struct mlx5_rsc_debug *dbg;
};

struct mlx5_core_mr {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 access;
};

struct mlx5_core_srq {
	u32 srqn;
	int max;
	int max_gs;
	int max_avail_gather;
	int wqe_shift;
	void (*event)(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t refcount;
	struct completion free;
};

struct mlx5_eq_table {
	void __iomem *update_ci;
	void __iomem *update_arm_ci;
	struct list_head *comp_eq_head;
	struct mlx5_eq pages_eq;
	struct mlx5_eq async_eq;
	struct mlx5_eq cmd_eq;
	struct msix_entry *msix_arr;
	int num_comp_vectors;
	/* protect EQs list */
	spinlock_t lock;
};

struct mlx5_uar {
	u32 index;
	struct list_head bf_list;
	unsigned free_bf_bmap;
	void __iomem *wc_map;
	void __iomem *map;
};

struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	struct list_head list;
	u32 prev;
	int miss_counter;
};

struct mlx5_cq_table {
	/* protect radix tree */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_qp_table {
	/* protect radix tree */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_priv {
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table eq_table;
	struct mlx5_uuar_info uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root page_root;
	int fw_pages;
	int reg_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table cq_table;
	/* end: cq stuff */

	/* start: alloc stuff */
	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */
	struct dentry *dbg_root;

	/* protect mkey key part */
	spinlock_t mkey_lock;
	u8 mkey_key;
};

struct mlx5_core_dev {
	struct pci_dev *pdev;
	u8 rev_id;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct mlx5_caps caps;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
	atomic_t num_qps;
};

struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

struct mlx5_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32 *db_page;
	dma_addr_t db_dma;
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	mlx5_cmd_cbk_t callback;
	void *context;
	int idx;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	struct timespec ts1;
	struct timespec ts2;
};

struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

/* Return a pointer into an mlx5_buf at @offset, whether the buffer was
 * allocated as one contiguous chunk or as a list of pages.
 */
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
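
/*
 * Illustrative sketch (not part of this header): a caller indexing
 * fixed-size entries out of an mlx5_buf can use mlx5_buf_offset()
 * without caring whether the buffer is direct or page-based. The
 * entry type and buffer below are hypothetical.
 *
 *	struct my_entry *get_entry(struct mlx5_buf *buf, int n)
 *	{
 *		return mlx5_buf_offset(buf, n * sizeof(struct my_entry));
 *	}
 */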

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

struct ib_field {
	size_t struct_offset_bytes;
	size_t struct_size_bytes;
	int offset_bits;
	int size_bits;
};
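
/*
 * Illustrative sketch (not part of this header): STRUCT_FIELD() is
 * meant for designated initializers in ib_field tables that map bit
 * ranges of a wire header onto members of a struct ib_unpacked_* type.
 * Assuming a "struct ib_unpacked_lrh" with a "service_level" member,
 * an entry would look roughly like:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, service_level),
 *		  .offset_bits = 4,
 *		  .size_bits   = 4 },
 *	};
 */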

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
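
/*
 * Illustrative sketch (not part of this header): the four helpers
 * above decode the 32-bit init-segment words, so logging the firmware
 * and command-interface revisions reduces to:
 *
 *	dev_info(&dev->pdev->dev, "fw %d.%d.%d, cmdif rev %d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev),
 *		 cmdif_rev(dev));
 */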

/* Try a cheap contiguous allocation first; fall back to vzalloc() for
 * sizes the page allocator is likely to refuse.
 */
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}

/* Free a buffer obtained from mlx5_vzalloc(), whichever allocator
 * actually backed it.
 */
static inline void mlx5_vfree(const void *addr)
{
	if (addr && is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
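
/*
 * Illustrative sketch (not part of this header): the two helpers are
 * meant to be used as a pair, e.g. for a command buffer whose size is
 * only known at runtime (the size and usage below are hypothetical):
 *
 *	void *in = mlx5_vzalloc(inlen);
 *	if (!in)
 *		return -ENOMEM;
 *	... fill and submit the buffer ...
 *	mlx5_vfree(in);
 */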

int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
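
/*
 * Illustrative sketch (not part of this header): a typical caller
 * builds an inbox with the opcode in its header, runs it through
 * mlx5_cmd_exec(), then checks both the transport error and the
 * firmware status. The mailbox structs are assumed to come from
 * <linux/mlx5/device.h>; treat their exact layout as an assumption.
 *
 *	struct mlx5_alloc_pd_mbox_in in;
 *	struct mlx5_alloc_pd_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (err)
 *		return err;
 *	if (out.hdr.status)
 *		return mlx5_cmd_status_to_err(&out.hdr);
 */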
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			  struct mlx5_create_mkey_mbox_in *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
		      u16 opmod, int port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s16 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

typedef void (*health_handler_t)(struct pci_dev *pdev,
				 struct health_buffer __iomem *buf, int size);
int mlx5_register_health_report_handler(health_handler_t handler);
void mlx5_unregister_health_report_handler(void);
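
/*
 * Illustrative sketch (not part of this header): a consumer that wants
 * to be notified of device health failures registers a handler and
 * must unregister it before unloading. The handler body below is
 * hypothetical.
 *
 *	static void my_health_handler(struct pci_dev *pdev,
 *				      struct health_buffer __iomem *buf,
 *				      int size)
 *	{
 *		dev_warn(&pdev->dev, "device health failure reported\n");
 *	}
 *
 *	...
 *	mlx5_register_health_report_handler(my_health_handler);
 *	...
 *	mlx5_unregister_health_report_handler();
 */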
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}
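
/*
 * Illustrative note (not part of this header): an mkey appears to be
 * laid out as a table index in the upper 24 bits and an 8-bit variable
 * key in the lowest byte (see mkey_key in struct mlx5_priv), so
 * composing a full mkey from an index and key looks roughly like:
 *
 *	u32 mkey = mlx5_idx_to_mkey(idx) | key;
 *	WARN_ON(mlx5_mkey_to_idx(mkey) != idx);
 */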

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
};

enum {
	MAX_MR_CACHE_ENTRIES = 16,
};

struct mlx5_profile {
	u64 mask;
	u32 log_max_qp;
	int cmdif_csum;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

#endif /* MLX5_DRIVER_H */