/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printk(MOD fmt, ## args); \
} while (0)

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;              /* logical minimal id */
	u32 last;               /* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1<<0),
	T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat ocqp;
	u64 db_full;
	u64 db_empty;
	u64 db_drop;
	u64 db_state_transitions;
	u64 db_fc_interruptions;
	u64 tcam_full;
	u64 act_ofld_conn_fails;
	u64 pas_ofld_conn_fails;
};

struct c4iw_rdev {
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	u32 flags;
	struct cxgb4_lld_info lldi;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
	struct t4_dev_status_page *status_page;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}

#define C4IW_WR_TO (30*HZ)

struct c4iw_wr_wait {
	struct completion completion;
	int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				      struct c4iw_wr_wait *wr_waitp,
				      u32 hwtid, u32 qpid,
				      const char *func)
{
	unsigned to = C4IW_WR_TO;
	int ret;

	do {
		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			printk(KERN_ERR MOD "%s - Device %s not responding - "
			       "tid %u qpid %u\n", func,
			       pci_name(rdev->lldi.pdev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	} while (!ret);
	if (wr_waitp->ret)
		PDBG("%s: FW reply %d tid %u qpid %u\n",
		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
	return wr_waitp->ret;
}

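/*
 * Illustrative only (not upstream code): the wr_wait helpers above are
 * normally used by a caller that posts a firmware work request and then
 * blocks until the matching CPL reply path calls c4iw_wake_up() on the
 * same wait object.  "skb" and "qid" below are placeholder names:
 *
 *	struct c4iw_wr_wait wr_wait;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	ret = c4iw_ofld_send(rdev, skb);	// skb carries the FW WR
 *	if (!ret)
 *		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qid, __func__);
 */
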
enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2,
	STOPPED = 3
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	struct idr hwtid_idr;
	struct idr atid_idr;
	struct idr stid_idr;
	struct list_head db_fc_list;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;

	if (lock) {
		idr_preload(GFP_KERNEL);
		spin_lock_irq(&rhp->lock);
	}

	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

	if (lock) {
		spin_unlock_irq(&rhp->lock);
		idr_preload_end();
	}

	BUG_ON(ret == -ENOSPC);
	return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}

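/*
 * Illustrative only (not upstream code): the idr wrappers above are how
 * driver objects are published for lookup by their hardware ids.  For
 * example, a memory region can be inserted under its MMID and later found
 * again with get_mhp(); "mhp" and "mmid" here are placeholder names:
 *
 *	err = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
 *	...
 *	mhp = get_mhp(rhp, mmid);
 *	...
 *	remove_handle(rhp, &rhp->mmidr, mmid);
 */
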
struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
			     key, (unsigned long long) mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
	     mm->key, (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}

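/*
 * Illustrative only (not upstream code): a rough sketch of how the mmap
 * bookkeeping above is used.  A queue-create path hands a key back to
 * userspace and records the backing physical region, and the driver's
 * mmap handler later claims it.  "uresp", "queue_pa" and "size" are
 * placeholder names:
 *
 *	mm->key = uresp.key;
 *	mm->addr = queue_pa;
 *	mm->len = PAGE_ALIGN(size);
 *	insert_mmap(ucontext, mm);
 *
 * and in the mmap handler, keyed by the file offset:
 *
 *	mm = remove_mmap(ucontext, key, len);
 *	if (!mm)
 *		return -EINVAL;
 *	// remap mm->addr / mm->len into the vma, then kfree(mm)
 */
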
enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1<<1,
	C4IW_QP_ATTR_RQ_DB = 1<<2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

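/*
 * For example (derived directly from the helper above), local read access
 * is always granted, so:
 *
 *	c4iw_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE)
 *		== FW_RI_MEM_ACCESS_LOCAL_READ |
 *		   FW_RI_MEM_ACCESS_LOCAL_WRITE |
 *		   FW_RI_MEM_ACCESS_REM_WRITE
 */
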
static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF

#define c4iw_put_ep(ep) { \
	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	kref_get(&((ep)->kref));  \
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP		= 0x00,
	LAYER_DDP		= 0x10,
	LAYER_MPA		= 0x20,
	RDMAP_LOCAL_CATA	= 0x00,
	RDMAP_REMOTE_PROT	= 0x01,
	RDMAP_REMOTE_OP		= 0x02,
	DDP_LOCAL_CATA		= 0x00,
	DDP_TAGGED_ERR		= 0x01,
	DDP_UNTAGGED_ERR	= 0x02,
	DDP_LLP			= 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG		= 0x00,
	RDMAP_BASE_BOUNDS	= 0x01,
	RDMAP_ACC_VIOL		= 0x02,
	RDMAP_STAG_NOT_ASSOC	= 0x03,
	RDMAP_TO_WRAP		= 0x04,
	RDMAP_INV_VERS		= 0x05,
	RDMAP_INV_OPCODE	= 0x06,
	RDMAP_STREAM_CATA	= 0x07,
	RDMAP_GLOBAL_CATA	= 0x08,
	RDMAP_CANT_INV_STAG	= 0x09,
	RDMAP_UNSPECIFIED	= 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG		= 0x00,
	DDPT_BASE_BOUNDS	= 0x01,
	DDPT_STAG_NOT_ASSOC	= 0x02,
	DDPT_TO_WRAP		= 0x03,
	DDPT_INV_VERS		= 0x04,
	DDPU_INV_QN		= 0x01,
	DDPU_INV_MSN_NOBUF	= 0x02,
	DDPU_INV_MSN_RANGE	= 0x03,
	DDPU_INV_MO		= 0x04,
	DDPU_MSG_TOOBIG		= 0x05,
	DDPU_INV_VERS		= 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR		= 0x02,
	MPA_MARKER_ERR		= 0x03,
	MPA_LOCAL_CATA		= 0x05,
	MPA_INSUFF_IRD		= 0x06,
	MPA_NOMATCH_RTR		= 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS	= 0,
	ABORT_REQ_IN_PROGRESS	= 1,
	RELEASE_RESOURCES	= 2,
	CLOSE_SENT		= 3,
	TIMEOUT			= 4,
	QP_REFERENCED		= 5,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	PASS_ACCEPT_REQ = 4,
	PASS_ESTAB = 5,
	ABORT_UPCALL = 6,
	ESTAB_UPCALL = 7,
	CLOSE_UPCALL = 8,
	ULP_ACCEPT = 9,
	ULP_REJECT = 10,
	TIMEDOUT = 11,
	PEER_ABORT = 12,
	PEER_CLOSE = 13,
	CONNREQ_UPCALL = 14,
	ABORT_CONN = 15,
	DISCONN_UPCALL = 16,
	EP_DISC_CLOSE = 17,
	EP_DISC_ABORT = 18,
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
	unsigned int retry_count;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535<<wscale) < win)
		wscale++;
	return wscale;
}

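/*
 * Worked example for compute_wscale() above: for a 64KB advertised window
 * (win == 65536), 65535 << 0 is still smaller than win, so one shift is
 * needed and compute_wscale(65536) == 1; for win == 262144 it returns 3.
 * The result is capped at 14, the largest TCP window-scale shift.
 */
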
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return infop->vr->ocq.size > 0;
#else
	return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
			       u64 length, u64 virt, int acc,
			       struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;
extern int db_coalescing_threshold;

#endif