/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

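/*
 * Per-process user context: tracks the doorbell mmap entry and the
 * lists of CQ/QP memory registered from user space, each guarded by
 * its own spinlock.
 */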
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	u8 legacy_mode : 1;
	u8 use_raw_attrs : 1;
};

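/* Protection domain: pairs the uverbs ib_pd with its lower-level irdma_sc_pd. */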
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

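/* IPv4/IPv6 overlay used below for the AH source and destination addresses. */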
union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

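/*
 * Address handle. AHs are reference counted and cached on a hash list;
 * a duplicate AH points back at the cached original via parent_ah.
 */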
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

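/*
 * One HMC physical-buffer-list entry: either an index into the PBLE
 * pool or a direct DMA address, depending on how the buffer was mapped.
 */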
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

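/*
 * PBL bookkeeping for a user registration. The anonymous union carries
 * QP- or CQ-specific state depending on which object the MR backs.
 */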
struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

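/*
 * Memory region. The same structure also backs memory windows, so the
 * embedded uverbs object is a union of ib_mr and ib_mw.
 */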
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

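/*
 * Completion queue. Kernel CQs keep their ring in kmem/kmem_shadow;
 * user CQs reference PBLs registered by the process. resize_list holds
 * retired buffers until they drain, and cmpl_generated queues the
 * software-generated flush completions.
 */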
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	refcount_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

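/*
 * Queue pair. The two anonymous unions mirror the transports the
 * device supports: iWARP offload state paired with TCP connection
 * info, or RoCEv2 offload state paired with UDP info.
 */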
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

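/*
 * The firmware version is reported through the FW_INFO feature word;
 * the helpers below extract its major and minor fields. Illustrative
 * caller sketch (hypothetical, not part of this header):
 *
 *	pr_info("irdma FW %u.%u\n",
 *		irdma_fw_major_ver(dev), irdma_fw_minor_ver(dev));
 */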
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

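/*
 * Map a send-queue HW completion opcode to the ib_wc opcode expected by
 * the core; unrecognized opcodes mark the entry IB_WC_GENERAL_ERR.
 */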
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

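/*
 * Map a receive-queue completion. Callers pass whether the QP supports
 * Send with Immediate (never the case on iWARP). Illustrative poll-path
 * sketch (hypothetical, not part of this header):
 *
 *	struct ib_wc wc = {};
 *
 *	if (info->q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(info, &wc);
 *	else
 *		set_ib_wc_op_rq(info, &wc, send_imm_supported);
 */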
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support Send with Immediate, so any immediate
	 * data present must have arrived via RDMA Write with Immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */