/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printk(MOD fmt, ## args); \
} while (0)
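
/*
 * Illustrative only (not part of the original header): PDBG is the driver's
 * conditional debug printk, gated by the c4iw_debug module parameter.  A
 * typical call site looks roughly like:
 *
 *	PDBG("%s qhp %p qid 0x%x\n", __func__, qhp, qhp->wq.sq.qid);
 *
 * which prints nothing unless c4iw_debug is set.
 */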

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1	/* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2	/* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;		/* logical minimal id */
	u32 last;		/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};
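
/*
 * Illustrative sketch (not in the original header): a c4iw_id_table is set up
 * once and then used as a simple id allocator, roughly:
 *
 *	struct c4iw_id_table tbl;
 *	u32 id;
 *
 *	if (c4iw_id_table_alloc(&tbl, 0, 1024, 0, C4IW_ID_TABLE_F_RANDOM))
 *		return -ENOMEM;
 *	id = c4iw_id_alloc(&tbl);
 *	...
 *	c4iw_id_free(&tbl, id);
 *	c4iw_id_table_free(&tbl);
 *
 * The prototypes appear further down in this header; the tpt/qid/pdid tables
 * in struct c4iw_resource below are managed this way by the resource code.
 */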

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1<<0),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat ocqp;
	u64 db_full;
	u64 db_empty;
	u64 db_drop;
	u64 db_state_transitions;
};

struct c4iw_rdev {
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	u32 flags;
	struct cxgb4_lld_info lldi;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
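
/*
 * Note (added for clarity, not in the original header): the stag virtual
 * resource range is sized in bytes and each hardware TPT entry occupies
 * 32 bytes, so "size >> 5" is the number of entries; the count is then
 * capped at T4_MAX_NUM_STAG.  For example, a 1 MB stag region would yield
 * 32768 entries before the cap is applied.
 */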

#define C4IW_WR_TO (10*HZ)

struct c4iw_wr_wait {
	struct completion completion;
	int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				      struct c4iw_wr_wait *wr_waitp,
				      u32 hwtid, u32 qpid,
				      const char *func)
{
	unsigned to = C4IW_WR_TO;
	int ret;

	do {
		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			printk(KERN_ERR MOD "%s - Device %s not responding - "
			       "tid %u qpid %u\n", func,
			       pci_name(rdev->lldi.pdev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	} while (!ret);
	if (wr_waitp->ret)
		PDBG("%s: FW reply %d tid %u qpid %u\n",
		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
	return wr_waitp->ret;
}
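
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * pairs these helpers around a firmware work request, roughly:
 *
 *	struct c4iw_wr_wait wr_wait;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	... build and post a WR whose reply path calls c4iw_wake_up() ...
 *	ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
 *
 * The wait starts at C4IW_WR_TO (10 s) and quadruples the timeout on each
 * pass ("to = to << 2"), logging a warning per timeout; it only gives up
 * early, with -EIO, once the rdev has been marked T4_FATAL_ERROR.
 */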

enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	int qpcnt;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}
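
/*
 * Illustrative sketch (not in the original header): the idr helpers above are
 * how the driver maps hardware ids back to software objects.  A QP is
 * registered under its queue id at create time and looked up from the event
 * path with get_qhp(), roughly:
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	qhp = get_qhp(rhp, qid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 *
 * The _nolock variants exist for callers that already hold rhp->lock and so
 * must not take it again; they also allocate with GFP_ATOMIC.
 */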

struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
	int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
			     key, (unsigned long long) mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
	     mm->key, (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
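
/*
 * Illustrative note (not in the original header): these entries back the
 * provider's mmap path.  At create time the driver allocates a c4iw_mm_entry
 * for each queue or doorbell region, fills in a key taken from the ucontext,
 * the physical address, and the length, and queues it with insert_mmap();
 * the key is returned to user space, and the later mmap() call is matched
 * back to the entry with remove_mmap(ucontext, key, len) before the region
 * is remapped.
 */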

enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}
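
/*
 * Note (added for clarity, not in the original header): the two conversions
 * above are not inverses of each other.  Both IB_QPS_RESET and IB_QPS_INIT
 * collapse to C4IW_QP_STATE_IDLE, and to_ib_qp_state() reports IDLE back as
 * IB_QPS_INIT, so a RESET -> query round trip comes back as INIT; unknown IB
 * states are rejected with -1 by c4iw_convert_state().
 */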

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
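
/*
 * Illustrative example (not in the original header): local read permission is
 * always granted by c4iw_ib_to_tpt_access(), so an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE maps to
 * FW_RI_MEM_ACCESS_LOCAL_READ | FW_RI_MEM_ACCESS_LOCAL_WRITE |
 * FW_RI_MEM_ACCESS_REM_WRITE, while the bind variant carries only the two
 * remote permissions through.
 */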

enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA 256
#define MPA_ENHANCED_RDMA_CONN 0x10
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF

#define c4iw_put_ep(ep) { \
	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	kref_get(&((ep)->kref));  \
}
void _c4iw_free_ep(struct kref *kref);

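/*
 * Illustrative note (not in the original header): endpoint lifetime is kref
 * based.  Code that stashes an ep pointer (for example on a timer or deferred
 * CPL-handling path) takes a reference first and drops it when done, roughly:
 *
 *	c4iw_get_ep(&ep->com);
 *	... hand ep off to deferred work ...
 *	c4iw_put_ep(&ep->com);
 *
 * The final put invokes _c4iw_free_ep() via kref_put(); both macros log the
 * call site through PDBG when debugging is enabled.
 */
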
struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP		= 0x00,
	LAYER_DDP		= 0x10,
	LAYER_MPA		= 0x20,
	RDMAP_LOCAL_CATA	= 0x00,
	RDMAP_REMOTE_PROT	= 0x01,
	RDMAP_REMOTE_OP		= 0x02,
	DDP_LOCAL_CATA		= 0x00,
	DDP_TAGGED_ERR		= 0x01,
	DDP_UNTAGGED_ERR	= 0x02,
	DDP_LLP			= 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG		= 0x00,
	RDMAP_BASE_BOUNDS	= 0x01,
	RDMAP_ACC_VIOL		= 0x02,
	RDMAP_STAG_NOT_ASSOC	= 0x03,
	RDMAP_TO_WRAP		= 0x04,
	RDMAP_INV_VERS		= 0x05,
	RDMAP_INV_OPCODE	= 0x06,
	RDMAP_STREAM_CATA	= 0x07,
	RDMAP_GLOBAL_CATA	= 0x08,
	RDMAP_CANT_INV_STAG	= 0x09,
	RDMAP_UNSPECIFIED	= 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG		= 0x00,
	DDPT_BASE_BOUNDS	= 0x01,
	DDPT_STAG_NOT_ASSOC	= 0x02,
	DDPT_TO_WRAP		= 0x03,
	DDPT_INV_VERS		= 0x04,
	DDPU_INV_QN		= 0x01,
	DDPU_INV_MSN_NOBUF	= 0x02,
	DDPU_INV_MSN_RANGE	= 0x03,
	DDPU_INV_MO		= 0x04,
	DDPU_MSG_TOOBIG		= 0x05,
	DDPU_INV_VERS		= 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR		= 0x02,
	MPA_MARKER_ERR		= 0x03,
	MPA_LOCAL_CATA		= 0x05,
	MPA_INSUFF_IRD		= 0x06,
	MPA_NOMATCH_RTR		= 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS	= 0,
	ABORT_REQ_IN_PROGRESS	= 1,
	RELEASE_RESOURCES	= 2,
	CLOSE_SENT		= 3,
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535<<wscale) < win)
		wscale++;
	return wscale;
}
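
/*
 * Worked example (added for clarity, not in the original header): for a
 * 256 KB advertised window, compute_wscale(262144) returns 3, since
 * 65535 << 2 = 262140 is still smaller than the window but 65535 << 3
 * covers it; the result is capped at 14, the largest TCP window scale
 * permitted by RFC 1323.
 */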

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
			       u64 length, u64 virt, int acc,
			       struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;

#endif