Commit | Line | Data |
---|---|---|
cfdda9d7 SW |
1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
e4dd23d7 PG |
32 | |
33 | #include <linux/module.h> | |
34 | ||
cfdda9d7 SW |
35 | #include "iw_cxgb4.h" |
36 | ||
2c974781 VP |
37 | static int db_delay_usecs = 1; |
38 | module_param(db_delay_usecs, int, 0644); | |
39 | MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain"); | |
40 | ||
a9c77198 | 41 | static int ocqp_support = 1; |
c6d7b267 | 42 | module_param(ocqp_support, int, 0644); |
a9c77198 | 43 | MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); |
c6d7b267 | 44 | |
3cbdb928 | 45 | int db_fc_threshold = 1000; |
422eea0a | 46 | module_param(db_fc_threshold, int, 0644); |
3cbdb928 VP |
47 | MODULE_PARM_DESC(db_fc_threshold, |
48 | "QP count/threshold that triggers" | |
49 | " automatic db flow control mode (default = 1000)"); | |
50 | ||
51 | int db_coalescing_threshold; | |
52 | module_param(db_coalescing_threshold, int, 0644); | |
53 | MODULE_PARM_DESC(db_coalescing_threshold, | |
54 | "QP count/threshold that triggers" | |
55 | " disabling db coalescing (default = 0)"); | |
422eea0a | 56 | |
42b6a949 VP |
57 | static int max_fr_immd = T4_MAX_FR_IMMD; |
58 | module_param(max_fr_immd, int, 0644); | |
59 | MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate"); | 
60 | ||
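/*
 * Usage sketch (assuming the usual iw_cxgb4 module name, which is not shown
 * in this file): the knobs above are ordinary module parameters, e.g.
 *
 *	modprobe iw_cxgb4 db_fc_threshold=2000 ocqp_support=0
 *
 * and the 0644 ones can also be changed at runtime under
 * /sys/module/iw_cxgb4/parameters/.
 */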
4c2c5763 HS |
61 | static int alloc_ird(struct c4iw_dev *dev, u32 ird) |
62 | { | |
63 | int ret = 0; | |
64 | ||
65 | spin_lock_irq(&dev->lock); | |
66 | if (ird <= dev->avail_ird) | |
67 | dev->avail_ird -= ird; | |
68 | else | |
69 | ret = -ENOMEM; | |
70 | spin_unlock_irq(&dev->lock); | |
71 | ||
72 | if (ret) | |
73 | dev_warn(&dev->rdev.lldi.pdev->dev, | |
74 | "device IRD resources exhausted\n"); | |
75 | ||
76 | return ret; | |
77 | } | |
78 | ||
79 | static void free_ird(struct c4iw_dev *dev, int ird) | |
80 | { | |
81 | spin_lock_irq(&dev->lock); | |
82 | dev->avail_ird += ird; | |
83 | spin_unlock_irq(&dev->lock); | |
84 | } | |
85 | ||
2f5b48c3 SW |
86 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) |
87 | { | |
88 | unsigned long flag; | |
89 | spin_lock_irqsave(&qhp->lock, flag); | |
90 | qhp->attr.state = state; | |
91 | spin_unlock_irqrestore(&qhp->lock, flag); | |
92 | } | |
93 | ||
c6d7b267 SW |
94 | static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) |
95 | { | |
96 | c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); | |
97 | } | |
98 | ||
99 | static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
100 | { | |
101 | dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, | |
102 | dma_unmap_addr(sq, mapping)); | 
103 | } | |
104 | ||
105 | static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
106 | { | |
107 | if (t4_sq_onchip(sq)) | |
108 | dealloc_oc_sq(rdev, sq); | |
109 | else | |
110 | dealloc_host_sq(rdev, sq); | |
111 | } | |
112 | ||
113 | static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
114 | { | |
f079af7a | 115 | if (!ocqp_support || !ocqp_supported(&rdev->lldi)) |
c6d7b267 SW |
116 | return -ENOSYS; |
117 | sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); | |
118 | if (!sq->dma_addr) | |
119 | return -ENOMEM; | |
120 | sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - | |
121 | rdev->lldi.vr->ocq.start; | |
122 | sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - | |
123 | rdev->lldi.vr->ocq.start); | |
124 | sq->flags |= T4_SQ_ONCHIP; | |
125 | return 0; | |
126 | } | |
127 | ||
128 | static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
129 | { | |
130 | sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, | |
131 | &(sq->dma_addr), GFP_KERNEL); | |
132 | if (!sq->queue) | |
133 | return -ENOMEM; | |
134 | sq->phys_addr = virt_to_phys(sq->queue); | |
135 | dma_unmap_addr_set(sq, mapping, sq->dma_addr); | 
136 | return 0; | |
137 | } | |
138 | ||
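/*
 * SQ memory is allocated in two tiers: user-mode QPs first try the on-chip
 * (OCQP) pool and fall back to host DMA memory if that fails or is not
 * supported; kernel QPs always use host memory.
 */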
5b0c2759 TLSC |
139 | static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) |
140 | { | |
141 | int ret = -ENOSYS; | |
142 | if (user) | |
143 | ret = alloc_oc_sq(rdev, sq); | |
144 | if (ret) | |
145 | ret = alloc_host_sq(rdev, sq); | |
146 | return ret; | |
147 | } | |
148 | ||
cfdda9d7 SW |
149 | static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
150 | struct c4iw_dev_ucontext *uctx) | |
151 | { | |
152 | /* | |
153 | * uP clears EQ contexts when the connection exits rdma mode, | |
154 | * so no need to post a RESET WR for these EQs. | |
155 | */ | |
156 | dma_free_coherent(&(rdev->lldi.pdev->dev), | |
157 | wq->rq.memsize, wq->rq.queue, | |
f38926aa | 158 | dma_unmap_addr(&wq->rq, mapping)); |
c6d7b267 | 159 | dealloc_sq(rdev, &wq->sq); |
cfdda9d7 SW |
160 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
161 | kfree(wq->rq.sw_rq); | |
162 | kfree(wq->sq.sw_sq); | |
163 | c4iw_put_qpid(rdev, wq->rq.qid, uctx); | |
164 | c4iw_put_qpid(rdev, wq->sq.qid, uctx); | |
165 | return 0; | |
166 | } | |
167 | ||
74217d4c H |
168 | /* |
169 | * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL, | |
170 | * then this is a user mapping so compute the page-aligned physical address | |
171 | * for mapping. | |
172 | */ | |
173 | void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, | |
174 | enum cxgb4_bar2_qtype qtype, | |
175 | unsigned int *pbar2_qid, u64 *pbar2_pa) | |
176 | { | |
177 | u64 bar2_qoffset; | |
178 | int ret; | |
179 | ||
180 | ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype, | |
181 | pbar2_pa ? 1 : 0, | |
182 | &bar2_qoffset, pbar2_qid); | |
183 | if (ret) | |
184 | return NULL; | |
185 | ||
186 | if (pbar2_pa) | |
187 | *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; | |
188 | return rdev->bar2_kva + bar2_qoffset; | |
189 | } | |
190 | ||
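/*
 * create_qp() below allocates the SQ/RQ qids, the software shadow queues,
 * the RQT entries and the queue memory, maps the BAR2 doorbell region for
 * each queue, then posts a FW_RI_RES_WR so the firmware writes both egress
 * queue contexts, blocking on wr_wait for the reply.
 */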
cfdda9d7 SW |
191 | static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
192 | struct t4_cq *rcq, struct t4_cq *scq, | |
193 | struct c4iw_dev_ucontext *uctx) | |
194 | { | |
195 | int user = (uctx != &rdev->uctx); | |
196 | struct fw_ri_res_wr *res_wr; | |
197 | struct fw_ri_res *res; | |
198 | int wr_len; | |
199 | struct c4iw_wr_wait wr_wait; | |
200 | struct sk_buff *skb; | |
9919d5bd | 201 | int ret = 0; |
cfdda9d7 SW |
202 | int eqsize; |
203 | ||
204 | wq->sq.qid = c4iw_get_qpid(rdev, uctx); | |
205 | if (!wq->sq.qid) | |
206 | return -ENOMEM; | |
207 | ||
208 | wq->rq.qid = c4iw_get_qpid(rdev, uctx); | |
c079c287 EG |
209 | if (!wq->rq.qid) { |
210 | ret = -ENOMEM; | |
211 | goto free_sq_qid; | |
212 | } | |
cfdda9d7 SW |
213 | |
214 | if (!user) { | |
215 | wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, | |
216 | GFP_KERNEL); | |
c079c287 EG |
217 | if (!wq->sq.sw_sq) { |
218 | ret = -ENOMEM; | |
219 | goto free_rq_qid; | |
220 | } | |
cfdda9d7 SW |
221 | |
222 | wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, | |
223 | GFP_KERNEL); | |
c079c287 EG |
224 | if (!wq->rq.sw_rq) { |
225 | ret = -ENOMEM; | |
226 | goto free_sw_sq; | |
227 | } | |
cfdda9d7 SW |
228 | } |
229 | ||
230 | /* | |
66eb19af | 231 | * RQT must be a power of 2 and at least 16 deep. |
cfdda9d7 | 232 | */ |
66eb19af | 233 | wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); |
cfdda9d7 | 234 | wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); |
c079c287 EG |
235 | if (!wq->rq.rqt_hwaddr) { |
236 | ret = -ENOMEM; | |
237 | goto free_sw_rq; | |
238 | } | |
cfdda9d7 | 239 | |
5b0c2759 TLSC |
240 | ret = alloc_sq(rdev, &wq->sq, user); |
241 | if (ret) | |
242 | goto free_hwaddr; | |
cfdda9d7 | 243 | memset(wq->sq.queue, 0, wq->sq.memsize); |
f38926aa | 244 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); |
cfdda9d7 SW |
245 | |
246 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), | |
247 | wq->rq.memsize, &(wq->rq.dma_addr), | |
248 | GFP_KERNEL); | |
55e57a78 WY |
249 | if (!wq->rq.queue) { |
250 | ret = -ENOMEM; | |
c079c287 | 251 | goto free_sq; |
55e57a78 | 252 | } |
cfdda9d7 SW |
253 | PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", |
254 | __func__, wq->sq.queue, | |
255 | (unsigned long long)virt_to_phys(wq->sq.queue), | |
256 | wq->rq.queue, | |
257 | (unsigned long long)virt_to_phys(wq->rq.queue)); | |
258 | memset(wq->rq.queue, 0, wq->rq.memsize); | |
f38926aa | 259 | dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); |
cfdda9d7 SW |
260 | |
261 | wq->db = rdev->lldi.db_reg; | |
fa658a98 | 262 | |
74217d4c H |
263 | wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, |
264 | &wq->sq.bar2_qid, | |
265 | user ? &wq->sq.bar2_pa : NULL); | |
266 | wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, | |
267 | &wq->rq.bar2_qid, | |
268 | user ? &wq->rq.bar2_pa : NULL); | |
269 | ||
270 | /* | |
271 | * User mode must have bar2 access. | |
272 | */ | |
273 | if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { | |
274 | pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", | |
275 | pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); | |
276 | ret = -EINVAL; goto free_dma; | 
cfdda9d7 | 277 | } |
74217d4c | 278 | |
cfdda9d7 SW |
279 | wq->rdev = rdev; |
280 | wq->rq.msn = 1; | |
281 | ||
282 | /* build fw_ri_res_wr */ | |
283 | wr_len = sizeof *res_wr + 2 * sizeof *res; | |
284 | ||
d3c814e8 | 285 | skb = alloc_skb(wr_len, GFP_KERNEL); |
cfdda9d7 SW |
286 | if (!skb) { |
287 | ret = -ENOMEM; | |
c079c287 | 288 | goto free_dma; |
cfdda9d7 SW |
289 | } |
290 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | |
291 | ||
292 | res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); | |
293 | memset(res_wr, 0, wr_len); | |
294 | res_wr->op_nres = cpu_to_be32( | |
e2ac9628 | 295 | FW_WR_OP_V(FW_RI_RES_WR) | |
cf7fe64a | 296 | FW_RI_RES_WR_NRES_V(2) | |
e2ac9628 | 297 | FW_WR_COMPL_F); |
cfdda9d7 | 298 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
6198dd8d | 299 | res_wr->cookie = (uintptr_t)&wr_wait; |
cfdda9d7 SW |
300 | res = res_wr->res; |
301 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; | |
302 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | |
303 | ||
304 | /* | |
305 | * eqsize is the number of 64B entries plus the status page size. | |
306 | */ | |
04e10e21 HS |
307 | eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + |
308 | rdev->hw_queue.t4_eq_status_entries; | |
cfdda9d7 SW |
309 | |
310 | res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( | |
cf7fe64a HS |
311 | FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ |
312 | FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ | |
313 | FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ | |
314 | (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | | |
315 | FW_RI_RES_WR_IQID_V(scq->cqid)); | |
cfdda9d7 | 316 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
cf7fe64a HS |
317 | FW_RI_RES_WR_DCAEN_V(0) | |
318 | FW_RI_RES_WR_DCACPU_V(0) | | |
319 | FW_RI_RES_WR_FBMIN_V(2) | | |
320 | FW_RI_RES_WR_FBMAX_V(2) | | |
321 | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | | |
322 | FW_RI_RES_WR_CIDXFTHRESH_V(0) | | |
323 | FW_RI_RES_WR_EQSIZE_V(eqsize)); | |
cfdda9d7 SW |
324 | res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); |
325 | res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); | |
326 | res++; | |
327 | res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; | |
328 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | |
329 | ||
330 | /* | |
331 | * eqsize is the number of 64B entries plus the status page size. | |
332 | */ | |
04e10e21 HS |
333 | eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + |
334 | rdev->hw_queue.t4_eq_status_entries; | |
cfdda9d7 | 335 | res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( |
cf7fe64a HS |
336 | FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ |
337 | FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ | |
338 | FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ | |
339 | FW_RI_RES_WR_IQID_V(rcq->cqid)); | |
cfdda9d7 | 340 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
cf7fe64a HS |
341 | FW_RI_RES_WR_DCAEN_V(0) | |
342 | FW_RI_RES_WR_DCACPU_V(0) | | |
343 | FW_RI_RES_WR_FBMIN_V(2) | | |
344 | FW_RI_RES_WR_FBMAX_V(2) | | |
345 | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | | |
346 | FW_RI_RES_WR_CIDXFTHRESH_V(0) | | |
347 | FW_RI_RES_WR_EQSIZE_V(eqsize)); | |
cfdda9d7 SW |
348 | res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); |
349 | res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); | |
350 | ||
351 | c4iw_init_wr_wait(&wr_wait); | |
352 | ||
353 | ret = c4iw_ofld_send(rdev, skb); | |
354 | if (ret) | |
c079c287 | 355 | goto free_dma; |
aadc4df3 | 356 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); |
cfdda9d7 | 357 | if (ret) |
c079c287 | 358 | goto free_dma; |
cfdda9d7 | 359 | |
74217d4c | 360 | PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n", |
cfdda9d7 | 361 | __func__, wq->sq.qid, wq->rq.qid, wq->db, |
74217d4c | 362 | wq->sq.bar2_va, wq->rq.bar2_va); |
cfdda9d7 SW |
363 | |
364 | return 0; | |
c079c287 | 365 | free_dma: |
cfdda9d7 SW |
366 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
367 | wq->rq.memsize, wq->rq.queue, | |
f38926aa | 368 | dma_unmap_addr(&wq->rq, mapping)); |
c079c287 | 369 | free_sq: |
c6d7b267 | 370 | dealloc_sq(rdev, &wq->sq); |
c079c287 | 371 | free_hwaddr: |
cfdda9d7 | 372 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
c079c287 | 373 | free_sw_rq: |
cfdda9d7 | 374 | kfree(wq->rq.sw_rq); |
c079c287 | 375 | free_sw_sq: |
cfdda9d7 | 376 | kfree(wq->sq.sw_sq); |
c079c287 | 377 | free_rq_qid: |
cfdda9d7 | 378 | c4iw_put_qpid(rdev, wq->rq.qid, uctx); |
c079c287 | 379 | free_sq_qid: |
cfdda9d7 | 380 | c4iw_put_qpid(rdev, wq->sq.qid, uctx); |
c079c287 | 381 | return ret; |
cfdda9d7 SW |
382 | } |
383 | ||
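/*
 * build_immd() copies the send/write payload inline into the WQE right
 * after the fw_ri_immd header, wrapping at the end of the SQ ring and
 * zero-padding the tail so the WQE stays a multiple of 16 bytes.
 */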
d37ac31d SW |
384 | static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, |
385 | struct ib_send_wr *wr, int max, u32 *plenp) | |
cfdda9d7 | 386 | { |
d37ac31d SW |
387 | u8 *dstp, *srcp; |
388 | u32 plen = 0; | |
cfdda9d7 | 389 | int i; |
d37ac31d SW |
390 | int rem, len; |
391 | ||
392 | dstp = (u8 *)immdp->data; | |
393 | for (i = 0; i < wr->num_sge; i++) { | |
394 | if ((plen + wr->sg_list[i].length) > max) | |
395 | return -EMSGSIZE; | |
396 | srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; | |
397 | plen += wr->sg_list[i].length; | |
398 | rem = wr->sg_list[i].length; | |
399 | while (rem) { | |
400 | if (dstp == (u8 *)&sq->queue[sq->size]) | |
401 | dstp = (u8 *)sq->queue; | |
402 | if (rem <= (u8 *)&sq->queue[sq->size] - dstp) | |
403 | len = rem; | |
404 | else | |
405 | len = (u8 *)&sq->queue[sq->size] - dstp; | |
406 | memcpy(dstp, srcp, len); | |
407 | dstp += len; | |
408 | srcp += len; | |
409 | rem -= len; | |
410 | } | |
411 | } | |
13fecb83 SW |
412 | len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp); |
413 | if (len) | |
414 | memset(dstp, 0, len); | |
d37ac31d SW |
415 | immdp->op = FW_RI_DATA_IMMD; |
416 | immdp->r1 = 0; | |
417 | immdp->r2 = 0; | |
418 | immdp->immdlen = cpu_to_be32(plen); | |
419 | *plenp = plen; | |
420 | return 0; | |
421 | } | |
422 | ||
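/*
 * build_isgl() emits two 64-bit flits per SGE (lkey/length, then address)
 * directly into the queue, wrapping to the start of the ring when the end
 * is reached, and returns the total payload length via *plenp.
 */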
423 | static int build_isgl(__be64 *queue_start, __be64 *queue_end, | |
424 | struct fw_ri_isgl *isglp, struct ib_sge *sg_list, | |
425 | int num_sge, u32 *plenp) | |
426 | ||
427 | { | |
428 | int i; | |
429 | u32 plen = 0; | |
430 | __be64 *flitp = (__be64 *)isglp->sge; | |
431 | ||
432 | for (i = 0; i < num_sge; i++) { | |
433 | if ((plen + sg_list[i].length) < plen) | |
434 | return -EMSGSIZE; | |
435 | plen += sg_list[i].length; | |
436 | *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | | |
437 | sg_list[i].length); | |
438 | if (++flitp == queue_end) | |
439 | flitp = queue_start; | |
440 | *flitp = cpu_to_be64(sg_list[i].addr); | |
441 | if (++flitp == queue_end) | |
442 | flitp = queue_start; | |
443 | } | |
13fecb83 | 444 | *flitp = (__force __be64)0; |
d37ac31d SW |
445 | isglp->op = FW_RI_DATA_ISGL; |
446 | isglp->r1 = 0; | |
447 | isglp->nsge = cpu_to_be16(num_sge); | |
448 | isglp->r2 = 0; | |
449 | if (plenp) | |
450 | *plenp = plen; | |
451 | return 0; | |
452 | } | |
453 | ||
454 | static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, | |
455 | struct ib_send_wr *wr, u8 *len16) | |
456 | { | |
cfdda9d7 SW |
457 | u32 plen; |
458 | int size; | |
d37ac31d | 459 | int ret; |
cfdda9d7 SW |
460 | |
461 | if (wr->num_sge > T4_MAX_SEND_SGE) | |
462 | return -EINVAL; | |
463 | switch (wr->opcode) { | |
464 | case IB_WR_SEND: | |
465 | if (wr->send_flags & IB_SEND_SOLICITED) | |
466 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 467 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE)); |
cfdda9d7 SW |
468 | else |
469 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 470 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND)); |
cfdda9d7 SW |
471 | wqe->send.stag_inv = 0; |
472 | break; | |
473 | case IB_WR_SEND_WITH_INV: | |
474 | if (wr->send_flags & IB_SEND_SOLICITED) | |
475 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 476 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV)); |
cfdda9d7 SW |
477 | else |
478 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 479 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV)); |
cfdda9d7 SW |
480 | wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); |
481 | break; | |
482 | ||
483 | default: | |
484 | return -EINVAL; | |
485 | } | |
c3f98fa2 SW |
486 | wqe->send.r3 = 0; |
487 | wqe->send.r4 = 0; | |
d37ac31d | 488 | |
cfdda9d7 SW |
489 | plen = 0; |
490 | if (wr->num_sge) { | |
491 | if (wr->send_flags & IB_SEND_INLINE) { | |
d37ac31d SW |
492 | ret = build_immd(sq, wqe->send.u.immd_src, wr, |
493 | T4_MAX_SEND_INLINE, &plen); | |
494 | if (ret) | |
495 | return ret; | |
cfdda9d7 SW |
496 | size = sizeof wqe->send + sizeof(struct fw_ri_immd) + |
497 | plen; | |
498 | } else { | |
d37ac31d SW |
499 | ret = build_isgl((__be64 *)sq->queue, |
500 | (__be64 *)&sq->queue[sq->size], | |
501 | wqe->send.u.isgl_src, | |
502 | wr->sg_list, wr->num_sge, &plen); | |
503 | if (ret) | |
504 | return ret; | |
cfdda9d7 SW |
505 | size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + |
506 | wr->num_sge * sizeof(struct fw_ri_sge); | |
507 | } | |
508 | } else { | |
509 | wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
510 | wqe->send.u.immd_src[0].r1 = 0; | |
511 | wqe->send.u.immd_src[0].r2 = 0; | |
512 | wqe->send.u.immd_src[0].immdlen = 0; | |
513 | size = sizeof wqe->send + sizeof(struct fw_ri_immd); | |
d37ac31d | 514 | plen = 0; |
cfdda9d7 SW |
515 | } |
516 | *len16 = DIV_ROUND_UP(size, 16); | |
517 | wqe->send.plen = cpu_to_be32(plen); | |
518 | return 0; | |
519 | } | |
520 | ||
d37ac31d SW |
521 | static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, |
522 | struct ib_send_wr *wr, u8 *len16) | |
cfdda9d7 | 523 | { |
cfdda9d7 SW |
524 | u32 plen; |
525 | int size; | |
d37ac31d | 526 | int ret; |
cfdda9d7 | 527 | |
d37ac31d | 528 | if (wr->num_sge > T4_MAX_SEND_SGE) |
cfdda9d7 SW |
529 | return -EINVAL; |
530 | wqe->write.r2 = 0; | |
e622f2f4 CH |
531 | wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); |
532 | wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); | |
cfdda9d7 SW |
533 | if (wr->num_sge) { |
534 | if (wr->send_flags & IB_SEND_INLINE) { | |
d37ac31d SW |
535 | ret = build_immd(sq, wqe->write.u.immd_src, wr, |
536 | T4_MAX_WRITE_INLINE, &plen); | |
537 | if (ret) | |
538 | return ret; | |
cfdda9d7 SW |
539 | size = sizeof wqe->write + sizeof(struct fw_ri_immd) + |
540 | plen; | |
541 | } else { | |
d37ac31d SW |
542 | ret = build_isgl((__be64 *)sq->queue, |
543 | (__be64 *)&sq->queue[sq->size], | |
544 | wqe->write.u.isgl_src, | |
545 | wr->sg_list, wr->num_sge, &plen); | |
546 | if (ret) | |
547 | return ret; | |
cfdda9d7 SW |
548 | size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + |
549 | wr->num_sge * sizeof(struct fw_ri_sge); | |
550 | } | |
551 | } else { | |
552 | wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
553 | wqe->write.u.immd_src[0].r1 = 0; | |
554 | wqe->write.u.immd_src[0].r2 = 0; | |
555 | wqe->write.u.immd_src[0].immdlen = 0; | |
556 | size = sizeof wqe->write + sizeof(struct fw_ri_immd); | |
d37ac31d | 557 | plen = 0; |
cfdda9d7 SW |
558 | } |
559 | *len16 = DIV_ROUND_UP(size, 16); | |
560 | wqe->write.plen = cpu_to_be32(plen); | |
561 | return 0; | |
562 | } | |
563 | ||
564 | static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | |
565 | { | |
566 | if (wr->num_sge > 1) | |
567 | return -EINVAL; | |
568 | if (wr->num_sge) { | |
e622f2f4 CH |
569 | wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); |
570 | wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr | |
cfdda9d7 | 571 | >> 32)); |
e622f2f4 | 572 | wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); |
cfdda9d7 SW |
573 | wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); |
574 | wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); | |
575 | wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr | |
576 | >> 32)); | |
577 | wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); | |
578 | } else { | |
579 | wqe->read.stag_src = cpu_to_be32(2); | |
580 | wqe->read.to_src_hi = 0; | |
581 | wqe->read.to_src_lo = 0; | |
582 | wqe->read.stag_sink = cpu_to_be32(2); | |
583 | wqe->read.plen = 0; | |
584 | wqe->read.to_sink_hi = 0; | |
585 | wqe->read.to_sink_lo = 0; | |
586 | } | |
587 | wqe->read.r2 = 0; | |
588 | wqe->read.r5 = 0; | |
589 | *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); | |
590 | return 0; | |
591 | } | |
592 | ||
593 | static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, | |
594 | struct ib_recv_wr *wr, u8 *len16) | |
595 | { | |
d37ac31d | 596 | int ret; |
cfdda9d7 | 597 | |
d37ac31d SW |
598 | ret = build_isgl((__be64 *)qhp->wq.rq.queue, |
599 | (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], | |
600 | &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); | |
601 | if (ret) | |
602 | return ret; | |
cfdda9d7 SW |
603 | *len16 = DIV_ROUND_UP(sizeof wqe->recv + |
604 | wr->num_sge * sizeof(struct fw_ri_sge), 16); | |
605 | return 0; | |
606 | } | |
607 | ||
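/*
 * build_fastreg() builds the fast-register WQE.  On T5 adapters with DSGL
 * enabled and a PBL larger than max_fr_immd, the page list is referenced
 * via a DSGL so the hardware fetches it from host memory; otherwise the
 * page list is copied into the WQE as immediate data, wrapping around the
 * SQ ring as needed.
 */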
40dbf6ee | 608 | static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, |
e622f2f4 | 609 | struct ib_send_wr *send_wr, u8 *len16, u8 t5dev) |
cfdda9d7 | 610 | { |
e622f2f4 | 611 | struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr); |
cfdda9d7 SW |
612 | struct fw_ri_immd *imdp; |
613 | __be64 *p; | |
614 | int i; | |
e622f2f4 | 615 | int pbllen = roundup(wr->page_list_len * sizeof(u64), 32); |
40dbf6ee | 616 | int rem; |
cfdda9d7 | 617 | |
e622f2f4 | 618 | if (wr->page_list_len > t4_max_fr_depth(use_dsgl)) |
cfdda9d7 SW |
619 | return -EINVAL; |
620 | ||
621 | wqe->fr.qpbinde_to_dcacpu = 0; | |
e622f2f4 | 622 | wqe->fr.pgsz_shift = wr->page_shift - 12; |
cfdda9d7 | 623 | wqe->fr.addr_type = FW_RI_VA_BASED_TO; |
e622f2f4 | 624 | wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags); |
cfdda9d7 | 625 | wqe->fr.len_hi = 0; |
e622f2f4 CH |
626 | wqe->fr.len_lo = cpu_to_be32(wr->length); |
627 | wqe->fr.stag = cpu_to_be32(wr->rkey); | |
628 | wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32); | |
629 | wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff); | |
42b6a949 VP |
630 | |
631 | if (t5dev && use_dsgl && (pbllen > max_fr_immd)) { | |
632 | struct c4iw_fr_page_list *c4pl = | |
e622f2f4 | 633 | to_c4iw_fr_page_list(wr->page_list); |
42b6a949 VP |
634 | struct fw_ri_dsgl *sglp; |
635 | ||
e622f2f4 CH |
636 | for (i = 0; i < wr->page_list_len; i++) { |
637 | wr->page_list->page_list[i] = (__force u64) | |
638 | cpu_to_be64((u64)wr->page_list->page_list[i]); | |
42b6a949 VP |
639 | } |
640 | ||
641 | sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); | |
642 | sglp->op = FW_RI_DATA_DSGL; | |
643 | sglp->r1 = 0; | |
644 | sglp->nsge = cpu_to_be16(1); | |
645 | sglp->addr0 = cpu_to_be64(c4pl->dma_addr); | |
646 | sglp->len0 = cpu_to_be32(pbllen); | |
647 | ||
648 | *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); | |
649 | } else { | |
650 | imdp = (struct fw_ri_immd *)(&wqe->fr + 1); | |
651 | imdp->op = FW_RI_DATA_IMMD; | |
652 | imdp->r1 = 0; | |
653 | imdp->r2 = 0; | |
654 | imdp->immdlen = cpu_to_be32(pbllen); | |
655 | p = (__be64 *)(imdp + 1); | |
656 | rem = pbllen; | |
e622f2f4 CH |
657 | for (i = 0; i < wr->page_list_len; i++) { |
658 | *p = cpu_to_be64((u64)wr->page_list->page_list[i]); | |
42b6a949 VP |
659 | rem -= sizeof(*p); |
660 | if (++p == (__be64 *)&sq->queue[sq->size]) | |
661 | p = (__be64 *)sq->queue; | |
662 | } | |
663 | BUG_ON(rem < 0); | |
664 | while (rem) { | |
665 | *p = 0; | |
666 | rem -= sizeof(*p); | |
667 | if (++p == (__be64 *)&sq->queue[sq->size]) | |
668 | p = (__be64 *)sq->queue; | |
669 | } | |
670 | *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) | |
671 | + pbllen, 16); | |
cfdda9d7 SW |
672 | } |
673 | return 0; | |
674 | } | |
675 | ||
676 | static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, | |
677 | u8 *len16) | |
678 | { | |
679 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); | |
680 | wqe->inv.r2 = 0; | |
681 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); | |
682 | return 0; | |
683 | } | |
684 | ||
685 | void c4iw_qp_add_ref(struct ib_qp *qp) | |
686 | { | |
687 | PDBG("%s ib_qp %p\n", __func__, qp); | |
688 | atomic_inc(&(to_c4iw_qp(qp)->refcnt)); | |
689 | } | |
690 | ||
691 | void c4iw_qp_rem_ref(struct ib_qp *qp) | |
692 | { | |
693 | PDBG("%s ib_qp %p\n", __func__, qp); | |
694 | if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) | |
695 | wake_up(&(to_c4iw_qp(qp)->wait)); | |
696 | } | |
697 | ||
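/*
 * Doorbell flow control: when the adapter's doorbell FIFO is under
 * pressure (db_state != NORMAL), ring_kernel_sq_db()/ring_kernel_rq_db()
 * do not ring the doorbell directly.  The QP is queued on the device's
 * db_fc_list and the pending pidx increment is accumulated so it can be
 * replayed once doorbells are re-enabled.
 */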
05eb2389 SW |
698 | static void add_to_fc_list(struct list_head *head, struct list_head *entry) |
699 | { | |
700 | if (list_empty(entry)) | |
701 | list_add_tail(entry, head); | |
702 | } | |
703 | ||
704 | static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) | |
705 | { | |
706 | unsigned long flags; | |
707 | ||
708 | spin_lock_irqsave(&qhp->rhp->lock, flags); | |
709 | spin_lock(&qhp->lock); | |
fa658a98 | 710 | if (qhp->rhp->db_state == NORMAL) |
963cab50 | 711 | t4_ring_sq_db(&qhp->wq, inc, NULL); |
fa658a98 | 712 | else { |
05eb2389 SW |
713 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
714 | qhp->wq.sq.wq_pidx_inc += inc; | |
715 | } | |
716 | spin_unlock(&qhp->lock); | |
717 | spin_unlock_irqrestore(&qhp->rhp->lock, flags); | |
718 | return 0; | |
719 | } | |
720 | ||
721 | static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) | |
722 | { | |
723 | unsigned long flags; | |
724 | ||
725 | spin_lock_irqsave(&qhp->rhp->lock, flags); | |
726 | spin_lock(&qhp->lock); | |
fa658a98 | 727 | if (qhp->rhp->db_state == NORMAL) |
963cab50 | 728 | t4_ring_rq_db(&qhp->wq, inc, NULL); |
fa658a98 | 729 | else { |
05eb2389 SW |
730 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
731 | qhp->wq.rq.wq_pidx_inc += inc; | |
732 | } | |
733 | spin_unlock(&qhp->lock); | |
734 | spin_unlock_irqrestore(&qhp->rhp->lock, flags); | |
735 | return 0; | |
736 | } | |
737 | ||
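/*
 * Illustrative caller sketch (not from this file; names are placeholders):
 * a kernel consumer posts work requests through the ib_qp, which lands in
 * c4iw_post_send() below, e.g.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= cookie,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */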
cfdda9d7 SW |
738 | int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
739 | struct ib_send_wr **bad_wr) | |
740 | { | |
741 | int err = 0; | |
742 | u8 len16 = 0; | |
743 | enum fw_wr_opcodes fw_opcode = 0; | |
744 | enum fw_ri_wr_flags fw_flags; | |
745 | struct c4iw_qp *qhp; | |
fa658a98 | 746 | union t4_wr *wqe = NULL; |
cfdda9d7 SW |
747 | u32 num_wrs; |
748 | struct t4_swsqe *swsqe; | |
749 | unsigned long flag; | |
750 | u16 idx = 0; | |
751 | ||
752 | qhp = to_c4iw_qp(ibqp); | |
753 | spin_lock_irqsave(&qhp->lock, flag); | |
754 | if (t4_wq_in_error(&qhp->wq)) { | |
755 | spin_unlock_irqrestore(&qhp->lock, flag); | |
756 | return -EINVAL; | |
757 | } | |
758 | num_wrs = t4_sq_avail(&qhp->wq); | |
759 | if (num_wrs == 0) { | |
760 | spin_unlock_irqrestore(&qhp->lock, flag); | |
761 | return -ENOMEM; | |
762 | } | |
763 | while (wr) { | |
764 | if (num_wrs == 0) { | |
765 | err = -ENOMEM; | |
766 | *bad_wr = wr; | |
767 | break; | |
768 | } | |
d37ac31d SW |
769 | wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + |
770 | qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); | |
771 | ||
cfdda9d7 SW |
772 | fw_flags = 0; |
773 | if (wr->send_flags & IB_SEND_SOLICITED) | |
774 | fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; | |
ba32de9d | 775 | if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) |
cfdda9d7 SW |
776 | fw_flags |= FW_RI_COMPLETION_FLAG; |
777 | swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; | |
778 | switch (wr->opcode) { | |
779 | case IB_WR_SEND_WITH_INV: | |
780 | case IB_WR_SEND: | |
781 | if (wr->send_flags & IB_SEND_FENCE) | |
782 | fw_flags |= FW_RI_READ_FENCE_FLAG; | |
783 | fw_opcode = FW_RI_SEND_WR; | |
784 | if (wr->opcode == IB_WR_SEND) | |
785 | swsqe->opcode = FW_RI_SEND; | |
786 | else | |
787 | swsqe->opcode = FW_RI_SEND_WITH_INV; | |
d37ac31d | 788 | err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); |
cfdda9d7 SW |
789 | break; |
790 | case IB_WR_RDMA_WRITE: | |
791 | fw_opcode = FW_RI_RDMA_WRITE_WR; | |
792 | swsqe->opcode = FW_RI_RDMA_WRITE; | |
d37ac31d | 793 | err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); |
cfdda9d7 SW |
794 | break; |
795 | case IB_WR_RDMA_READ: | |
2f1fb507 | 796 | case IB_WR_RDMA_READ_WITH_INV: |
cfdda9d7 SW |
797 | fw_opcode = FW_RI_RDMA_READ_WR; |
798 | swsqe->opcode = FW_RI_READ_REQ; | |
2f1fb507 | 799 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) |
410ade4c | 800 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; |
2f1fb507 SW |
801 | else |
802 | fw_flags = 0; | |
cfdda9d7 SW |
803 | err = build_rdma_read(wqe, wr, &len16); |
804 | if (err) | |
805 | break; | |
806 | swsqe->read_len = wr->sg_list[0].length; | |
807 | if (!qhp->wq.sq.oldest_read) | |
808 | qhp->wq.sq.oldest_read = swsqe; | |
809 | break; | |
810 | case IB_WR_FAST_REG_MR: | |
811 | fw_opcode = FW_RI_FR_NSMR_WR; | |
812 | swsqe->opcode = FW_RI_FAST_REGISTER; | |
42b6a949 | 813 | err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, |
963cab50 | 814 | !is_t4( |
42b6a949 VP |
815 | qhp->rhp->rdev.lldi.adapter_type) ? |
816 | 1 : 0); | |
cfdda9d7 SW |
817 | break; |
818 | case IB_WR_LOCAL_INV: | |
4ab1eb9c SW |
819 | if (wr->send_flags & IB_SEND_FENCE) |
820 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; | |
cfdda9d7 SW |
821 | fw_opcode = FW_RI_INV_LSTAG_WR; |
822 | swsqe->opcode = FW_RI_LOCAL_INV; | |
823 | err = build_inv_stag(wqe, wr, &len16); | |
824 | break; | |
825 | default: | |
826 | PDBG("%s post of type=%d TBD!\n", __func__, | |
827 | wr->opcode); | |
828 | err = -EINVAL; | |
829 | } | |
830 | if (err) { | |
831 | *bad_wr = wr; | |
832 | break; | |
833 | } | |
834 | swsqe->idx = qhp->wq.sq.pidx; | |
835 | swsqe->complete = 0; | |
ba32de9d SW |
836 | swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || |
837 | qhp->sq_sig_all; | |
1cf24dce | 838 | swsqe->flushed = 0; |
cfdda9d7 | 839 | swsqe->wr_id = wr->wr_id; |
7730b4c7 HS |
840 | if (c4iw_wr_log) { |
841 | swsqe->sge_ts = cxgb4_read_sge_timestamp( | |
842 | qhp->rhp->rdev.lldi.ports[0]); | |
843 | getnstimeofday(&swsqe->host_ts); | |
844 | } | |
cfdda9d7 SW |
845 | |
846 | init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); | |
847 | ||
848 | PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n", | |
849 | __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, | |
850 | swsqe->opcode, swsqe->read_len); | |
851 | wr = wr->next; | |
852 | num_wrs--; | |
d37ac31d SW |
853 | t4_sq_produce(&qhp->wq, len16); |
854 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); | |
cfdda9d7 | 855 | } |
05eb2389 | 856 | if (!qhp->rhp->rdev.status_page->db_off) { |
963cab50 | 857 | t4_ring_sq_db(&qhp->wq, idx, wqe); |
05eb2389 SW |
858 | spin_unlock_irqrestore(&qhp->lock, flag); |
859 | } else { | |
860 | spin_unlock_irqrestore(&qhp->lock, flag); | |
861 | ring_kernel_sq_db(qhp, idx); | |
862 | } | |
cfdda9d7 SW |
863 | return err; |
864 | } | |
865 | ||
866 | int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
867 | struct ib_recv_wr **bad_wr) | |
868 | { | |
869 | int err = 0; | |
870 | struct c4iw_qp *qhp; | |
fa658a98 | 871 | union t4_recv_wr *wqe = NULL; |
cfdda9d7 SW |
872 | u32 num_wrs; |
873 | u8 len16 = 0; | |
874 | unsigned long flag; | |
875 | u16 idx = 0; | |
876 | ||
877 | qhp = to_c4iw_qp(ibqp); | |
878 | spin_lock_irqsave(&qhp->lock, flag); | |
879 | if (t4_wq_in_error(&qhp->wq)) { | |
880 | spin_unlock_irqrestore(&qhp->lock, flag); | |
881 | return -EINVAL; | |
882 | } | |
883 | num_wrs = t4_rq_avail(&qhp->wq); | |
884 | if (num_wrs == 0) { | |
885 | spin_unlock_irqrestore(&qhp->lock, flag); | |
886 | return -ENOMEM; | |
887 | } | |
888 | while (wr) { | |
889 | if (wr->num_sge > T4_MAX_RECV_SGE) { | |
890 | err = -EINVAL; | |
891 | *bad_wr = wr; | |
892 | break; | |
893 | } | |
d37ac31d SW |
894 | wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + |
895 | qhp->wq.rq.wq_pidx * | |
896 | T4_EQ_ENTRY_SIZE); | |
cfdda9d7 SW |
897 | if (num_wrs) |
898 | err = build_rdma_recv(qhp, wqe, wr, &len16); | |
899 | else | |
900 | err = -ENOMEM; | |
901 | if (err) { | |
902 | *bad_wr = wr; | |
903 | break; | |
904 | } | |
905 | ||
906 | qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; | |
7730b4c7 HS |
907 | if (c4iw_wr_log) { |
908 | qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = | |
909 | cxgb4_read_sge_timestamp( | |
910 | qhp->rhp->rdev.lldi.ports[0]); | |
911 | getnstimeofday( | |
912 | &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); | |
913 | } | |
cfdda9d7 SW |
914 | |
915 | wqe->recv.opcode = FW_RI_RECV_WR; | |
916 | wqe->recv.r1 = 0; | |
917 | wqe->recv.wrid = qhp->wq.rq.pidx; | |
918 | wqe->recv.r2[0] = 0; | |
919 | wqe->recv.r2[1] = 0; | |
920 | wqe->recv.r2[2] = 0; | |
921 | wqe->recv.len16 = len16; | |
cfdda9d7 SW |
922 | PDBG("%s cookie 0x%llx pidx %u\n", __func__, |
923 | (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); | |
d37ac31d SW |
924 | t4_rq_produce(&qhp->wq, len16); |
925 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); | |
cfdda9d7 SW |
926 | wr = wr->next; |
927 | num_wrs--; | |
cfdda9d7 | 928 | } |
05eb2389 | 929 | if (!qhp->rhp->rdev.status_page->db_off) { |
963cab50 | 930 | t4_ring_rq_db(&qhp->wq, idx, wqe); |
05eb2389 SW |
931 | spin_unlock_irqrestore(&qhp->lock, flag); |
932 | } else { | |
933 | spin_unlock_irqrestore(&qhp->lock, flag); | |
934 | ring_kernel_rq_db(qhp, idx); | |
935 | } | |
cfdda9d7 SW |
936 | return err; |
937 | } | |
938 | ||
939 | int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) | |
940 | { | |
941 | return -ENOSYS; | |
942 | } | |
943 | ||
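/*
 * build_term_codes() translates the hardware CQE error status into the
 * iWARP TERMINATE layer/etype and error code (RDMAP, DDP or MPA) carried
 * in the terminate message; with no error CQE it reports a local
 * catastrophic RDMAP error.
 */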
944 | static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, | |
945 | u8 *ecode) | |
946 | { | |
947 | int status; | |
948 | int tagged; | |
949 | int opcode; | |
950 | int rqtype; | |
951 | int send_inv; | |
952 | ||
953 | if (!err_cqe) { | |
954 | *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; | |
955 | *ecode = 0; | |
956 | return; | |
957 | } | |
958 | ||
959 | status = CQE_STATUS(err_cqe); | |
960 | opcode = CQE_OPCODE(err_cqe); | |
961 | rqtype = RQ_TYPE(err_cqe); | |
962 | send_inv = (opcode == FW_RI_SEND_WITH_INV) || | |
963 | (opcode == FW_RI_SEND_WITH_SE_INV); | |
964 | tagged = (opcode == FW_RI_RDMA_WRITE) || | |
965 | (rqtype && (opcode == FW_RI_READ_RESP)); | |
966 | ||
967 | switch (status) { | |
968 | case T4_ERR_STAG: | |
969 | if (send_inv) { | |
970 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
971 | *ecode = RDMAP_CANT_INV_STAG; | |
972 | } else { | |
973 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
974 | *ecode = RDMAP_INV_STAG; | |
975 | } | |
976 | break; | |
977 | case T4_ERR_PDID: | |
978 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
979 | if ((opcode == FW_RI_SEND_WITH_INV) || | |
980 | (opcode == FW_RI_SEND_WITH_SE_INV)) | |
981 | *ecode = RDMAP_CANT_INV_STAG; | |
982 | else | |
983 | *ecode = RDMAP_STAG_NOT_ASSOC; | |
984 | break; | |
985 | case T4_ERR_QPID: | |
986 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
987 | *ecode = RDMAP_STAG_NOT_ASSOC; | |
988 | break; | |
989 | case T4_ERR_ACCESS: | |
990 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
991 | *ecode = RDMAP_ACC_VIOL; | |
992 | break; | |
993 | case T4_ERR_WRAP: | |
994 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
995 | *ecode = RDMAP_TO_WRAP; | |
996 | break; | |
997 | case T4_ERR_BOUND: | |
998 | if (tagged) { | |
999 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1000 | *ecode = DDPT_BASE_BOUNDS; | |
1001 | } else { | |
1002 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1003 | *ecode = RDMAP_BASE_BOUNDS; | |
1004 | } | |
1005 | break; | |
1006 | case T4_ERR_INVALIDATE_SHARED_MR: | |
1007 | case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: | |
1008 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1009 | *ecode = RDMAP_CANT_INV_STAG; | |
1010 | break; | |
1011 | case T4_ERR_ECC: | |
1012 | case T4_ERR_ECC_PSTAG: | |
1013 | case T4_ERR_INTERNAL_ERR: | |
1014 | *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; | |
1015 | *ecode = 0; | |
1016 | break; | |
1017 | case T4_ERR_OUT_OF_RQE: | |
1018 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1019 | *ecode = DDPU_INV_MSN_NOBUF; | |
1020 | break; | |
1021 | case T4_ERR_PBL_ADDR_BOUND: | |
1022 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1023 | *ecode = DDPT_BASE_BOUNDS; | |
1024 | break; | |
1025 | case T4_ERR_CRC: | |
1026 | *layer_type = LAYER_MPA|DDP_LLP; | |
1027 | *ecode = MPA_CRC_ERR; | |
1028 | break; | |
1029 | case T4_ERR_MARKER: | |
1030 | *layer_type = LAYER_MPA|DDP_LLP; | |
1031 | *ecode = MPA_MARKER_ERR; | |
1032 | break; | |
1033 | case T4_ERR_PDU_LEN_ERR: | |
1034 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1035 | *ecode = DDPU_MSG_TOOBIG; | |
1036 | break; | |
1037 | case T4_ERR_DDP_VERSION: | |
1038 | if (tagged) { | |
1039 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1040 | *ecode = DDPT_INV_VERS; | |
1041 | } else { | |
1042 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1043 | *ecode = DDPU_INV_VERS; | |
1044 | } | |
1045 | break; | |
1046 | case T4_ERR_RDMA_VERSION: | |
1047 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1048 | *ecode = RDMAP_INV_VERS; | |
1049 | break; | |
1050 | case T4_ERR_OPCODE: | |
1051 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1052 | *ecode = RDMAP_INV_OPCODE; | |
1053 | break; | |
1054 | case T4_ERR_DDP_QUEUE_NUM: | |
1055 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1056 | *ecode = DDPU_INV_QN; | |
1057 | break; | |
1058 | case T4_ERR_MSN: | |
1059 | case T4_ERR_MSN_GAP: | |
1060 | case T4_ERR_MSN_RANGE: | |
1061 | case T4_ERR_IRD_OVERFLOW: | |
1062 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1063 | *ecode = DDPU_INV_MSN_RANGE; | |
1064 | break; | |
1065 | case T4_ERR_TBIT: | |
1066 | *layer_type = LAYER_DDP|DDP_LOCAL_CATA; | |
1067 | *ecode = 0; | |
1068 | break; | |
1069 | case T4_ERR_MO: | |
1070 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1071 | *ecode = DDPU_INV_MO; | |
1072 | break; | |
1073 | default: | |
1074 | *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; | |
1075 | *ecode = 0; | |
1076 | break; | |
1077 | } | |
1078 | } | |
1079 | ||
be4c9bad RD |
1080 | static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, |
1081 | gfp_t gfp) | |
cfdda9d7 SW |
1082 | { |
1083 | struct fw_ri_wr *wqe; | |
1084 | struct sk_buff *skb; | |
1085 | struct terminate_message *term; | |
1086 | ||
1087 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, | |
1088 | qhp->ep->hwtid); | |
1089 | ||
be4c9bad | 1090 | skb = alloc_skb(sizeof *wqe, gfp); |
cfdda9d7 | 1091 | if (!skb) |
be4c9bad | 1092 | return; |
cfdda9d7 SW |
1093 | set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); |
1094 | ||
1095 | wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); | |
1096 | memset(wqe, 0, sizeof *wqe); | |
e2ac9628 | 1097 | wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); |
cfdda9d7 | 1098 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1099 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1100 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
cfdda9d7 SW |
1101 | |
1102 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; | |
1103 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); | |
1104 | term = (struct terminate_message *)wqe->u.terminate.termmsg; | |
d2fe99e8 KS |
1105 | if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { |
1106 | term->layer_etype = qhp->attr.layer_etype; | |
1107 | term->ecode = qhp->attr.ecode; | |
1108 | } else | |
1109 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | |
be4c9bad | 1110 | c4iw_ofld_send(&qhp->rhp->rdev, skb); |
cfdda9d7 SW |
1111 | } |
1112 | ||
1113 | /* | |
1114 | * Assumes qhp lock is held. | |
1115 | */ | |
1116 | static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |
2f5b48c3 | 1117 | struct c4iw_cq *schp) |
cfdda9d7 SW |
1118 | { |
1119 | int count; | |
678ea9b5 | 1120 | int rq_flushed, sq_flushed; |
2f5b48c3 | 1121 | unsigned long flag; |
cfdda9d7 SW |
1122 | |
1123 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); | |
cfdda9d7 | 1124 | |
732bee7a | 1125 | /* locking hierarchy: cq lock first, then qp lock. */ |
2f5b48c3 | 1126 | spin_lock_irqsave(&rchp->lock, flag); |
cfdda9d7 | 1127 | spin_lock(&qhp->lock); |
1cf24dce SW |
1128 | |
1129 | if (qhp->wq.flushed) { | |
1130 | spin_unlock(&qhp->lock); | |
1131 | spin_unlock_irqrestore(&rchp->lock, flag); | |
1132 | return; | |
1133 | } | |
1134 | qhp->wq.flushed = 1; | |
1135 | ||
1136 | c4iw_flush_hw_cq(rchp); | |
cfdda9d7 | 1137 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); |
678ea9b5 | 1138 | rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
cfdda9d7 | 1139 | spin_unlock(&qhp->lock); |
2f5b48c3 | 1140 | spin_unlock_irqrestore(&rchp->lock, flag); |
cfdda9d7 | 1141 | |
732bee7a | 1142 | /* locking hierarchy: cq lock first, then qp lock. */ |
2f5b48c3 | 1143 | spin_lock_irqsave(&schp->lock, flag); |
cfdda9d7 | 1144 | spin_lock(&qhp->lock); |
1cf24dce SW |
1145 | if (schp != rchp) |
1146 | c4iw_flush_hw_cq(schp); | |
678ea9b5 | 1147 | sq_flushed = c4iw_flush_sq(qhp); |
cfdda9d7 | 1148 | spin_unlock(&qhp->lock); |
2f5b48c3 | 1149 | spin_unlock_irqrestore(&schp->lock, flag); |
678ea9b5 SW |
1150 | |
1151 | if (schp == rchp) { | |
1152 | if (t4_clear_cq_armed(&rchp->cq) && | |
1153 | (rq_flushed || sq_flushed)) { | |
1154 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | |
1155 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | |
1156 | rchp->ibcq.cq_context); | |
1157 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | |
1158 | } | |
1159 | } else { | |
1160 | if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { | |
1161 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | |
1162 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | |
1163 | rchp->ibcq.cq_context); | |
1164 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | |
1165 | } | |
1166 | if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { | |
1167 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | |
1168 | (*schp->ibcq.comp_handler)(&schp->ibcq, | |
1169 | schp->ibcq.cq_context); | |
1170 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | |
1171 | } | |
581bbe2c | 1172 | } |
cfdda9d7 SW |
1173 | } |
1174 | ||
2f5b48c3 | 1175 | static void flush_qp(struct c4iw_qp *qhp) |
cfdda9d7 SW |
1176 | { |
1177 | struct c4iw_cq *rchp, *schp; | |
581bbe2c | 1178 | unsigned long flag; |
cfdda9d7 | 1179 | |
1cf24dce SW |
1180 | rchp = to_c4iw_cq(qhp->ibqp.recv_cq); |
1181 | schp = to_c4iw_cq(qhp->ibqp.send_cq); | |
cfdda9d7 | 1182 | |
1cf24dce | 1183 | t4_set_wq_in_error(&qhp->wq); |
cfdda9d7 | 1184 | if (qhp->ibqp.uobject) { |
cfdda9d7 | 1185 | t4_set_cq_in_error(&rchp->cq); |
581bbe2c | 1186 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
01e7da6b | 1187 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
581bbe2c | 1188 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); |
01e7da6b | 1189 | if (schp != rchp) { |
cfdda9d7 | 1190 | t4_set_cq_in_error(&schp->cq); |
581bbe2c | 1191 | spin_lock_irqsave(&schp->comp_handler_lock, flag); |
01e7da6b KS |
1192 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
1193 | schp->ibcq.cq_context); | |
581bbe2c | 1194 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); |
01e7da6b | 1195 | } |
cfdda9d7 SW |
1196 | return; |
1197 | } | |
2f5b48c3 | 1198 | __flush_qp(qhp, rchp, schp); |
cfdda9d7 SW |
1199 | } |
1200 | ||
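/*
 * rdma_fini() posts a FW_RI_INIT_WR of type FINI to take the connection
 * out of RDMA mode and blocks on the endpoint's wr_wait until the
 * firmware acknowledges completion.
 */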
73d6fcad SW |
1201 | static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, |
1202 | struct c4iw_ep *ep) | |
cfdda9d7 SW |
1203 | { |
1204 | struct fw_ri_wr *wqe; | |
1205 | int ret; | |
cfdda9d7 SW |
1206 | struct sk_buff *skb; |
1207 | ||
1208 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, | |
73d6fcad | 1209 | ep->hwtid); |
cfdda9d7 | 1210 | |
d3c814e8 | 1211 | skb = alloc_skb(sizeof *wqe, GFP_KERNEL); |
cfdda9d7 SW |
1212 | if (!skb) |
1213 | return -ENOMEM; | |
73d6fcad | 1214 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); |
cfdda9d7 SW |
1215 | |
1216 | wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); | |
1217 | memset(wqe, 0, sizeof *wqe); | |
1218 | wqe->op_compl = cpu_to_be32( | |
e2ac9628 HS |
1219 | FW_WR_OP_V(FW_RI_INIT_WR) | |
1220 | FW_WR_COMPL_F); | |
cfdda9d7 | 1221 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1222 | FW_WR_FLOWID_V(ep->hwtid) | |
1223 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
6198dd8d | 1224 | wqe->cookie = (uintptr_t)&ep->com.wr_wait; |
cfdda9d7 SW |
1225 | |
1226 | wqe->u.fini.type = FW_RI_TYPE_FINI; | |
cfdda9d7 SW |
1227 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
1228 | if (ret) | |
1229 | goto out; | |
1230 | ||
2f5b48c3 | 1231 | ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, |
aadc4df3 | 1232 | qhp->wq.sq.qid, __func__); |
cfdda9d7 SW |
1233 | out: |
1234 | PDBG("%s ret %d\n", __func__, ret); | |
1235 | return ret; | |
1236 | } | |
1237 | ||
1238 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) | |
1239 | { | |
d2fe99e8 | 1240 | PDBG("%s p2p_type = %d\n", __func__, p2p_type); |
cfdda9d7 SW |
1241 | memset(&init->u, 0, sizeof init->u); |
1242 | switch (p2p_type) { | |
1243 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: | |
1244 | init->u.write.opcode = FW_RI_RDMA_WRITE_WR; | |
1245 | init->u.write.stag_sink = cpu_to_be32(1); | |
1246 | init->u.write.to_sink = cpu_to_be64(1); | |
1247 | init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
1248 | init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write + | |
1249 | sizeof(struct fw_ri_immd), | |
1250 | 16); | |
1251 | break; | |
1252 | case FW_RI_INIT_P2PTYPE_READ_REQ: | |
1253 | init->u.write.opcode = FW_RI_RDMA_READ_WR; | |
1254 | init->u.read.stag_src = cpu_to_be32(1); | |
1255 | init->u.read.to_src_lo = cpu_to_be32(1); | |
1256 | init->u.read.stag_sink = cpu_to_be32(1); | |
1257 | init->u.read.to_sink_lo = cpu_to_be32(1); | |
1258 | init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16); | |
1259 | break; | |
1260 | } | |
1261 | } | |
1262 | ||
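/*
 * rdma_init() reserves IRD resources for the QP, posts a FW_RI_INIT_WR of
 * type INIT carrying the negotiated MPA attributes, QP capabilities and
 * queue ids, and waits for the firmware reply; the IRD reservation is
 * released on failure.
 */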
1263 | static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |
1264 | { | |
1265 | struct fw_ri_wr *wqe; | |
1266 | int ret; | |
cfdda9d7 SW |
1267 | struct sk_buff *skb; |
1268 | ||
4c2c5763 HS |
1269 | PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp, |
1270 | qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); | |
cfdda9d7 | 1271 | |
d3c814e8 | 1272 | skb = alloc_skb(sizeof *wqe, GFP_KERNEL); |
4c2c5763 HS |
1273 | if (!skb) { |
1274 | ret = -ENOMEM; | |
1275 | goto out; | |
1276 | } | |
1277 | ret = alloc_ird(rhp, qhp->attr.max_ird); | |
1278 | if (ret) { | |
1279 | qhp->attr.max_ird = 0; | |
1280 | kfree_skb(skb); | |
1281 | goto out; | |
1282 | } | |
cfdda9d7 SW |
1283 | set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); |
1284 | ||
1285 | wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); | |
1286 | memset(wqe, 0, sizeof *wqe); | |
1287 | wqe->op_compl = cpu_to_be32( | |
e2ac9628 HS |
1288 | FW_WR_OP_V(FW_RI_INIT_WR) | |
1289 | FW_WR_COMPL_F); | |
cfdda9d7 | 1290 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1291 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1292 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
cfdda9d7 | 1293 | |
6198dd8d | 1294 | wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; |
cfdda9d7 SW |
1295 | |
1296 | wqe->u.init.type = FW_RI_TYPE_INIT; | |
1297 | wqe->u.init.mpareqbit_p2ptype = | |
cf7fe64a HS |
1298 | FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | |
1299 | FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); | |
cfdda9d7 SW |
1300 | wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; |
1301 | if (qhp->attr.mpa_attr.recv_marker_enabled) | |
1302 | wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; | |
1303 | if (qhp->attr.mpa_attr.xmit_marker_enabled) | |
1304 | wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; | |
1305 | if (qhp->attr.mpa_attr.crc_enabled) | |
1306 | wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; | |
1307 | ||
1308 | wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | | |
1309 | FW_RI_QP_RDMA_WRITE_ENABLE | | |
1310 | FW_RI_QP_BIND_ENABLE; | |
1311 | if (!qhp->ibqp.uobject) | |
1312 | wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | | |
1313 | FW_RI_QP_STAG0_ENABLE; | |
1314 | wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); | |
1315 | wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); | |
1316 | wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); | |
1317 | wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); | |
1318 | wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); | |
1319 | wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); | |
1320 | wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); | |
1321 | wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); | |
1322 | wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); | |
1323 | wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); | |
1324 | wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); | |
1325 | wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); | |
1326 | wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - | |
1327 | rhp->rdev.lldi.vr->rq.start); | |
1328 | if (qhp->attr.mpa_attr.initiator) | |
1329 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); | |
1330 | ||
cfdda9d7 SW |
1331 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
1332 | if (ret) | |
4c2c5763 | 1333 | goto err1; |
cfdda9d7 | 1334 | |
2f5b48c3 SW |
1335 | ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, |
1336 | qhp->ep->hwtid, qhp->wq.sq.qid, __func__); | |
4c2c5763 HS |
1337 | if (!ret) |
1338 | goto out; | |
1339 | err1: | |
1340 | free_ird(rhp, qhp->attr.max_ird); | |
cfdda9d7 SW |
1341 | out: |
1342 | PDBG("%s ret %d\n", __func__, ret); | |
1343 | return ret; | |
1344 | } | |
1345 | ||
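/*
 * c4iw_modify_qp() is the QP state machine.  Under qhp->mutex it applies
 * attribute changes, kicks deferred kernel doorbells, and drives
 * transitions between IDLE, RTS, CLOSING, TERMINATE and ERROR; any
 * terminate, disconnect or endpoint dereference decided while locked is
 * performed after the mutex is dropped.
 */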
1346 | int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |
1347 | enum c4iw_qp_attr_mask mask, | |
1348 | struct c4iw_qp_attributes *attrs, | |
1349 | int internal) | |
1350 | { | |
1351 | int ret = 0; | |
1352 | struct c4iw_qp_attributes newattr = qhp->attr; | |
cfdda9d7 SW |
1353 | int disconnect = 0; |
1354 | int terminate = 0; | |
1355 | int abort = 0; | |
1356 | int free = 0; | |
1357 | struct c4iw_ep *ep = NULL; | |
1358 | ||
1359 | PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__, | |
1360 | qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, | |
1361 | (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); | |
1362 | ||
2f5b48c3 | 1363 | mutex_lock(&qhp->mutex); |
cfdda9d7 SW |
1364 | |
1365 | /* Process attr changes if in IDLE */ | |
1366 | if (mask & C4IW_QP_ATTR_VALID_MODIFY) { | |
1367 | if (qhp->attr.state != C4IW_QP_STATE_IDLE) { | |
1368 | ret = -EIO; | |
1369 | goto out; | |
1370 | } | |
1371 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) | |
1372 | newattr.enable_rdma_read = attrs->enable_rdma_read; | |
1373 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) | |
1374 | newattr.enable_rdma_write = attrs->enable_rdma_write; | |
1375 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) | |
1376 | newattr.enable_bind = attrs->enable_bind; | |
1377 | if (mask & C4IW_QP_ATTR_MAX_ORD) { | |
be4c9bad | 1378 | if (attrs->max_ord > c4iw_max_read_depth) { |
cfdda9d7 SW |
1379 | ret = -EINVAL; |
1380 | goto out; | |
1381 | } | |
1382 | newattr.max_ord = attrs->max_ord; | |
1383 | } | |
1384 | if (mask & C4IW_QP_ATTR_MAX_IRD) { | |
4c2c5763 | 1385 | if (attrs->max_ird > cur_max_read_depth(rhp)) { |
cfdda9d7 SW |
1386 | ret = -EINVAL; |
1387 | goto out; | |
1388 | } | |
1389 | newattr.max_ird = attrs->max_ird; | |
1390 | } | |
1391 | qhp->attr = newattr; | |
1392 | } | |
1393 | ||
2c974781 | 1394 | if (mask & C4IW_QP_ATTR_SQ_DB) { |
05eb2389 | 1395 | ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); |
2c974781 VP |
1396 | goto out; |
1397 | } | |
1398 | if (mask & C4IW_QP_ATTR_RQ_DB) { | |
05eb2389 | 1399 | ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); |
2c974781 VP |
1400 | goto out; |
1401 | } | |
1402 | ||
cfdda9d7 SW |
1403 | if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) |
1404 | goto out; | |
1405 | if (qhp->attr.state == attrs->next_state) | |
1406 | goto out; | |
1407 | ||
1408 | switch (qhp->attr.state) { | |
1409 | case C4IW_QP_STATE_IDLE: | |
1410 | switch (attrs->next_state) { | |
1411 | case C4IW_QP_STATE_RTS: | |
1412 | if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { | |
1413 | ret = -EINVAL; | |
1414 | goto out; | |
1415 | } | |
1416 | if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { | |
1417 | ret = -EINVAL; | |
1418 | goto out; | |
1419 | } | |
1420 | qhp->attr.mpa_attr = attrs->mpa_attr; | |
1421 | qhp->attr.llp_stream_handle = attrs->llp_stream_handle; | |
1422 | qhp->ep = qhp->attr.llp_stream_handle; | |
2f5b48c3 | 1423 | set_state(qhp, C4IW_QP_STATE_RTS); |
cfdda9d7 SW |
1424 | |
1425 | /* | |
1426 | * Ref the endpoint here and deref when we | |
1427 | * disassociate the endpoint from the QP. This | |
1428 | * happens in CLOSING->IDLE transition or *->ERROR | |
1429 | * transition. | |
1430 | */ | |
1431 | c4iw_get_ep(&qhp->ep->com); | |
cfdda9d7 | 1432 | ret = rdma_init(rhp, qhp); |
cfdda9d7 SW |
1433 | if (ret) |
1434 | goto err; | |
1435 | break; | |
1436 | case C4IW_QP_STATE_ERROR: | |
2f5b48c3 SW |
1437 | set_state(qhp, C4IW_QP_STATE_ERROR); |
1438 | flush_qp(qhp); | |
cfdda9d7 SW |
1439 | break; |
1440 | default: | |
1441 | ret = -EINVAL; | |
1442 | goto out; | |
1443 | } | |
1444 | break; | |
1445 | case C4IW_QP_STATE_RTS: | |
1446 | switch (attrs->next_state) { | |
1447 | case C4IW_QP_STATE_CLOSING: | |
1448 | BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); | |
b4e2901c | 1449 | t4_set_wq_in_error(&qhp->wq); |
2f5b48c3 | 1450 | set_state(qhp, C4IW_QP_STATE_CLOSING); |
73d6fcad | 1451 | ep = qhp->ep; |
cfdda9d7 SW |
1452 | if (!internal) { |
1453 | abort = 0; | |
1454 | disconnect = 1; | |
2f5b48c3 | 1455 | c4iw_get_ep(&qhp->ep->com); |
cfdda9d7 | 1456 | } |
73d6fcad | 1457 | ret = rdma_fini(rhp, qhp, ep); |
8da7e7a5 | 1458 | if (ret) |
cfdda9d7 | 1459 | goto err; |
cfdda9d7 SW |
1460 | break; |
1461 | case C4IW_QP_STATE_TERMINATE: | |
b4e2901c | 1462 | t4_set_wq_in_error(&qhp->wq); |
2f5b48c3 | 1463 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
d2fe99e8 KS |
1464 | qhp->attr.layer_etype = attrs->layer_etype; |
1465 | qhp->attr.ecode = attrs->ecode; | |
be4c9bad | 1466 | ep = qhp->ep; |
cc18b939 SW |
1467 | if (!internal) { |
1468 | c4iw_get_ep(&qhp->ep->com); | |
0e42c1f4 | 1469 | terminate = 1; |
cc18b939 SW |
1470 | disconnect = 1; |
1471 | } else { | |
1472 | terminate = qhp->attr.send_term; | |
09992579 SW |
1473 | ret = rdma_fini(rhp, qhp, ep); |
1474 | if (ret) | |
1475 | goto err; | |
1476 | } | |
cfdda9d7 SW |
1477 | break; |
1478 | case C4IW_QP_STATE_ERROR: | |
1cf24dce | 1479 | t4_set_wq_in_error(&qhp->wq); |
b4e2901c | 1480 | set_state(qhp, C4IW_QP_STATE_ERROR); |
cfdda9d7 SW |
1481 | if (!internal) { |
1482 | abort = 1; | |
1483 | disconnect = 1; | |
1484 | ep = qhp->ep; | |
2f5b48c3 | 1485 | c4iw_get_ep(&qhp->ep->com); |
cfdda9d7 SW |
1486 | } |
1487 | goto err; | |
1488 | break; | |
1489 | default: | |
1490 | ret = -EINVAL; | |
1491 | goto out; | |
1492 | } | |
1493 | break; | |
1494 | case C4IW_QP_STATE_CLOSING: | |
1495 | if (!internal) { | |
1496 | ret = -EINVAL; | |
1497 | goto out; | |
1498 | } | |
1499 | switch (attrs->next_state) { | |
1500 | case C4IW_QP_STATE_IDLE: | |
2f5b48c3 SW |
1501 | flush_qp(qhp); |
1502 | set_state(qhp, C4IW_QP_STATE_IDLE); | |
cfdda9d7 SW |
1503 | qhp->attr.llp_stream_handle = NULL; |
1504 | c4iw_put_ep(&qhp->ep->com); | |
1505 | qhp->ep = NULL; | |
1506 | wake_up(&qhp->wait); | |
1507 | break; | |
1508 | case C4IW_QP_STATE_ERROR: | |
1509 | goto err; | |
1510 | default: | |
1511 | ret = -EINVAL; | |
1512 | goto err; | |
1513 | } | |
1514 | break; | |
1515 | case C4IW_QP_STATE_ERROR: | |
1516 | if (attrs->next_state != C4IW_QP_STATE_IDLE) { | |
1517 | ret = -EINVAL; | |
1518 | goto out; | |
1519 | } | |
1520 | if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { | |
1521 | ret = -EINVAL; | |
1522 | goto out; | |
1523 | } | |
2f5b48c3 | 1524 | set_state(qhp, C4IW_QP_STATE_IDLE); |
cfdda9d7 SW |
1525 | break; |
1526 | case C4IW_QP_STATE_TERMINATE: | |
1527 | if (!internal) { | |
1528 | ret = -EINVAL; | |
1529 | goto out; | |
1530 | } | |
1531 | goto err; | |
1532 | break; | |
1533 | default: | |
1534 | printk(KERN_ERR "%s in a bad state %d\n", | |
1535 | __func__, qhp->attr.state); | |
1536 | ret = -EINVAL; | |
1537 | goto err; | |
1538 | break; | |
1539 | } | |
1540 | goto out; | |
1541 | err: | |
1542 | PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, | |
1543 | qhp->wq.sq.qid); | |
1544 | ||
1545 | /* disassociate the LLP connection */ | |
1546 | qhp->attr.llp_stream_handle = NULL; | |
af93fb5d SW |
1547 | if (!ep) |
1548 | ep = qhp->ep; | |
cfdda9d7 | 1549 | qhp->ep = NULL; |
2f5b48c3 | 1550 | set_state(qhp, C4IW_QP_STATE_ERROR); |
cfdda9d7 | 1551 | free = 1; |
91e9c071 | 1552 | abort = 1; |
cfdda9d7 | 1553 | BUG_ON(!ep); |
2f5b48c3 | 1554 | flush_qp(qhp); |
5b341808 | 1555 | wake_up(&qhp->wait); |
cfdda9d7 | 1556 | out: |
2f5b48c3 | 1557 | mutex_unlock(&qhp->mutex); |
cfdda9d7 SW |
1558 | |
1559 | if (terminate) | |
be4c9bad | 1560 | post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); |
cfdda9d7 SW |
1561 | |
1562 | /* | |
1563 | * If disconnect is 1, then we need to initiate a disconnect | |
1564 | * on the EP. This can be a normal close (RTS->CLOSING) or | |
1565 | * an abnormal close (RTS/CLOSING->ERROR). | |
1566 | */ | |
1567 | if (disconnect) { | |
be4c9bad RD |
1568 | c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : |
1569 | GFP_KERNEL); | |
cfdda9d7 SW |
1570 | c4iw_put_ep(&ep->com); |
1571 | } | |
1572 | ||
1573 | /* | |
1574 | * If free is 1, then we've disassociated the EP from the QP | |
1575 | * and we need to dereference the EP. | |
1576 | */ | |
1577 | if (free) | |
1578 | c4iw_put_ep(&ep->com); | |
cfdda9d7 SW |
1579 | PDBG("%s exit state %d\n", __func__, qhp->attr.state); |
1580 | return ret; | |
1581 | } | |
1582 | ||
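/*
 * Example (editorial sketch, not part of the original file): how a caller
 * is expected to drive the IDLE -> RTS leg of the state machine above.
 * The transition is refused with -EINVAL unless the mask carries both
 * C4IW_QP_ATTR_LLP_STREAM_HANDLE and C4IW_QP_ATTR_MPA_ATTR.  The endpoint
 * and the negotiated MPA attributes normally come from the connection
 * manager; "ep", the mpa_attr field and the internal=1 call below are
 * illustrative assumptions, not a copy of the cm code.
 */
static int example_move_qp_to_rts(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                                  struct c4iw_ep *ep)
{
        struct c4iw_qp_attributes attrs;

        memset(&attrs, 0, sizeof(attrs));
        attrs.next_state = C4IW_QP_STATE_RTS;
        attrs.llp_stream_handle = ep;
        attrs.mpa_attr = ep->mpa_attr;  /* assumed: ep caches the MPA reply */

        return c4iw_modify_qp(rhp, qhp,
                              C4IW_QP_ATTR_NEXT_STATE |
                              C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                              C4IW_QP_ATTR_MPA_ATTR,
                              &attrs, 1 /* internal */);
}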
1583 | int c4iw_destroy_qp(struct ib_qp *ib_qp) | |
1584 | { | |
1585 | struct c4iw_dev *rhp; | |
1586 | struct c4iw_qp *qhp; | |
1587 | struct c4iw_qp_attributes attrs; | |
1588 | struct c4iw_ucontext *ucontext; | |
1589 | ||
1590 | qhp = to_c4iw_qp(ib_qp); | |
1591 | rhp = qhp->rhp; | |
1592 | ||
1593 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
d2fe99e8 KS |
1594 | if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) |
1595 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1596 | else | |
1597 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
cfdda9d7 SW |
1598 | wait_event(qhp->wait, !qhp->ep); |
1599 | ||
05eb2389 | 1600 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); |
cfdda9d7 SW |
1601 | atomic_dec(&qhp->refcnt); |
1602 | wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); | |
1603 | ||
05eb2389 SW |
1604 | spin_lock_irq(&rhp->lock); |
1605 | if (!list_empty(&qhp->db_fc_entry)) | |
1606 | list_del_init(&qhp->db_fc_entry); | |
1607 | spin_unlock_irq(&rhp->lock); | |
4c2c5763 | 1608 | free_ird(rhp, qhp->attr.max_ird); |
05eb2389 | 1609 | |
cfdda9d7 SW |
1610 | ucontext = ib_qp->uobject ? |
1611 | to_c4iw_ucontext(ib_qp->uobject->context) : NULL; | |
1612 | destroy_qp(&rhp->rdev, &qhp->wq, | |
1613 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
1614 | ||
1615 | PDBG("%s ib_qp %p qpid 0x%x\n", __func__, ib_qp, qhp->wq.sq.qid); | |
1616 | kfree(qhp); | |
1617 | return 0; | |
1618 | } | |
1619 | ||
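/*
 * Note on the teardown ordering above: c4iw_destroy_qp() forces the QP to
 * ERROR (taking the internal path when a TERMINATE is already pending),
 * sleeps on qhp->wait until c4iw_modify_qp()'s wake_up() has disassociated
 * the endpoint, removes the qpid from the idr, waits for the reference
 * count to drop to zero, releases the db flow-control entry and the IRD
 * budget, and only then frees the hardware queues and the qhp itself.
 */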
1620 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |
1621 | struct ib_udata *udata) | |
1622 | { | |
1623 | struct c4iw_dev *rhp; | |
1624 | struct c4iw_qp *qhp; | |
1625 | struct c4iw_pd *php; | |
1626 | struct c4iw_cq *schp; | |
1627 | struct c4iw_cq *rchp; | |
1628 | struct c4iw_create_qp_resp uresp; | |
ff1706f4 | 1629 | unsigned int sqsize, rqsize; |
cfdda9d7 SW |
1630 | struct c4iw_ucontext *ucontext; |
1631 | int ret; | |
c6d7b267 | 1632 | struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; |
cfdda9d7 SW |
1633 | |
1634 | PDBG("%s ib_pd %p\n", __func__, pd); | |
1635 | ||
1636 | if (attrs->qp_type != IB_QPT_RC) | |
1637 | return ERR_PTR(-EINVAL); | |
1638 | ||
1639 | php = to_c4iw_pd(pd); | |
1640 | rhp = php->rhp; | |
1641 | schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); | |
1642 | rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); | |
1643 | if (!schp || !rchp) | |
1644 | return ERR_PTR(-EINVAL); | |
1645 | ||
1646 | if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) | |
1647 | return ERR_PTR(-EINVAL); | |
1648 | ||
66eb19af | 1649 | if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) |
cfdda9d7 | 1650 | return ERR_PTR(-E2BIG); |
66eb19af HS |
1651 | rqsize = attrs->cap.max_recv_wr + 1; |
1652 | if (rqsize < 8) | |
1653 | rqsize = 8; | |
cfdda9d7 | 1654 | |
66eb19af | 1655 | if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) |
cfdda9d7 | 1656 | return ERR_PTR(-E2BIG); |
66eb19af HS |
1657 | sqsize = attrs->cap.max_send_wr + 1; |
1658 | if (sqsize < 8) | |
1659 | sqsize = 8; | |
cfdda9d7 SW |
1660 | |
1661 | ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; | |
1662 | ||
cfdda9d7 SW |
1663 | qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); |
1664 | if (!qhp) | |
1665 | return ERR_PTR(-ENOMEM); | |
1666 | qhp->wq.sq.size = sqsize; | |
66eb19af HS |
1667 | qhp->wq.sq.memsize = |
1668 | (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * | |
1669 | sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); | |
1cf24dce | 1670 | qhp->wq.sq.flush_cidx = -1; |
cfdda9d7 | 1671 | qhp->wq.rq.size = rqsize; |
66eb19af HS |
1672 | qhp->wq.rq.memsize = |
1673 | (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * | |
1674 | sizeof(*qhp->wq.rq.queue); | |
cfdda9d7 SW |
1675 | |
1676 | if (ucontext) { | |
1677 | qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); | |
1678 | qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); | |
1679 | } | |
1680 | ||
cfdda9d7 SW |
1681 | ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, |
1682 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
1683 | if (ret) | |
1684 | goto err1; | |
1685 | ||
1686 | attrs->cap.max_recv_wr = rqsize - 1; | |
1687 | attrs->cap.max_send_wr = sqsize - 1; | |
1688 | attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; | |
1689 | ||
1690 | qhp->rhp = rhp; | |
1691 | qhp->attr.pd = php->pdid; | |
1692 | qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; | |
1693 | qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; | |
1694 | qhp->attr.sq_num_entries = attrs->cap.max_send_wr; | |
1695 | qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; | |
1696 | qhp->attr.sq_max_sges = attrs->cap.max_send_sge; | |
1697 | qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; | |
1698 | qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; | |
1699 | qhp->attr.state = C4IW_QP_STATE_IDLE; | |
1700 | qhp->attr.next_state = C4IW_QP_STATE_IDLE; | |
1701 | qhp->attr.enable_rdma_read = 1; | |
1702 | qhp->attr.enable_rdma_write = 1; | |
1703 | qhp->attr.enable_bind = 1; | |
4c2c5763 HS |
1704 | qhp->attr.max_ord = 0; |
1705 | qhp->attr.max_ird = 0; | |
ba32de9d | 1706 | qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; |
cfdda9d7 | 1707 | spin_lock_init(&qhp->lock); |
2f5b48c3 | 1708 | mutex_init(&qhp->mutex); |
cfdda9d7 SW |
1709 | init_waitqueue_head(&qhp->wait); |
1710 | atomic_set(&qhp->refcnt, 1); | |
1711 | ||
05eb2389 | 1712 | ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); |
cfdda9d7 SW |
1713 | if (ret) |
1714 | goto err2; | |
1715 | ||
cfdda9d7 SW |
1716 | if (udata) { |
1717 | mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); | |
1718 | if (!mm1) { | |
1719 | ret = -ENOMEM; | |
30a6a62f | 1720 | goto err3; |
cfdda9d7 SW |
1721 | } |
1722 | mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); | |
1723 | if (!mm2) { | |
1724 | ret = -ENOMEM; | |
30a6a62f | 1725 | goto err4; |
cfdda9d7 SW |
1726 | } |
1727 | mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); | |
1728 | if (!mm3) { | |
1729 | ret = -ENOMEM; | |
30a6a62f | 1730 | goto err5; |
cfdda9d7 SW |
1731 | } |
1732 | mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); | |
1733 | if (!mm4) { | |
1734 | ret = -ENOMEM; | |
30a6a62f | 1735 | goto err6; |
cfdda9d7 | 1736 | } |
c6d7b267 SW |
1737 | if (t4_sq_onchip(&qhp->wq.sq)) { |
1738 | mm5 = kmalloc(sizeof *mm5, GFP_KERNEL); | |
1739 | if (!mm5) { | |
1740 | ret = -ENOMEM; | |
1741 | goto err7; | |
1742 | } | |
1743 | uresp.flags = C4IW_QPF_ONCHIP; | |
1744 | } else | |
1745 | uresp.flags = 0; | |
cfdda9d7 SW |
1746 | uresp.qid_mask = rhp->rdev.qpmask; |
1747 | uresp.sqid = qhp->wq.sq.qid; | |
1748 | uresp.sq_size = qhp->wq.sq.size; | |
1749 | uresp.sq_memsize = qhp->wq.sq.memsize; | |
1750 | uresp.rqid = qhp->wq.rq.qid; | |
1751 | uresp.rq_size = qhp->wq.rq.size; | |
1752 | uresp.rq_memsize = qhp->wq.rq.memsize; | |
1753 | spin_lock(&ucontext->mmap_lock); | |
c6d7b267 SW |
1754 | if (mm5) { |
1755 | uresp.ma_sync_key = ucontext->key; | |
1756 | ucontext->key += PAGE_SIZE; | |
ae1fe07f DC |
1757 | } else { |
1758 | uresp.ma_sync_key = 0; | |
c6d7b267 | 1759 | } |
cfdda9d7 SW |
1760 | uresp.sq_key = ucontext->key; |
1761 | ucontext->key += PAGE_SIZE; | |
1762 | uresp.rq_key = ucontext->key; | |
1763 | ucontext->key += PAGE_SIZE; | |
1764 | uresp.sq_db_gts_key = ucontext->key; | |
1765 | ucontext->key += PAGE_SIZE; | |
1766 | uresp.rq_db_gts_key = ucontext->key; | |
1767 | ucontext->key += PAGE_SIZE; | |
1768 | spin_unlock(&ucontext->mmap_lock); | |
1769 | ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); | |
1770 | if (ret) | |
c6d7b267 | 1771 | goto err8; |
cfdda9d7 | 1772 | mm1->key = uresp.sq_key; |
c6d7b267 | 1773 | mm1->addr = qhp->wq.sq.phys_addr; |
cfdda9d7 SW |
1774 | mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); |
1775 | insert_mmap(ucontext, mm1); | |
1776 | mm2->key = uresp.rq_key; | |
1777 | mm2->addr = virt_to_phys(qhp->wq.rq.queue); | |
1778 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); | |
1779 | insert_mmap(ucontext, mm2); | |
1780 | mm3->key = uresp.sq_db_gts_key; | |
74217d4c | 1781 | mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; |
cfdda9d7 SW |
1782 | mm3->len = PAGE_SIZE; |
1783 | insert_mmap(ucontext, mm3); | |
1784 | mm4->key = uresp.rq_db_gts_key; | |
74217d4c | 1785 | mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; |
cfdda9d7 SW |
1786 | mm4->len = PAGE_SIZE; |
1787 | insert_mmap(ucontext, mm4); | |
c6d7b267 SW |
1788 | if (mm5) { |
1789 | mm5->key = uresp.ma_sync_key; | |
1790 | mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) | |
a56c66e8 | 1791 | + PCIE_MA_SYNC_A) & PAGE_MASK; |
c6d7b267 SW |
1792 | mm5->len = PAGE_SIZE; |
1793 | insert_mmap(ucontext, mm5); | |
1794 | } | |
cfdda9d7 SW |
1795 | } |
1796 | qhp->ibqp.qp_num = qhp->wq.sq.qid; | |
1797 | init_timer(&(qhp->timer)); | |
05eb2389 | 1798 | INIT_LIST_HEAD(&qhp->db_fc_entry); |
66eb19af HS |
1799 | PDBG("%s sq id %u size %u memsize %zu num_entries %u " |
1800 | "rq id %u size %u memsize %zu num_entries %u\n", __func__, | |
1801 | qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, | |
1802 | attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, | |
1803 | qhp->wq.rq.memsize, attrs->cap.max_recv_wr); | |
cfdda9d7 | 1804 | return &qhp->ibqp; |
c6d7b267 SW |
1805 | err8: |
1806 | kfree(mm5); | |
cfdda9d7 | 1807 | err7: |
30a6a62f | 1808 | kfree(mm4); |
cfdda9d7 | 1809 | err6: |
30a6a62f | 1810 | kfree(mm3); |
cfdda9d7 | 1811 | err5: |
30a6a62f | 1812 | kfree(mm2); |
cfdda9d7 | 1813 | err4: |
30a6a62f | 1814 | kfree(mm1); |
cfdda9d7 SW |
1815 | err3: |
1816 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); | |
1817 | err2: | |
1818 | destroy_qp(&rhp->rdev, &qhp->wq, | |
1819 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
1820 | err1: | |
1821 | kfree(qhp); | |
1822 | return ERR_PTR(ret); | |
1823 | } | |
1824 | ||
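/*
 * Example (editorial sketch, not part of the original file): roughly how a
 * userspace provider consumes the keys returned in c4iw_create_qp_resp
 * above.  Each key is used as the mmap() offset on the verbs device fd so
 * the driver can match it against the c4iw_mm_entry list built in
 * c4iw_create_qp().  "cmd_fd" and "resp" are placeholders, the protection
 * flags are an assumption, and error handling is reduced to a single check.
 * Userspace code; needs <sys/mman.h> and <unistd.h>.
 */
static int example_map_queues(int cmd_fd, const struct c4iw_create_qp_resp *resp,
                              void **sq, void **rq, void **sq_db, void **rq_db)
{
        long psz = sysconf(_SC_PAGESIZE);

        *sq = mmap(NULL, resp->sq_memsize, PROT_READ | PROT_WRITE,
                   MAP_SHARED, cmd_fd, resp->sq_key);
        *rq = mmap(NULL, resp->rq_memsize, PROT_READ | PROT_WRITE,
                   MAP_SHARED, cmd_fd, resp->rq_key);
        *sq_db = mmap(NULL, psz, PROT_WRITE, MAP_SHARED,
                      cmd_fd, resp->sq_db_gts_key);
        *rq_db = mmap(NULL, psz, PROT_WRITE, MAP_SHARED,
                      cmd_fd, resp->rq_db_gts_key);

        return (*sq == MAP_FAILED || *rq == MAP_FAILED ||
                *sq_db == MAP_FAILED || *rq_db == MAP_FAILED) ? -1 : 0;
}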
1825 | int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1826 | int attr_mask, struct ib_udata *udata) | |
1827 | { | |
1828 | struct c4iw_dev *rhp; | |
1829 | struct c4iw_qp *qhp; | |
1830 | enum c4iw_qp_attr_mask mask = 0; | |
1831 | struct c4iw_qp_attributes attrs; | |
1832 | ||
1833 | PDBG("%s ib_qp %p\n", __func__, ibqp); | |
1834 | ||
1835 | /* iwarp does not support the RTR state */ | |
1836 | if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) | |
1837 | attr_mask &= ~IB_QP_STATE; | |
1838 | ||
1839 | /* Make sure we still have something left to do */ | |
1840 | if (!attr_mask) | |
1841 | return 0; | |
1842 | ||
1843 | memset(&attrs, 0, sizeof attrs); | |
1844 | qhp = to_c4iw_qp(ibqp); | |
1845 | rhp = qhp->rhp; | |
1846 | ||
1847 | attrs.next_state = c4iw_convert_state(attr->qp_state); | |
1848 | attrs.enable_rdma_read = (attr->qp_access_flags & | |
1849 | IB_ACCESS_REMOTE_READ) ? 1 : 0; | |
1850 | attrs.enable_rdma_write = (attr->qp_access_flags & | |
1851 | IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | |
1852 | attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; | |
1853 | ||
1854 | ||
1855 | mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0; | |
1856 | mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? | |
1857 | (C4IW_QP_ATTR_ENABLE_RDMA_READ | | |
1858 | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | | |
1859 | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; | |
1860 | ||
2c974781 VP |
1861 | /* |
1862 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for | |
1863 | * ringing the queue db when we're in DB_FULL mode. | |
c2f9da92 | 1864 | * Only allow this on T4 devices. |
2c974781 VP |
1865 | */ |
1866 | attrs.sq_db_inc = attr->sq_psn; | |
1867 | attrs.rq_db_inc = attr->rq_psn; | |
1868 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; | |
1869 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; | |
963cab50 | 1870 | if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && |
c2f9da92 SW |
1871 | (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) |
1872 | return -EINVAL; | |
2c974781 | 1873 | |
cfdda9d7 SW |
1874 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); |
1875 | } | |
1876 | ||
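/*
 * Example (editorial sketch, not part of the original file): on T4-only
 * adapters the doorbell "index increment" described in the comment above
 * can be pushed through the ordinary modify-QP path by reusing the PSN
 * fields.  "inc" is a placeholder for the number of doorbell writes to
 * replay once the db fifo has drained; non-T4 adapters get -EINVAL.
 */
static int example_ring_sq_doorbell(struct ib_qp *qp, u32 inc)
{
        struct ib_qp_attr attr = { .sq_psn = inc };

        return ib_modify_qp(qp, &attr, IB_QP_SQ_PSN);
}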
1877 | struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) | |
1878 | { | |
1879 | PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); | |
1880 | return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); | |
1881 | } | |
67bbc055 VP |
1882 | |
1883 | int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1884 | int attr_mask, struct ib_qp_init_attr *init_attr) | |
1885 | { | |
1886 | struct c4iw_qp *qhp = to_c4iw_qp(ibqp); | |
1887 | ||
1888 | memset(attr, 0, sizeof *attr); | |
1889 | memset(init_attr, 0, sizeof *init_attr); | |
1890 | attr->qp_state = to_ib_qp_state(qhp->attr.state); | |
3e5c02c9 HS |
1891 | init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; |
1892 | init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; | |
1893 | init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; | |
1894 | init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; | |
1895 | init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; | |
1896 | init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; | |
67bbc055 VP |
1897 | return 0; |
1898 | } |
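/*
 * Note: c4iw_ib_query_qp() above reports only the software state cached in
 * qhp->attr plus the signalling mode captured at create time; nothing is
 * read back from the hardware, so the returned capabilities are exactly
 * those fixed when the QP was created.
 */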