Commit | Line | Data |
---|---|---|
cfdda9d7 SW |
1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
e4dd23d7 PG |
32 | |
33 | #include <linux/module.h> | |
34 | ||
cfdda9d7 SW |
35 | #include "iw_cxgb4.h" |
36 | ||
2c974781 VP |
37 | static int db_delay_usecs = 1; |
38 | module_param(db_delay_usecs, int, 0644); | |
39 | MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain"); | |
40 | ||
a9c77198 | 41 | static int ocqp_support = 1; |
c6d7b267 | 42 | module_param(ocqp_support, int, 0644); |
a9c77198 | 43 | MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); |
c6d7b267 | 44 | |
3cbdb928 | 45 | int db_fc_threshold = 1000; |
422eea0a | 46 | module_param(db_fc_threshold, int, 0644); |
3cbdb928 VP |
47 | MODULE_PARM_DESC(db_fc_threshold, |
48 | "QP count/threshold that triggers" | |
49 | " automatic db flow control mode (default = 1000)"); | |
50 | ||
51 | int db_coalescing_threshold; | |
52 | module_param(db_coalescing_threshold, int, 0644); | |
53 | MODULE_PARM_DESC(db_coalescing_threshold, | |
54 | "QP count/threshold that triggers" | |
55 | " disabling db coalescing (default = 0)"); | |
422eea0a | 56 | |
42b6a949 VP |
57 | static int max_fr_immd = T4_MAX_FR_IMMD; |
58 | module_param(max_fr_immd, int, 0644); | |
59 | MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate"); |
60 | ||
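/*
 * IRD (incoming RDMA read) resources are accounted device-wide:
 * alloc_ird() reserves "ird" entries from dev->avail_ird under
 * dev->lock, failing with -ENOMEM once the pool is exhausted, and
 * free_ird() returns them. rdma_init() below reserves max_ird when
 * a QP moves to RTS and releases it again on failure.
 */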
4c2c5763 HS |
61 | static int alloc_ird(struct c4iw_dev *dev, u32 ird) |
62 | { | |
63 | int ret = 0; | |
64 | ||
65 | spin_lock_irq(&dev->lock); | |
66 | if (ird <= dev->avail_ird) | |
67 | dev->avail_ird -= ird; | |
68 | else | |
69 | ret = -ENOMEM; | |
70 | spin_unlock_irq(&dev->lock); | |
71 | ||
72 | if (ret) | |
73 | dev_warn(&dev->rdev.lldi.pdev->dev, | |
74 | "device IRD resources exhausted\n"); | |
75 | ||
76 | return ret; | |
77 | } | |
78 | ||
79 | static void free_ird(struct c4iw_dev *dev, int ird) | |
80 | { | |
81 | spin_lock_irq(&dev->lock); | |
82 | dev->avail_ird += ird; | |
83 | spin_unlock_irq(&dev->lock); | |
84 | } | |
85 | ||
2f5b48c3 SW |
86 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) |
87 | { | |
88 | unsigned long flag; | |
89 | spin_lock_irqsave(&qhp->lock, flag); | |
90 | qhp->attr.state = state; | |
91 | spin_unlock_irqrestore(&qhp->lock, flag); | |
92 | } | |
93 | ||
c6d7b267 SW |
94 | static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) |
95 | { | |
96 | c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); | |
97 | } | |
98 | ||
99 | static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
100 | { | |
101 | dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, | |
102 | pci_unmap_addr(sq, mapping)); | |
103 | } | |
104 | ||
105 | static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
106 | { | |
107 | if (t4_sq_onchip(sq)) | |
108 | dealloc_oc_sq(rdev, sq); | |
109 | else | |
110 | dealloc_host_sq(rdev, sq); | |
111 | } | |
112 | ||
113 | static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
114 | { | |
f079af7a | 115 | if (!ocqp_support || !ocqp_supported(&rdev->lldi)) |
c6d7b267 SW |
116 | return -ENOSYS; |
117 | sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); | |
118 | if (!sq->dma_addr) | |
119 | return -ENOMEM; | |
120 | sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - | |
121 | rdev->lldi.vr->ocq.start; | |
122 | sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - | |
123 | rdev->lldi.vr->ocq.start); | |
124 | sq->flags |= T4_SQ_ONCHIP; | |
125 | return 0; | |
126 | } | |
127 | ||
128 | static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | |
129 | { | |
130 | sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, | |
131 | &(sq->dma_addr), GFP_KERNEL); | |
132 | if (!sq->queue) | |
133 | return -ENOMEM; | |
134 | sq->phys_addr = virt_to_phys(sq->queue); | |
135 | pci_unmap_addr_set(sq, mapping, sq->dma_addr); | |
136 | return 0; | |
137 | } | |
138 | ||
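/*
 * SQ placement policy: user QPs first try the on-chip queue pool
 * (alloc_oc_sq), falling back to host DMA-coherent memory when
 * on-chip SQs are disabled, unsupported, or the pool is full.
 * Kernel QPs always get host memory.
 */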
5b0c2759 TLSC |
139 | static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) |
140 | { | |
141 | int ret = -ENOSYS; | |
142 | if (user) | |
143 | ret = alloc_oc_sq(rdev, sq); | |
144 | if (ret) | |
145 | ret = alloc_host_sq(rdev, sq); | |
146 | return ret; | |
147 | } | |
148 | ||
cfdda9d7 SW |
149 | static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
150 | struct c4iw_dev_ucontext *uctx) | |
151 | { | |
152 | /* | |
153 | * uP clears EQ contexts when the connection exits rdma mode, | |
154 | * so no need to post a RESET WR for these EQs. | |
155 | */ | |
156 | dma_free_coherent(&(rdev->lldi.pdev->dev), | |
157 | wq->rq.memsize, wq->rq.queue, | |
f38926aa | 158 | dma_unmap_addr(&wq->rq, mapping)); |
c6d7b267 | 159 | dealloc_sq(rdev, &wq->sq); |
cfdda9d7 SW |
160 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
161 | kfree(wq->rq.sw_rq); | |
162 | kfree(wq->sq.sw_sq); | |
163 | c4iw_put_qpid(rdev, wq->rq.qid, uctx); | |
164 | c4iw_put_qpid(rdev, wq->sq.qid, uctx); | |
165 | return 0; | |
166 | } | |
167 | ||
74217d4c H |
168 | /* |
169 | * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL, | |
170 | * then this is a user mapping so compute the page-aligned physical address | |
171 | * for mapping. | |
172 | */ | |
173 | void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, | |
174 | enum cxgb4_bar2_qtype qtype, | |
175 | unsigned int *pbar2_qid, u64 *pbar2_pa) | |
176 | { | |
177 | u64 bar2_qoffset; | |
178 | int ret; | |
179 | ||
180 | ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype, | |
181 | pbar2_pa ? 1 : 0, | |
182 | &bar2_qoffset, pbar2_qid); | |
183 | if (ret) | |
184 | return NULL; | |
185 | ||
186 | if (pbar2_pa) | |
187 | *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; | |
32cc92c7 H |
188 | |
189 | if (is_t4(rdev->lldi.adapter_type)) | |
190 | return NULL; | |
191 | ||
74217d4c H |
192 | return rdev->bar2_kva + bar2_qoffset; |
193 | } | |
194 | ||
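/*
 * create_qp() allocates everything a T4 QP needs: SQ/RQ qids, the
 * software shadow rings (kernel QPs only), an RQT block, the queue
 * memory itself, and the BAR2 doorbell addresses. It then posts one
 * FW_RI_RES_WR carrying two resource commands (one per EQ) and
 * blocks until firmware completes it.
 */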
cfdda9d7 SW |
195 | static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
196 | struct t4_cq *rcq, struct t4_cq *scq, | |
197 | struct c4iw_dev_ucontext *uctx) | |
198 | { | |
199 | int user = (uctx != &rdev->uctx); | |
200 | struct fw_ri_res_wr *res_wr; | |
201 | struct fw_ri_res *res; | |
202 | int wr_len; | |
203 | struct c4iw_wr_wait wr_wait; | |
204 | struct sk_buff *skb; | |
9919d5bd | 205 | int ret = 0; |
cfdda9d7 SW |
206 | int eqsize; |
207 | ||
208 | wq->sq.qid = c4iw_get_qpid(rdev, uctx); | |
209 | if (!wq->sq.qid) | |
210 | return -ENOMEM; | |
211 | ||
212 | wq->rq.qid = c4iw_get_qpid(rdev, uctx); | |
c079c287 EG |
213 | if (!wq->rq.qid) { |
214 | ret = -ENOMEM; | |
215 | goto free_sq_qid; | |
216 | } | |
cfdda9d7 SW |
217 | |
218 | if (!user) { | |
219 | wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, | |
220 | GFP_KERNEL); | |
c079c287 EG |
221 | if (!wq->sq.sw_sq) { |
222 | ret = -ENOMEM; | |
223 | goto free_rq_qid; | |
224 | } | |
cfdda9d7 SW |
225 | |
226 | wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, | |
227 | GFP_KERNEL); | |
c079c287 EG |
228 | if (!wq->rq.sw_rq) { |
229 | ret = -ENOMEM; | |
230 | goto free_sw_sq; | |
231 | } | |
cfdda9d7 SW |
232 | } |
233 | ||
234 | /* | |
66eb19af | 235 | * RQT must be a power of 2 and at least 16 deep. |
cfdda9d7 | 236 | */ |
66eb19af | 237 | wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); |
cfdda9d7 | 238 | wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); |
c079c287 EG |
239 | if (!wq->rq.rqt_hwaddr) { |
240 | ret = -ENOMEM; | |
241 | goto free_sw_rq; | |
242 | } | |
cfdda9d7 | 243 | |
5b0c2759 TLSC |
244 | ret = alloc_sq(rdev, &wq->sq, user); |
245 | if (ret) | |
246 | goto free_hwaddr; | |
cfdda9d7 | 247 | memset(wq->sq.queue, 0, wq->sq.memsize); |
f38926aa | 248 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); |
cfdda9d7 SW |
249 | |
250 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), | |
251 | wq->rq.memsize, &(wq->rq.dma_addr), | |
252 | GFP_KERNEL); | |
55e57a78 WY |
253 | if (!wq->rq.queue) { |
254 | ret = -ENOMEM; | |
c079c287 | 255 | goto free_sq; |
55e57a78 | 256 | } |
a9a42886 JP |
257 | pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", |
258 | __func__, wq->sq.queue, | |
259 | (unsigned long long)virt_to_phys(wq->sq.queue), | |
260 | wq->rq.queue, | |
261 | (unsigned long long)virt_to_phys(wq->rq.queue)); | |
cfdda9d7 | 262 | memset(wq->rq.queue, 0, wq->rq.memsize); |
f38926aa | 263 | dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); |
cfdda9d7 SW |
264 | |
265 | wq->db = rdev->lldi.db_reg; | |
fa658a98 | 266 | |
74217d4c H |
267 | wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, |
268 | &wq->sq.bar2_qid, | |
269 | user ? &wq->sq.bar2_pa : NULL); | |
270 | wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, | |
271 | &wq->rq.bar2_qid, | |
272 | user ? &wq->rq.bar2_pa : NULL); | |
273 | ||
274 | /* | |
275 | * User mode must have bar2 access. | |
276 | */ | |
32cc92c7 | 277 | if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { |
700456bd | 278 | pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n", |
74217d4c H |
279 | pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); |
ret = -EINVAL;
280 | goto free_dma; |
cfdda9d7 | 281 | } |
74217d4c | 282 | |
cfdda9d7 SW |
283 | wq->rdev = rdev; |
284 | wq->rq.msn = 1; | |
285 | ||
286 | /* build fw_ri_res_wr */ | |
287 | wr_len = sizeof *res_wr + 2 * sizeof *res; | |
288 | ||
d3c814e8 | 289 | skb = alloc_skb(wr_len, GFP_KERNEL); |
cfdda9d7 SW |
290 | if (!skb) { |
291 | ret = -ENOMEM; | |
c079c287 | 292 | goto free_dma; |
cfdda9d7 SW |
293 | } |
294 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | |
295 | ||
de77b966 | 296 | res_wr = __skb_put_zero(skb, wr_len); |
cfdda9d7 | 297 | res_wr->op_nres = cpu_to_be32( |
e2ac9628 | 298 | FW_WR_OP_V(FW_RI_RES_WR) | |
cf7fe64a | 299 | FW_RI_RES_WR_NRES_V(2) | |
e2ac9628 | 300 | FW_WR_COMPL_F); |
cfdda9d7 | 301 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
6198dd8d | 302 | res_wr->cookie = (uintptr_t)&wr_wait; |
cfdda9d7 SW |
303 | res = res_wr->res; |
304 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; | |
305 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | |
306 | ||
307 | /* | |
308 | * eqsize is the number of 64B entries plus the status page size. | |
309 | */ | |
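/*
 * For example (illustrative numbers only): a 64-entry SQ with one
 * status page entry gives eqsize = 64 * T4_SQ_NUM_SLOTS + 1.
 */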
04e10e21 HS |
310 | eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + |
311 | rdev->hw_queue.t4_eq_status_entries; | |
cfdda9d7 SW |
312 | |
313 | res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( | |
cf7fe64a HS |
314 | FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ |
315 | FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ | |
316 | FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ | |
317 | (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | | |
318 | FW_RI_RES_WR_IQID_V(scq->cqid)); | |
cfdda9d7 | 319 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
cf7fe64a HS |
320 | FW_RI_RES_WR_DCAEN_V(0) | |
321 | FW_RI_RES_WR_DCACPU_V(0) | | |
322 | FW_RI_RES_WR_FBMIN_V(2) | | |
b414fa01 SW |
323 | (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) : |
324 | FW_RI_RES_WR_FBMAX_V(3)) | | |
cf7fe64a HS |
325 | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | |
326 | FW_RI_RES_WR_CIDXFTHRESH_V(0) | | |
327 | FW_RI_RES_WR_EQSIZE_V(eqsize)); | |
cfdda9d7 SW |
328 | res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); |
329 | res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); | |
330 | res++; | |
331 | res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; | |
332 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | |
333 | ||
334 | /* | |
335 | * eqsize is the number of 64B entries plus the status page size. | |
336 | */ | |
04e10e21 HS |
337 | eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + |
338 | rdev->hw_queue.t4_eq_status_entries; | |
cfdda9d7 | 339 | res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( |
cf7fe64a HS |
340 | FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ |
341 | FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ | |
342 | FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ | |
343 | FW_RI_RES_WR_IQID_V(rcq->cqid)); | |
cfdda9d7 | 344 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
cf7fe64a HS |
345 | FW_RI_RES_WR_DCAEN_V(0) | |
346 | FW_RI_RES_WR_DCACPU_V(0) | | |
347 | FW_RI_RES_WR_FBMIN_V(2) | | |
b414fa01 | 348 | FW_RI_RES_WR_FBMAX_V(3) | |
cf7fe64a HS |
349 | FW_RI_RES_WR_CIDXFTHRESHO_V(0) | |
350 | FW_RI_RES_WR_CIDXFTHRESH_V(0) | | |
351 | FW_RI_RES_WR_EQSIZE_V(eqsize)); | |
cfdda9d7 SW |
352 | res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); |
353 | res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); | |
354 | ||
355 | c4iw_init_wr_wait(&wr_wait); | |
356 | ||
357 | ret = c4iw_ofld_send(rdev, skb); | |
358 | if (ret) | |
c079c287 | 359 | goto free_dma; |
aadc4df3 | 360 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); |
cfdda9d7 | 361 | if (ret) |
c079c287 | 362 | goto free_dma; |
cfdda9d7 | 363 | |
a9a42886 JP |
364 | pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n", |
365 | __func__, wq->sq.qid, wq->rq.qid, wq->db, | |
366 | wq->sq.bar2_va, wq->rq.bar2_va); | |
cfdda9d7 SW |
367 | |
368 | return 0; | |
c079c287 | 369 | free_dma: |
cfdda9d7 SW |
370 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
371 | wq->rq.memsize, wq->rq.queue, | |
f38926aa | 372 | dma_unmap_addr(&wq->rq, mapping)); |
c079c287 | 373 | free_sq: |
c6d7b267 | 374 | dealloc_sq(rdev, &wq->sq); |
c079c287 | 375 | free_hwaddr: |
cfdda9d7 | 376 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
c079c287 | 377 | free_sw_rq: |
cfdda9d7 | 378 | kfree(wq->rq.sw_rq); |
c079c287 | 379 | free_sw_sq: |
cfdda9d7 | 380 | kfree(wq->sq.sw_sq); |
c079c287 | 381 | free_rq_qid: |
cfdda9d7 | 382 | c4iw_put_qpid(rdev, wq->rq.qid, uctx); |
c079c287 | 383 | free_sq_qid: |
cfdda9d7 | 384 | c4iw_put_qpid(rdev, wq->sq.qid, uctx); |
c079c287 | 385 | return ret; |
cfdda9d7 SW |
386 | } |
387 | ||
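/*
 * build_immd() copies the WR's SGEs inline into the WQE as a
 * FW_RI_DATA_IMMD chunk, wrapping the destination pointer at the
 * end of the SQ ring and zero-padding the payload to the next
 * 16-byte boundary. Returns -EMSGSIZE if the total exceeds "max".
 */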
d37ac31d SW |
388 | static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, |
389 | struct ib_send_wr *wr, int max, u32 *plenp) | |
cfdda9d7 | 390 | { |
d37ac31d SW |
391 | u8 *dstp, *srcp; |
392 | u32 plen = 0; | |
cfdda9d7 | 393 | int i; |
d37ac31d SW |
394 | int rem, len; |
395 | ||
396 | dstp = (u8 *)immdp->data; | |
397 | for (i = 0; i < wr->num_sge; i++) { | |
398 | if ((plen + wr->sg_list[i].length) > max) | |
399 | return -EMSGSIZE; | |
400 | srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; | |
401 | plen += wr->sg_list[i].length; | |
402 | rem = wr->sg_list[i].length; | |
403 | while (rem) { | |
404 | if (dstp == (u8 *)&sq->queue[sq->size]) | |
405 | dstp = (u8 *)sq->queue; | |
406 | if (rem <= (u8 *)&sq->queue[sq->size] - dstp) | |
407 | len = rem; | |
408 | else | |
409 | len = (u8 *)&sq->queue[sq->size] - dstp; | |
410 | memcpy(dstp, srcp, len); | |
411 | dstp += len; | |
412 | srcp += len; | |
413 | rem -= len; | |
414 | } | |
415 | } | |
13fecb83 SW |
416 | len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp); |
417 | if (len) | |
418 | memset(dstp, 0, len); | |
d37ac31d SW |
419 | immdp->op = FW_RI_DATA_IMMD; |
420 | immdp->r1 = 0; | |
421 | immdp->r2 = 0; | |
422 | immdp->immdlen = cpu_to_be32(plen); | |
423 | *plenp = plen; | |
424 | return 0; | |
425 | } | |
426 | ||
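/*
 * build_isgl() emits a FW_RI_DATA_ISGL: one (lkey|length, addr)
 * flit pair per SGE, wrapping from queue_end back to queue_start.
 * The (plen + length) < plen test catches u32 overflow of the
 * accumulated payload length.
 */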
427 | static int build_isgl(__be64 *queue_start, __be64 *queue_end, | |
428 | struct fw_ri_isgl *isglp, struct ib_sge *sg_list, | |
429 | int num_sge, u32 *plenp) | |
430 | ||
431 | { | |
432 | int i; | |
433 | u32 plen = 0; | |
434 | __be64 *flitp = (__be64 *)isglp->sge; | |
435 | ||
436 | for (i = 0; i < num_sge; i++) { | |
437 | if ((plen + sg_list[i].length) < plen) | |
438 | return -EMSGSIZE; | |
439 | plen += sg_list[i].length; | |
440 | *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | | |
441 | sg_list[i].length); | |
442 | if (++flitp == queue_end) | |
443 | flitp = queue_start; | |
444 | *flitp = cpu_to_be64(sg_list[i].addr); | |
445 | if (++flitp == queue_end) | |
446 | flitp = queue_start; | |
447 | } | |
13fecb83 | 448 | *flitp = (__force __be64)0; |
d37ac31d SW |
449 | isglp->op = FW_RI_DATA_ISGL; |
450 | isglp->r1 = 0; | |
451 | isglp->nsge = cpu_to_be16(num_sge); | |
452 | isglp->r2 = 0; | |
453 | if (plenp) | |
454 | *plenp = plen; | |
455 | return 0; | |
456 | } | |
457 | ||
458 | static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, | |
459 | struct ib_send_wr *wr, u8 *len16) | |
460 | { | |
cfdda9d7 SW |
461 | u32 plen; |
462 | int size; | |
d37ac31d | 463 | int ret; |
cfdda9d7 SW |
464 | |
465 | if (wr->num_sge > T4_MAX_SEND_SGE) | |
466 | return -EINVAL; | |
467 | switch (wr->opcode) { | |
468 | case IB_WR_SEND: | |
469 | if (wr->send_flags & IB_SEND_SOLICITED) | |
470 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 471 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE)); |
cfdda9d7 SW |
472 | else |
473 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 474 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND)); |
cfdda9d7 SW |
475 | wqe->send.stag_inv = 0; |
476 | break; | |
477 | case IB_WR_SEND_WITH_INV: | |
478 | if (wr->send_flags & IB_SEND_SOLICITED) | |
479 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 480 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV)); |
cfdda9d7 SW |
481 | else |
482 | wqe->send.sendop_pkd = cpu_to_be32( | |
cf7fe64a | 483 | FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV)); |
cfdda9d7 SW |
484 | wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); |
485 | break; | |
486 | ||
487 | default: | |
488 | return -EINVAL; | |
489 | } | |
c3f98fa2 SW |
490 | wqe->send.r3 = 0; |
491 | wqe->send.r4 = 0; | |
d37ac31d | 492 | |
cfdda9d7 SW |
493 | plen = 0; |
494 | if (wr->num_sge) { | |
495 | if (wr->send_flags & IB_SEND_INLINE) { | |
d37ac31d SW |
496 | ret = build_immd(sq, wqe->send.u.immd_src, wr, |
497 | T4_MAX_SEND_INLINE, &plen); | |
498 | if (ret) | |
499 | return ret; | |
cfdda9d7 SW |
500 | size = sizeof wqe->send + sizeof(struct fw_ri_immd) + |
501 | plen; | |
502 | } else { | |
d37ac31d SW |
503 | ret = build_isgl((__be64 *)sq->queue, |
504 | (__be64 *)&sq->queue[sq->size], | |
505 | wqe->send.u.isgl_src, | |
506 | wr->sg_list, wr->num_sge, &plen); | |
507 | if (ret) | |
508 | return ret; | |
cfdda9d7 SW |
509 | size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + |
510 | wr->num_sge * sizeof(struct fw_ri_sge); | |
511 | } | |
512 | } else { | |
513 | wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
514 | wqe->send.u.immd_src[0].r1 = 0; | |
515 | wqe->send.u.immd_src[0].r2 = 0; | |
516 | wqe->send.u.immd_src[0].immdlen = 0; | |
517 | size = sizeof wqe->send + sizeof(struct fw_ri_immd); | |
d37ac31d | 518 | plen = 0; |
cfdda9d7 SW |
519 | } |
520 | *len16 = DIV_ROUND_UP(size, 16); | |
521 | wqe->send.plen = cpu_to_be32(plen); | |
522 | return 0; | |
523 | } | |
524 | ||
d37ac31d SW |
525 | static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, |
526 | struct ib_send_wr *wr, u8 *len16) | |
cfdda9d7 | 527 | { |
cfdda9d7 SW |
528 | u32 plen; |
529 | int size; | |
d37ac31d | 530 | int ret; |
cfdda9d7 | 531 | |
d37ac31d | 532 | if (wr->num_sge > T4_MAX_SEND_SGE) |
cfdda9d7 SW |
533 | return -EINVAL; |
534 | wqe->write.r2 = 0; | |
e622f2f4 CH |
535 | wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); |
536 | wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); | |
cfdda9d7 SW |
537 | if (wr->num_sge) { |
538 | if (wr->send_flags & IB_SEND_INLINE) { | |
d37ac31d SW |
539 | ret = build_immd(sq, wqe->write.u.immd_src, wr, |
540 | T4_MAX_WRITE_INLINE, &plen); | |
541 | if (ret) | |
542 | return ret; | |
cfdda9d7 SW |
543 | size = sizeof wqe->write + sizeof(struct fw_ri_immd) + |
544 | plen; | |
545 | } else { | |
d37ac31d SW |
546 | ret = build_isgl((__be64 *)sq->queue, |
547 | (__be64 *)&sq->queue[sq->size], | |
548 | wqe->write.u.isgl_src, | |
549 | wr->sg_list, wr->num_sge, &plen); | |
550 | if (ret) | |
551 | return ret; | |
cfdda9d7 SW |
552 | size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + |
553 | wr->num_sge * sizeof(struct fw_ri_sge); | |
554 | } | |
555 | } else { | |
556 | wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
557 | wqe->write.u.immd_src[0].r1 = 0; | |
558 | wqe->write.u.immd_src[0].r2 = 0; | |
559 | wqe->write.u.immd_src[0].immdlen = 0; | |
560 | size = sizeof wqe->write + sizeof(struct fw_ri_immd); | |
d37ac31d | 561 | plen = 0; |
cfdda9d7 SW |
562 | } |
563 | *len16 = DIV_ROUND_UP(size, 16); | |
564 | wqe->write.plen = cpu_to_be32(plen); | |
565 | return 0; | |
566 | } | |
567 | ||
568 | static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | |
569 | { | |
570 | if (wr->num_sge > 1) | |
571 | return -EINVAL; | |
572 | if (wr->num_sge) { | |
e622f2f4 CH |
573 | wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); |
574 | wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr | |
cfdda9d7 | 575 | >> 32)); |
e622f2f4 | 576 | wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); |
cfdda9d7 SW |
577 | wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); |
578 | wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); | |
579 | wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr | |
580 | >> 32)); | |
581 | wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); | |
582 | } else { | |
583 | wqe->read.stag_src = cpu_to_be32(2); | |
584 | wqe->read.to_src_hi = 0; | |
585 | wqe->read.to_src_lo = 0; | |
586 | wqe->read.stag_sink = cpu_to_be32(2); | |
587 | wqe->read.plen = 0; | |
588 | wqe->read.to_sink_hi = 0; | |
589 | wqe->read.to_sink_lo = 0; | |
590 | } | |
591 | wqe->read.r2 = 0; | |
592 | wqe->read.r5 = 0; | |
593 | *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); | |
594 | return 0; | |
595 | } | |
596 | ||
597 | static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, | |
598 | struct ib_recv_wr *wr, u8 *len16) | |
599 | { | |
d37ac31d | 600 | int ret; |
cfdda9d7 | 601 | |
d37ac31d SW |
602 | ret = build_isgl((__be64 *)qhp->wq.rq.queue, |
603 | (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], | |
604 | &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); | |
605 | if (ret) | |
606 | return ret; | |
cfdda9d7 SW |
607 | *len16 = DIV_ROUND_UP(sizeof wqe->recv + |
608 | wr->num_sge * sizeof(struct fw_ri_sge), 16); | |
609 | return 0; | |
610 | } | |
611 | ||
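/*
 * Two fastreg paths: build_tpte_memreg() writes the TPT entry
 * directly in a FW_RI_FR_NSMR_TPTE_WR (small MRs, when the device
 * supports it), while build_memreg() posts a FW_RI_FR_NSMR_WR whose
 * PBL travels either inline (FW_RI_DATA_IMMD) or, above the
 * max_fr_immd threshold with DSGL support, by DMA (FW_RI_DATA_DSGL).
 */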
49b53a93 SW |
612 | static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr, |
613 | struct ib_reg_wr *wr, struct c4iw_mr *mhp, | |
614 | u8 *len16) | |
615 | { | |
616 | __be64 *p = (__be64 *)fr->pbl; | |
617 | ||
618 | fr->r2 = cpu_to_be32(0); | |
619 | fr->stag = cpu_to_be32(mhp->ibmr.rkey); | |
620 | ||
621 | fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | | |
622 | FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) | | |
623 | FW_RI_TPTE_STAGSTATE_V(1) | | |
624 | FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) | | |
625 | FW_RI_TPTE_PDID_V(mhp->attr.pdid)); | |
626 | fr->tpte.locread_to_qpid = cpu_to_be32( | |
627 | FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) | | |
628 | FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) | | |
629 | FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12)); | |
630 | fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V( | |
631 | PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); | |
632 | fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0); | |
633 | fr->tpte.len_hi = cpu_to_be32(0); | |
634 | fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length); | |
635 | fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); | |
636 | fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); | |
637 | ||
638 | p[0] = cpu_to_be64((u64)mhp->mpl[0]); | |
639 | p[1] = cpu_to_be64((u64)mhp->mpl[1]); | |
640 | ||
641 | *len16 = DIV_ROUND_UP(sizeof(*fr), 16); | |
642 | } | |
643 | ||
8376b86d | 644 | static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, |
49b53a93 SW |
645 | struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, |
646 | bool dsgl_supported) | |
8376b86d | 647 | { |
8376b86d SG |
648 | struct fw_ri_immd *imdp; |
649 | __be64 *p; | |
650 | int i; | |
651 | int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32); | |
652 | int rem; | |
653 | ||
ee30f7d5 | 654 | if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl)) |
8376b86d SG |
655 | return -EINVAL; |
656 | ||
657 | wqe->fr.qpbinde_to_dcacpu = 0; | |
658 | wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; | |
659 | wqe->fr.addr_type = FW_RI_VA_BASED_TO; | |
660 | wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); | |
661 | wqe->fr.len_hi = 0; | |
662 | wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length); | |
663 | wqe->fr.stag = cpu_to_be32(wr->key); | |
664 | wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); | |
665 | wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & | |
666 | 0xffffffff); | |
667 | ||
ee30f7d5 | 668 | if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) { |
8376b86d SG |
669 | struct fw_ri_dsgl *sglp; |
670 | ||
671 | for (i = 0; i < mhp->mpl_len; i++) | |
672 | mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]); | |
673 | ||
674 | sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); | |
675 | sglp->op = FW_RI_DATA_DSGL; | |
676 | sglp->r1 = 0; | |
677 | sglp->nsge = cpu_to_be16(1); | |
678 | sglp->addr0 = cpu_to_be64(mhp->mpl_addr); | |
679 | sglp->len0 = cpu_to_be32(pbllen); | |
680 | ||
681 | *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); | |
682 | } else { | |
683 | imdp = (struct fw_ri_immd *)(&wqe->fr + 1); | |
684 | imdp->op = FW_RI_DATA_IMMD; | |
685 | imdp->r1 = 0; | |
686 | imdp->r2 = 0; | |
687 | imdp->immdlen = cpu_to_be32(pbllen); | |
688 | p = (__be64 *)(imdp + 1); | |
689 | rem = pbllen; | |
690 | for (i = 0; i < mhp->mpl_len; i++) { | |
691 | *p = cpu_to_be64((u64)mhp->mpl[i]); | |
692 | rem -= sizeof(*p); | |
693 | if (++p == (__be64 *)&sq->queue[sq->size]) | |
694 | p = (__be64 *)sq->queue; | |
695 | } | |
696 | BUG_ON(rem < 0); | |
697 | while (rem) { | |
698 | *p = 0; | |
699 | rem -= sizeof(*p); | |
700 | if (++p == (__be64 *)&sq->queue[sq->size]) | |
701 | p = (__be64 *)sq->queue; | |
702 | } | |
703 | *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) | |
704 | + pbllen, 16); | |
705 | } | |
706 | return 0; | |
707 | } | |
708 | ||
5c6b2aaf | 709 | static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) |
cfdda9d7 SW |
710 | { |
711 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); | |
712 | wqe->inv.r2 = 0; | |
713 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); | |
714 | return 0; | |
715 | } | |
716 | ||
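/*
 * QP freeing is deferred to a workqueue: the final kref_put() can
 * happen in a context that must not sleep, so queue_qp_free() only
 * schedules free_qp_work(), which tears down the hardware queues
 * and drops the ucontext reference.
 */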
c12a67fe SW |
717 | static void free_qp_work(struct work_struct *work) |
718 | { | |
719 | struct c4iw_ucontext *ucontext; | |
720 | struct c4iw_qp *qhp; | |
721 | struct c4iw_dev *rhp; | |
722 | ||
723 | qhp = container_of(work, struct c4iw_qp, free_work); | |
724 | ucontext = qhp->ucontext; | |
725 | rhp = qhp->rhp; | |
726 | ||
a9a42886 | 727 | pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext); |
c12a67fe SW |
728 | destroy_qp(&rhp->rdev, &qhp->wq, |
729 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
730 | ||
731 | if (ucontext) | |
732 | c4iw_put_ucontext(ucontext); | |
733 | kfree(qhp); | |
734 | } | |
735 | ||
736 | static void queue_qp_free(struct kref *kref) | |
ad61a4c7 SW |
737 | { |
738 | struct c4iw_qp *qhp; | |
739 | ||
740 | qhp = container_of(kref, struct c4iw_qp, kref); | |
a9a42886 | 741 | pr_debug("%s qhp %p\n", __func__, qhp); |
c12a67fe | 742 | queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); |
ad61a4c7 SW |
743 | } |
744 | ||
cfdda9d7 SW |
745 | void c4iw_qp_add_ref(struct ib_qp *qp) |
746 | { | |
a9a42886 | 747 | pr_debug("%s ib_qp %p\n", __func__, qp); |
ad61a4c7 | 748 | kref_get(&to_c4iw_qp(qp)->kref); |
cfdda9d7 SW |
749 | } |
750 | ||
751 | void c4iw_qp_rem_ref(struct ib_qp *qp) | |
752 | { | |
a9a42886 | 753 | pr_debug("%s ib_qp %p\n", __func__, qp); |
c12a67fe | 754 | kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); |
cfdda9d7 SW |
755 | } |
756 | ||
05eb2389 SW |
757 | static void add_to_fc_list(struct list_head *head, struct list_head *entry) |
758 | { | |
759 | if (list_empty(entry)) | |
760 | list_add_tail(entry, head); | |
761 | } | |
762 | ||
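/*
 * Doorbell flow control: while rhp->db_state is NORMAL the doorbell
 * is rung immediately; otherwise the QP is put on db_fc_list and the
 * producer-index increment accumulates in wq_pidx_inc so the ring
 * can be replayed once the doorbell FIFO has drained.
 */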
763 | static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) | |
764 | { | |
765 | unsigned long flags; | |
766 | ||
767 | spin_lock_irqsave(&qhp->rhp->lock, flags); | |
768 | spin_lock(&qhp->lock); | |
fa658a98 | 769 | if (qhp->rhp->db_state == NORMAL) |
963cab50 | 770 | t4_ring_sq_db(&qhp->wq, inc, NULL); |
fa658a98 | 771 | else { |
05eb2389 SW |
772 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
773 | qhp->wq.sq.wq_pidx_inc += inc; | |
774 | } | |
775 | spin_unlock(&qhp->lock); | |
776 | spin_unlock_irqrestore(&qhp->rhp->lock, flags); | |
777 | return 0; | |
778 | } | |
779 | ||
780 | static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) | |
781 | { | |
782 | unsigned long flags; | |
783 | ||
784 | spin_lock_irqsave(&qhp->rhp->lock, flags); | |
785 | spin_lock(&qhp->lock); | |
fa658a98 | 786 | if (qhp->rhp->db_state == NORMAL) |
963cab50 | 787 | t4_ring_rq_db(&qhp->wq, inc, NULL); |
fa658a98 | 788 | else { |
05eb2389 SW |
789 | add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); |
790 | qhp->wq.rq.wq_pidx_inc += inc; | |
791 | } | |
792 | spin_unlock(&qhp->lock); | |
793 | spin_unlock_irqrestore(&qhp->rhp->lock, flags); | |
794 | return 0; | |
795 | } | |
796 | ||
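/*
 * Drain completions: once the WQ is in error, post_send/post_receive
 * complete each new WR right away as a software CQE with status
 * T4_ERR_SWFLUSH rather than touching hardware, so callers draining
 * the QP still see a completion per posted WR.
 */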
4fe7c296 SW |
797 | static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) |
798 | { | |
799 | struct t4_cqe cqe = {}; | |
800 | struct c4iw_cq *schp; | |
801 | unsigned long flag; | |
802 | struct t4_cq *cq; | |
803 | ||
804 | schp = to_c4iw_cq(qhp->ibqp.send_cq); | |
805 | cq = &schp->cq; | |
806 | ||
807 | cqe.u.drain_cookie = wr->wr_id; | |
808 | cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | | |
809 | CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | | |
810 | CQE_TYPE_V(1) | | |
811 | CQE_SWCQE_V(1) | | |
812 | CQE_QPID_V(qhp->wq.sq.qid)); | |
813 | ||
814 | spin_lock_irqsave(&schp->lock, flag); | |
815 | cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); | |
816 | cq->sw_queue[cq->sw_pidx] = cqe; | |
817 | t4_swcq_produce(cq); | |
818 | spin_unlock_irqrestore(&schp->lock, flag); | |
819 | ||
820 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | |
821 | (*schp->ibcq.comp_handler)(&schp->ibcq, | |
822 | schp->ibcq.cq_context); | |
823 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | |
824 | } | |
825 | ||
826 | static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) | |
827 | { | |
828 | struct t4_cqe cqe = {}; | |
829 | struct c4iw_cq *rchp; | |
830 | unsigned long flag; | |
831 | struct t4_cq *cq; | |
832 | ||
833 | rchp = to_c4iw_cq(qhp->ibqp.recv_cq); | |
834 | cq = &rchp->cq; | |
835 | ||
836 | cqe.u.drain_cookie = wr->wr_id; | |
837 | cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | | |
838 | CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | | |
839 | CQE_TYPE_V(0) | | |
840 | CQE_SWCQE_V(1) | | |
841 | CQE_QPID_V(qhp->wq.sq.qid)); | |
842 | ||
843 | spin_lock_irqsave(&rchp->lock, flag); | |
844 | cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); | |
845 | cq->sw_queue[cq->sw_pidx] = cqe; | |
846 | t4_swcq_produce(cq); | |
847 | spin_unlock_irqrestore(&rchp->lock, flag); | |
848 | ||
849 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | |
850 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | |
851 | rchp->ibcq.cq_context); | |
852 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | |
853 | } | |
854 | ||
cfdda9d7 SW |
855 | int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
856 | struct ib_send_wr **bad_wr) | |
857 | { | |
858 | int err = 0; | |
859 | u8 len16 = 0; | |
860 | enum fw_wr_opcodes fw_opcode = 0; | |
861 | enum fw_ri_wr_flags fw_flags; | |
862 | struct c4iw_qp *qhp; | |
fa658a98 | 863 | union t4_wr *wqe = NULL; |
cfdda9d7 SW |
864 | u32 num_wrs; |
865 | struct t4_swsqe *swsqe; | |
866 | unsigned long flag; | |
867 | u16 idx = 0; | |
868 | ||
869 | qhp = to_c4iw_qp(ibqp); | |
870 | spin_lock_irqsave(&qhp->lock, flag); | |
871 | if (t4_wq_in_error(&qhp->wq)) { | |
872 | spin_unlock_irqrestore(&qhp->lock, flag); | |
4fe7c296 SW |
873 | complete_sq_drain_wr(qhp, wr); |
874 | return err; | |
cfdda9d7 SW |
875 | } |
876 | num_wrs = t4_sq_avail(&qhp->wq); | |
877 | if (num_wrs == 0) { | |
878 | spin_unlock_irqrestore(&qhp->lock, flag); | |
4ff522ea | 879 | *bad_wr = wr; |
cfdda9d7 SW |
880 | return -ENOMEM; |
881 | } | |
882 | while (wr) { | |
883 | if (num_wrs == 0) { | |
884 | err = -ENOMEM; | |
885 | *bad_wr = wr; | |
886 | break; | |
887 | } | |
d37ac31d SW |
888 | wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + |
889 | qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); | |
890 | ||
cfdda9d7 SW |
891 | fw_flags = 0; |
892 | if (wr->send_flags & IB_SEND_SOLICITED) | |
893 | fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; | |
ba32de9d | 894 | if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) |
cfdda9d7 SW |
895 | fw_flags |= FW_RI_COMPLETION_FLAG; |
896 | swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; | |
897 | switch (wr->opcode) { | |
898 | case IB_WR_SEND_WITH_INV: | |
899 | case IB_WR_SEND: | |
900 | if (wr->send_flags & IB_SEND_FENCE) | |
901 | fw_flags |= FW_RI_READ_FENCE_FLAG; | |
902 | fw_opcode = FW_RI_SEND_WR; | |
903 | if (wr->opcode == IB_WR_SEND) | |
904 | swsqe->opcode = FW_RI_SEND; | |
905 | else | |
906 | swsqe->opcode = FW_RI_SEND_WITH_INV; | |
d37ac31d | 907 | err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); |
cfdda9d7 SW |
908 | break; |
909 | case IB_WR_RDMA_WRITE: | |
910 | fw_opcode = FW_RI_RDMA_WRITE_WR; | |
911 | swsqe->opcode = FW_RI_RDMA_WRITE; | |
d37ac31d | 912 | err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); |
cfdda9d7 SW |
913 | break; |
914 | case IB_WR_RDMA_READ: | |
2f1fb507 | 915 | case IB_WR_RDMA_READ_WITH_INV: |
cfdda9d7 SW |
916 | fw_opcode = FW_RI_RDMA_READ_WR; |
917 | swsqe->opcode = FW_RI_READ_REQ; | |
5c6b2aaf SW |
918 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { |
919 | c4iw_invalidate_mr(qhp->rhp, | |
920 | wr->sg_list[0].lkey); | |
410ade4c | 921 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; |
5c6b2aaf | 922 | } else { |
2f1fb507 | 923 | fw_flags = 0; |
5c6b2aaf | 924 | } |
cfdda9d7 SW |
925 | err = build_rdma_read(wqe, wr, &len16); |
926 | if (err) | |
927 | break; | |
928 | swsqe->read_len = wr->sg_list[0].length; | |
929 | if (!qhp->wq.sq.oldest_read) | |
930 | qhp->wq.sq.oldest_read = swsqe; | |
931 | break; | |
49b53a93 SW |
932 | case IB_WR_REG_MR: { |
933 | struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr); | |
934 | ||
8376b86d | 935 | swsqe->opcode = FW_RI_FAST_REGISTER; |
49b53a93 SW |
936 | if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support && |
937 | !mhp->attr.state && mhp->mpl_len <= 2) { | |
938 | fw_opcode = FW_RI_FR_NSMR_TPTE_WR; | |
939 | build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr), | |
940 | mhp, &len16); | |
941 | } else { | |
942 | fw_opcode = FW_RI_FR_NSMR_WR; | |
943 | err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), | |
944 | mhp, &len16, | |
945 | qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl); | |
946 | if (err) | |
947 | break; | |
948 | } | |
949 | mhp->attr.state = 1; | |
8376b86d | 950 | break; |
49b53a93 | 951 | } |
cfdda9d7 | 952 | case IB_WR_LOCAL_INV: |
4ab1eb9c SW |
953 | if (wr->send_flags & IB_SEND_FENCE) |
954 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; | |
cfdda9d7 SW |
955 | fw_opcode = FW_RI_INV_LSTAG_WR; |
956 | swsqe->opcode = FW_RI_LOCAL_INV; | |
5c6b2aaf SW |
957 | err = build_inv_stag(wqe, wr, &len16); |
958 | c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); | |
cfdda9d7 SW |
959 | break; |
960 | default: | |
a9a42886 JP |
961 | pr_debug("%s post of type=%d TBD!\n", __func__, |
962 | wr->opcode); | |
cfdda9d7 SW |
963 | err = -EINVAL; |
964 | } | |
965 | if (err) { | |
966 | *bad_wr = wr; | |
967 | break; | |
968 | } | |
969 | swsqe->idx = qhp->wq.sq.pidx; | |
970 | swsqe->complete = 0; | |
ba32de9d SW |
971 | swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || |
972 | qhp->sq_sig_all; | |
1cf24dce | 973 | swsqe->flushed = 0; |
cfdda9d7 | 974 | swsqe->wr_id = wr->wr_id; |
7730b4c7 HS |
975 | if (c4iw_wr_log) { |
976 | swsqe->sge_ts = cxgb4_read_sge_timestamp( | |
977 | qhp->rhp->rdev.lldi.ports[0]); | |
978 | getnstimeofday(&swsqe->host_ts); | |
979 | } | |
cfdda9d7 SW |
980 | |
981 | init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); | |
982 | ||
a9a42886 JP |
983 | pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n", |
984 | __func__, | |
985 | (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, | |
986 | swsqe->opcode, swsqe->read_len); | |
cfdda9d7 SW |
987 | wr = wr->next; |
988 | num_wrs--; | |
d37ac31d SW |
989 | t4_sq_produce(&qhp->wq, len16); |
990 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); | |
cfdda9d7 | 991 | } |
05eb2389 | 992 | if (!qhp->rhp->rdev.status_page->db_off) { |
963cab50 | 993 | t4_ring_sq_db(&qhp->wq, idx, wqe); |
05eb2389 SW |
994 | spin_unlock_irqrestore(&qhp->lock, flag); |
995 | } else { | |
996 | spin_unlock_irqrestore(&qhp->lock, flag); | |
997 | ring_kernel_sq_db(qhp, idx); | |
998 | } | |
cfdda9d7 SW |
999 | return err; |
1000 | } | |
1001 | ||
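/*
 * Usage sketch (not part of this file): a kernel ULP reaches
 * c4iw_post_send() through the ib_post_send() verb, roughly:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = cookie, .opcode = IB_WR_SEND,
 *		.sg_list = &sge, .num_sge = 1,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * dma_addr, len, mr_lkey and cookie are placeholder values.
 */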
1002 | int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1003 | struct ib_recv_wr **bad_wr) | |
1004 | { | |
1005 | int err = 0; | |
1006 | struct c4iw_qp *qhp; | |
fa658a98 | 1007 | union t4_recv_wr *wqe = NULL; |
cfdda9d7 SW |
1008 | u32 num_wrs; |
1009 | u8 len16 = 0; | |
1010 | unsigned long flag; | |
1011 | u16 idx = 0; | |
1012 | ||
1013 | qhp = to_c4iw_qp(ibqp); | |
1014 | spin_lock_irqsave(&qhp->lock, flag); | |
1015 | if (t4_wq_in_error(&qhp->wq)) { | |
1016 | spin_unlock_irqrestore(&qhp->lock, flag); | |
4fe7c296 SW |
1017 | complete_rq_drain_wr(qhp, wr); |
1018 | return err; | |
cfdda9d7 SW |
1019 | } |
1020 | num_wrs = t4_rq_avail(&qhp->wq); | |
1021 | if (num_wrs == 0) { | |
1022 | spin_unlock_irqrestore(&qhp->lock, flag); | |
4ff522ea | 1023 | *bad_wr = wr; |
cfdda9d7 SW |
1024 | return -ENOMEM; |
1025 | } | |
1026 | while (wr) { | |
1027 | if (wr->num_sge > T4_MAX_RECV_SGE) { | |
1028 | err = -EINVAL; | |
1029 | *bad_wr = wr; | |
1030 | break; | |
1031 | } | |
d37ac31d SW |
1032 | wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + |
1033 | qhp->wq.rq.wq_pidx * | |
1034 | T4_EQ_ENTRY_SIZE); | |
cfdda9d7 SW |
1035 | if (num_wrs) |
1036 | err = build_rdma_recv(qhp, wqe, wr, &len16); | |
1037 | else | |
1038 | err = -ENOMEM; | |
1039 | if (err) { | |
1040 | *bad_wr = wr; | |
1041 | break; | |
1042 | } | |
1043 | ||
1044 | qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; | |
7730b4c7 HS |
1045 | if (c4iw_wr_log) { |
1046 | qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = | |
1047 | cxgb4_read_sge_timestamp( | |
1048 | qhp->rhp->rdev.lldi.ports[0]); | |
1049 | getnstimeofday( | |
1050 | &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); | |
1051 | } | |
cfdda9d7 SW |
1052 | |
1053 | wqe->recv.opcode = FW_RI_RECV_WR; | |
1054 | wqe->recv.r1 = 0; | |
1055 | wqe->recv.wrid = qhp->wq.rq.pidx; | |
1056 | wqe->recv.r2[0] = 0; | |
1057 | wqe->recv.r2[1] = 0; | |
1058 | wqe->recv.r2[2] = 0; | |
1059 | wqe->recv.len16 = len16; | |
a9a42886 JP |
1060 | pr_debug("%s cookie 0x%llx pidx %u\n", |
1061 | __func__, | |
1062 | (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); | |
d37ac31d SW |
1063 | t4_rq_produce(&qhp->wq, len16); |
1064 | idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); | |
cfdda9d7 SW |
1065 | wr = wr->next; |
1066 | num_wrs--; | |
cfdda9d7 | 1067 | } |
05eb2389 | 1068 | if (!qhp->rhp->rdev.status_page->db_off) { |
963cab50 | 1069 | t4_ring_rq_db(&qhp->wq, idx, wqe); |
05eb2389 SW |
1070 | spin_unlock_irqrestore(&qhp->lock, flag); |
1071 | } else { | |
1072 | spin_unlock_irqrestore(&qhp->lock, flag); | |
1073 | ring_kernel_rq_db(qhp, idx); | |
1074 | } | |
cfdda9d7 SW |
1075 | return err; |
1076 | } | |
1077 | ||
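/*
 * build_term_codes() maps a T4 CQE error status to the RDMAP/DDP/MPA
 * layer, error type, and error code fields carried in the TERMINATE
 * message (see post_terminate() below). A NULL err_cqe produces a
 * local catastrophic error with ecode 0.
 */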
cfdda9d7 SW |
1078 | static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, |
1079 | u8 *ecode) | |
1080 | { | |
1081 | int status; | |
1082 | int tagged; | |
1083 | int opcode; | |
1084 | int rqtype; | |
1085 | int send_inv; | |
1086 | ||
1087 | if (!err_cqe) { | |
1088 | *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; | |
1089 | *ecode = 0; | |
1090 | return; | |
1091 | } | |
1092 | ||
1093 | status = CQE_STATUS(err_cqe); | |
1094 | opcode = CQE_OPCODE(err_cqe); | |
1095 | rqtype = RQ_TYPE(err_cqe); | |
1096 | send_inv = (opcode == FW_RI_SEND_WITH_INV) || | |
1097 | (opcode == FW_RI_SEND_WITH_SE_INV); | |
1098 | tagged = (opcode == FW_RI_RDMA_WRITE) || | |
1099 | (rqtype && (opcode == FW_RI_READ_RESP)); | |
1100 | ||
1101 | switch (status) { | |
1102 | case T4_ERR_STAG: | |
1103 | if (send_inv) { | |
1104 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1105 | *ecode = RDMAP_CANT_INV_STAG; | |
1106 | } else { | |
1107 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1108 | *ecode = RDMAP_INV_STAG; | |
1109 | } | |
1110 | break; | |
1111 | case T4_ERR_PDID: | |
1112 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1113 | if ((opcode == FW_RI_SEND_WITH_INV) || | |
1114 | (opcode == FW_RI_SEND_WITH_SE_INV)) | |
1115 | *ecode = RDMAP_CANT_INV_STAG; | |
1116 | else | |
1117 | *ecode = RDMAP_STAG_NOT_ASSOC; | |
1118 | break; | |
1119 | case T4_ERR_QPID: | |
1120 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1121 | *ecode = RDMAP_STAG_NOT_ASSOC; | |
1122 | break; | |
1123 | case T4_ERR_ACCESS: | |
1124 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1125 | *ecode = RDMAP_ACC_VIOL; | |
1126 | break; | |
1127 | case T4_ERR_WRAP: | |
1128 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1129 | *ecode = RDMAP_TO_WRAP; | |
1130 | break; | |
1131 | case T4_ERR_BOUND: | |
1132 | if (tagged) { | |
1133 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1134 | *ecode = DDPT_BASE_BOUNDS; | |
1135 | } else { | |
1136 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; | |
1137 | *ecode = RDMAP_BASE_BOUNDS; | |
1138 | } | |
1139 | break; | |
1140 | case T4_ERR_INVALIDATE_SHARED_MR: | |
1141 | case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: | |
1142 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1143 | *ecode = RDMAP_CANT_INV_STAG; | |
1144 | break; | |
1145 | case T4_ERR_ECC: | |
1146 | case T4_ERR_ECC_PSTAG: | |
1147 | case T4_ERR_INTERNAL_ERR: | |
1148 | *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; | |
1149 | *ecode = 0; | |
1150 | break; | |
1151 | case T4_ERR_OUT_OF_RQE: | |
1152 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1153 | *ecode = DDPU_INV_MSN_NOBUF; | |
1154 | break; | |
1155 | case T4_ERR_PBL_ADDR_BOUND: | |
1156 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1157 | *ecode = DDPT_BASE_BOUNDS; | |
1158 | break; | |
1159 | case T4_ERR_CRC: | |
1160 | *layer_type = LAYER_MPA|DDP_LLP; | |
1161 | *ecode = MPA_CRC_ERR; | |
1162 | break; | |
1163 | case T4_ERR_MARKER: | |
1164 | *layer_type = LAYER_MPA|DDP_LLP; | |
1165 | *ecode = MPA_MARKER_ERR; | |
1166 | break; | |
1167 | case T4_ERR_PDU_LEN_ERR: | |
1168 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1169 | *ecode = DDPU_MSG_TOOBIG; | |
1170 | break; | |
1171 | case T4_ERR_DDP_VERSION: | |
1172 | if (tagged) { | |
1173 | *layer_type = LAYER_DDP|DDP_TAGGED_ERR; | |
1174 | *ecode = DDPT_INV_VERS; | |
1175 | } else { | |
1176 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1177 | *ecode = DDPU_INV_VERS; | |
1178 | } | |
1179 | break; | |
1180 | case T4_ERR_RDMA_VERSION: | |
1181 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1182 | *ecode = RDMAP_INV_VERS; | |
1183 | break; | |
1184 | case T4_ERR_OPCODE: | |
1185 | *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; | |
1186 | *ecode = RDMAP_INV_OPCODE; | |
1187 | break; | |
1188 | case T4_ERR_DDP_QUEUE_NUM: | |
1189 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1190 | *ecode = DDPU_INV_QN; | |
1191 | break; | |
1192 | case T4_ERR_MSN: | |
1193 | case T4_ERR_MSN_GAP: | |
1194 | case T4_ERR_MSN_RANGE: | |
1195 | case T4_ERR_IRD_OVERFLOW: | |
1196 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1197 | *ecode = DDPU_INV_MSN_RANGE; | |
1198 | break; | |
1199 | case T4_ERR_TBIT: | |
1200 | *layer_type = LAYER_DDP|DDP_LOCAL_CATA; | |
1201 | *ecode = 0; | |
1202 | break; | |
1203 | case T4_ERR_MO: | |
1204 | *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; | |
1205 | *ecode = DDPU_INV_MO; | |
1206 | break; | |
1207 | default: | |
1208 | *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; | |
1209 | *ecode = 0; | |
1210 | break; | |
1211 | } | |
1212 | } | |
1213 | ||
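/*
 * post_terminate() sends a FW_RI_INIT_WR of type TERMINATE for this
 * connection. The skb comes from the endpoint's pre-allocated
 * ep_skb_list so this error path cannot fail on memory allocation.
 */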
be4c9bad RD |
1214 | static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, |
1215 | gfp_t gfp) | |
cfdda9d7 SW |
1216 | { |
1217 | struct fw_ri_wr *wqe; | |
1218 | struct sk_buff *skb; | |
1219 | struct terminate_message *term; | |
1220 | ||
a9a42886 JP |
1221 | pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, |
1222 | qhp->ep->hwtid); | |
cfdda9d7 | 1223 | |
4a740838 H |
1224 | skb = skb_dequeue(&qhp->ep->com.ep_skb_list); |
1225 | if (WARN_ON(!skb)) | |
be4c9bad | 1226 | return; |
4a740838 | 1227 | |
cfdda9d7 SW |
1228 | set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); |
1229 | ||
4df864c1 | 1230 | wqe = __skb_put(skb, sizeof(*wqe)); |
cfdda9d7 | 1231 | memset(wqe, 0, sizeof *wqe); |
e2ac9628 | 1232 | wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); |
cfdda9d7 | 1233 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1234 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1235 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
cfdda9d7 SW |
1236 | |
1237 | wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; | |
1238 | wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); | |
1239 | term = (struct terminate_message *)wqe->u.terminate.termmsg; | |
d2fe99e8 KS |
1240 | if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { |
1241 | term->layer_etype = qhp->attr.layer_etype; | |
1242 | term->ecode = qhp->attr.ecode; | |
1243 | } else | |
1244 | build_term_codes(err_cqe, &term->layer_etype, &term->ecode); | |
be4c9bad | 1245 | c4iw_ofld_send(&qhp->rhp->rdev, skb); |
cfdda9d7 SW |
1246 | } |
1247 | ||
1248 | /* | |
1249 | * Assumes the qhp mutex is held; the cq and qp spinlocks are taken here. |
1250 | */ | |
1251 | static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |
2f5b48c3 | 1252 | struct c4iw_cq *schp) |
cfdda9d7 SW |
1253 | { |
1254 | int count; | |
678ea9b5 | 1255 | int rq_flushed, sq_flushed; |
2f5b48c3 | 1256 | unsigned long flag; |
cfdda9d7 | 1257 | |
a9a42886 | 1258 | pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); |
cfdda9d7 | 1259 | |
732bee7a | 1260 | /* locking hierarchy: cq lock first, then qp lock. */ |
2f5b48c3 | 1261 | spin_lock_irqsave(&rchp->lock, flag); |
cfdda9d7 | 1262 | spin_lock(&qhp->lock); |
1cf24dce SW |
1263 | |
1264 | if (qhp->wq.flushed) { | |
1265 | spin_unlock(&qhp->lock); | |
1266 | spin_unlock_irqrestore(&rchp->lock, flag); | |
1267 | return; | |
1268 | } | |
1269 | qhp->wq.flushed = 1; | |
1270 | ||
1271 | c4iw_flush_hw_cq(rchp); | |
cfdda9d7 | 1272 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); |
678ea9b5 | 1273 | rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
cfdda9d7 | 1274 | spin_unlock(&qhp->lock); |
2f5b48c3 | 1275 | spin_unlock_irqrestore(&rchp->lock, flag); |
cfdda9d7 | 1276 | |
732bee7a | 1277 | /* locking hierarchy: cq lock first, then qp lock. */ |
2f5b48c3 | 1278 | spin_lock_irqsave(&schp->lock, flag); |
cfdda9d7 | 1279 | spin_lock(&qhp->lock); |
1cf24dce SW |
1280 | if (schp != rchp) |
1281 | c4iw_flush_hw_cq(schp); | |
678ea9b5 | 1282 | sq_flushed = c4iw_flush_sq(qhp); |
cfdda9d7 | 1283 | spin_unlock(&qhp->lock); |
2f5b48c3 | 1284 | spin_unlock_irqrestore(&schp->lock, flag); |
678ea9b5 SW |
1285 | |
1286 | if (schp == rchp) { | |
1287 | if (t4_clear_cq_armed(&rchp->cq) && | |
1288 | (rq_flushed || sq_flushed)) { | |
1289 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | |
1290 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | |
1291 | rchp->ibcq.cq_context); | |
1292 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | |
1293 | } | |
1294 | } else { | |
1295 | if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { | |
1296 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | |
1297 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | |
1298 | rchp->ibcq.cq_context); | |
1299 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | |
1300 | } | |
1301 | if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { | |
1302 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | |
1303 | (*schp->ibcq.comp_handler)(&schp->ibcq, | |
1304 | schp->ibcq.cq_context); | |
1305 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | |
1306 | } | |
581bbe2c | 1307 | } |
cfdda9d7 SW |
1308 | } |
1309 | ||
2f5b48c3 | 1310 | static void flush_qp(struct c4iw_qp *qhp) |
cfdda9d7 SW |
1311 | { |
1312 | struct c4iw_cq *rchp, *schp; | |
581bbe2c | 1313 | unsigned long flag; |
cfdda9d7 | 1314 | |
1cf24dce SW |
1315 | rchp = to_c4iw_cq(qhp->ibqp.recv_cq); |
1316 | schp = to_c4iw_cq(qhp->ibqp.send_cq); | |
cfdda9d7 | 1317 | |
1cf24dce | 1318 | t4_set_wq_in_error(&qhp->wq); |
cfdda9d7 | 1319 | if (qhp->ibqp.uobject) { |
cfdda9d7 | 1320 | t4_set_cq_in_error(&rchp->cq); |
581bbe2c | 1321 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
01e7da6b | 1322 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
581bbe2c | 1323 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); |
01e7da6b | 1324 | if (schp != rchp) { |
cfdda9d7 | 1325 | t4_set_cq_in_error(&schp->cq); |
581bbe2c | 1326 | spin_lock_irqsave(&schp->comp_handler_lock, flag); |
01e7da6b KS |
1327 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
1328 | schp->ibcq.cq_context); | |
581bbe2c | 1329 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); |
01e7da6b | 1330 | } |
cfdda9d7 SW |
1331 | return; |
1332 | } | |
2f5b48c3 | 1333 | __flush_qp(qhp, rchp, schp); |
cfdda9d7 SW |
1334 | } |
1335 | ||
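/*
 * rdma_fini() posts a FW_RI_INIT_WR of type FINI to take the
 * connection out of RDMA mode, then blocks on the endpoint's
 * wr_wait until firmware acknowledges completion.
 */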
73d6fcad SW |
1336 | static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, |
1337 | struct c4iw_ep *ep) | |
cfdda9d7 SW |
1338 | { |
1339 | struct fw_ri_wr *wqe; | |
1340 | int ret; | |
cfdda9d7 SW |
1341 | struct sk_buff *skb; |
1342 | ||
a9a42886 JP |
1343 | pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, |
1344 | ep->hwtid); | |
cfdda9d7 | 1345 | |
4a740838 H |
1346 | skb = skb_dequeue(&ep->com.ep_skb_list); |
1347 | if (WARN_ON(!skb)) | |
cfdda9d7 | 1348 | return -ENOMEM; |
4a740838 | 1349 | |
73d6fcad | 1350 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); |
cfdda9d7 | 1351 | |
4df864c1 | 1352 | wqe = __skb_put(skb, sizeof(*wqe)); |
cfdda9d7 SW |
1353 | memset(wqe, 0, sizeof *wqe); |
1354 | wqe->op_compl = cpu_to_be32( | |
e2ac9628 HS |
1355 | FW_WR_OP_V(FW_RI_INIT_WR) | |
1356 | FW_WR_COMPL_F); | |
cfdda9d7 | 1357 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1358 | FW_WR_FLOWID_V(ep->hwtid) | |
1359 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
6198dd8d | 1360 | wqe->cookie = (uintptr_t)&ep->com.wr_wait; |
cfdda9d7 SW |
1361 | |
1362 | wqe->u.fini.type = FW_RI_TYPE_FINI; | |
cfdda9d7 SW |
1363 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
1364 | if (ret) | |
1365 | goto out; | |
1366 | ||
2f5b48c3 | 1367 | ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, |
aadc4df3 | 1368 | qhp->wq.sq.qid, __func__); |
cfdda9d7 | 1369 | out: |
a9a42886 | 1370 | pr_debug("%s ret %d\n", __func__, ret); |
cfdda9d7 SW |
1371 | return ret; |
1372 | } | |
1373 | ||
1374 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) | |
1375 | { | |
a9a42886 | 1376 | pr_debug("%s p2p_type = %d\n", __func__, p2p_type); |
cfdda9d7 SW |
1377 | memset(&init->u, 0, sizeof init->u); |
1378 | switch (p2p_type) { | |
1379 | case FW_RI_INIT_P2PTYPE_RDMA_WRITE: | |
1380 | init->u.write.opcode = FW_RI_RDMA_WRITE_WR; | |
1381 | init->u.write.stag_sink = cpu_to_be32(1); | |
1382 | init->u.write.to_sink = cpu_to_be64(1); | |
1383 | init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; | |
1384 | init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write + | |
1385 | sizeof(struct fw_ri_immd), | |
1386 | 16); | |
1387 | break; | |
1388 | case FW_RI_INIT_P2PTYPE_READ_REQ: | |
1389 | init->u.write.opcode = FW_RI_RDMA_READ_WR; | |
1390 | init->u.read.stag_src = cpu_to_be32(1); | |
1391 | init->u.read.to_src_lo = cpu_to_be32(1); | |
1392 | init->u.read.stag_sink = cpu_to_be32(1); | |
1393 | init->u.read.to_sink_lo = cpu_to_be32(1); | |
1394 | init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16); | |
1395 | break; | |
1396 | } | |
1397 | } | |
1398 | ||
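/*
 * rdma_init() moves the QP to RTS: it reserves max_ird from the
 * device IRD pool, posts a FW_RI_INIT_WR of type INIT carrying the
 * negotiated MPA attributes, queue ids, RQT address and sequence
 * numbers, then waits for the firmware reply. The IRD reservation
 * is released if initialization fails.
 */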
1399 | static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |
1400 | { | |
1401 | struct fw_ri_wr *wqe; | |
1402 | int ret; | |
cfdda9d7 SW |
1403 | struct sk_buff *skb; |
1404 | ||
a9a42886 JP |
1405 | pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp, |
1406 | qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); | |
cfdda9d7 | 1407 | |
d3c814e8 | 1408 | skb = alloc_skb(sizeof *wqe, GFP_KERNEL); |
4c2c5763 HS |
1409 | if (!skb) { |
1410 | ret = -ENOMEM; | |
1411 | goto out; | |
1412 | } | |
1413 | ret = alloc_ird(rhp, qhp->attr.max_ird); | |
1414 | if (ret) { | |
1415 | qhp->attr.max_ird = 0; | |
1416 | kfree_skb(skb); | |
1417 | goto out; | |
1418 | } | |
cfdda9d7 SW |
1419 | set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); |
1420 | ||
4df864c1 | 1421 | wqe = __skb_put(skb, sizeof(*wqe)); |
cfdda9d7 SW |
1422 | memset(wqe, 0, sizeof *wqe); |
1423 | wqe->op_compl = cpu_to_be32( | |
e2ac9628 HS |
1424 | FW_WR_OP_V(FW_RI_INIT_WR) | |
1425 | FW_WR_COMPL_F); | |
cfdda9d7 | 1426 | wqe->flowid_len16 = cpu_to_be32( |
e2ac9628 HS |
1427 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1428 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | |
cfdda9d7 | 1429 | |
6198dd8d | 1430 | wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; |
cfdda9d7 SW |
1431 | |
1432 | wqe->u.init.type = FW_RI_TYPE_INIT; | |
1433 | wqe->u.init.mpareqbit_p2ptype = | |
cf7fe64a HS |
1434 | FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | |
1435 | FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); | |
cfdda9d7 SW |
1436 | wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; |
1437 | if (qhp->attr.mpa_attr.recv_marker_enabled) | |
1438 | wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; | |
1439 | if (qhp->attr.mpa_attr.xmit_marker_enabled) | |
1440 | wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; | |
1441 | if (qhp->attr.mpa_attr.crc_enabled) | |
1442 | wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; | |
1443 | ||
1444 | wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | | |
1445 | FW_RI_QP_RDMA_WRITE_ENABLE | | |
1446 | FW_RI_QP_BIND_ENABLE; | |
1447 | if (!qhp->ibqp.uobject) | |
1448 | wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | | |
1449 | FW_RI_QP_STAG0_ENABLE; | |
1450 | wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); | |
1451 | wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); | |
1452 | wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); | |
1453 | wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); | |
1454 | wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); | |
1455 | wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); | |
1456 | wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); | |
1457 | wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); | |
1458 | wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); | |
1459 | wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); | |
1460 | wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); | |
1461 | wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); | |
1462 | wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - | |
1463 | rhp->rdev.lldi.vr->rq.start); | |
1464 | if (qhp->attr.mpa_attr.initiator) | |
1465 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); | |
1466 | ||
cfdda9d7 SW |
1467 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
1468 | if (ret) | |
4c2c5763 | 1469 | goto err1; |
cfdda9d7 | 1470 | |
2f5b48c3 SW |
1471 | ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, |
1472 | qhp->ep->hwtid, qhp->wq.sq.qid, __func__); | |
4c2c5763 HS |
1473 | if (!ret) |
1474 | goto out; | |
1475 | err1: | |
1476 | free_ird(rhp, qhp->attr.max_ird); | |
cfdda9d7 | 1477 | out: |
a9a42886 | 1478 | pr_debug("%s ret %d\n", __func__, ret); |
cfdda9d7 SW |
1479 | return ret; |
1480 | } | |
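rdma_init() is one instance of the driver's fire-and-wait idiom: the work request's cookie points at the endpoint's wr_wait object, c4iw_ofld_send() hands the skb to the lower-level driver, and the firmware's reply CPL completes the wait. A minimal sketch of the idiom follows, assuming the helpers used above; issue_fw_wr() itself is an illustrative name, not a driver symbol.

    /*
     * Hedged sketch of the fire-and-wait pattern used by rdma_init()
     * and rdma_fini(). issue_fw_wr() is hypothetical; the helpers it
     * calls are the ones used above.
     */
    static int issue_fw_wr(struct c4iw_rdev *rdev, struct c4iw_ep *ep,
                           struct sk_buff *skb, struct fw_ri_wr *wqe, u32 qpid)
    {
            int ret;

            c4iw_init_wr_wait(&ep->com.wr_wait);       /* arm the waiter */
            wqe->cookie = (uintptr_t)&ep->com.wr_wait; /* reply locates it */

            ret = c4iw_ofld_send(rdev, skb);           /* hand off to the LLD */
            if (ret)
                    return ret;

            /* Sleep until the firmware reply completes wr_wait, or time out. */
            return c4iw_wait_for_reply(rdev, &ep->com.wr_wait,
                                       ep->hwtid, qpid, __func__);
    }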
1481 | ||
1482 | int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |
1483 | enum c4iw_qp_attr_mask mask, | |
1484 | struct c4iw_qp_attributes *attrs, | |
1485 | int internal) | |
1486 | { | |
1487 | int ret = 0; | |
1488 | struct c4iw_qp_attributes newattr = qhp->attr; | |
cfdda9d7 SW |
1489 | int disconnect = 0; |
1490 | int terminate = 0; | |
1491 | int abort = 0; | |
1492 | int free = 0; | |
1493 | struct c4iw_ep *ep = NULL; | |
1494 | ||
a9a42886 JP |
1495 | pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", |
1496 | __func__, | |
1497 | qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, | |
1498 | (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); | |
cfdda9d7 | 1499 | |
2f5b48c3 | 1500 | mutex_lock(&qhp->mutex); |
cfdda9d7 SW |
1501 | |
1502 | /* Process attr changes if in IDLE */ | |
1503 | if (mask & C4IW_QP_ATTR_VALID_MODIFY) { | |
1504 | if (qhp->attr.state != C4IW_QP_STATE_IDLE) { | |
1505 | ret = -EIO; | |
1506 | goto out; | |
1507 | } | |
1508 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) | |
1509 | newattr.enable_rdma_read = attrs->enable_rdma_read; | |
1510 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) | |
1511 | newattr.enable_rdma_write = attrs->enable_rdma_write; | |
1512 | if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) | |
1513 | newattr.enable_bind = attrs->enable_bind; | |
1514 | if (mask & C4IW_QP_ATTR_MAX_ORD) { | |
be4c9bad | 1515 | if (attrs->max_ord > c4iw_max_read_depth) { |
cfdda9d7 SW |
1516 | ret = -EINVAL; |
1517 | goto out; | |
1518 | } | |
1519 | newattr.max_ord = attrs->max_ord; | |
1520 | } | |
1521 | if (mask & C4IW_QP_ATTR_MAX_IRD) { | |
4c2c5763 | 1522 | if (attrs->max_ird > cur_max_read_depth(rhp)) { |
cfdda9d7 SW |
1523 | ret = -EINVAL; |
1524 | goto out; | |
1525 | } | |
1526 | newattr.max_ird = attrs->max_ird; | |
1527 | } | |
1528 | qhp->attr = newattr; | |
1529 | } | |
1530 | ||
2c974781 | 1531 | if (mask & C4IW_QP_ATTR_SQ_DB) { |
05eb2389 | 1532 | ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); |
2c974781 VP |
1533 | goto out; |
1534 | } | |
1535 | if (mask & C4IW_QP_ATTR_RQ_DB) { | |
05eb2389 | 1536 | ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); |
2c974781 VP |
1537 | goto out; |
1538 | } | |
1539 | ||
cfdda9d7 SW |
1540 | if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) |
1541 | goto out; | |
1542 | if (qhp->attr.state == attrs->next_state) | |
1543 | goto out; | |
1544 | ||
1545 | switch (qhp->attr.state) { | |
1546 | case C4IW_QP_STATE_IDLE: | |
1547 | switch (attrs->next_state) { | |
1548 | case C4IW_QP_STATE_RTS: | |
1549 | if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { | |
1550 | ret = -EINVAL; | |
1551 | goto out; | |
1552 | } | |
1553 | if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { | |
1554 | ret = -EINVAL; | |
1555 | goto out; | |
1556 | } | |
1557 | qhp->attr.mpa_attr = attrs->mpa_attr; | |
1558 | qhp->attr.llp_stream_handle = attrs->llp_stream_handle; | |
1559 | qhp->ep = qhp->attr.llp_stream_handle; | |
2f5b48c3 | 1560 | set_state(qhp, C4IW_QP_STATE_RTS); |
cfdda9d7 SW |
1561 | |
1562 | /* | |
1563 | * Ref the endpoint here and deref when we | |
1564 | * disassociate the endpoint from the QP. This | |
1565 | * happens in CLOSING->IDLE transition or *->ERROR | |
1566 | * transition. | |
1567 | */ | |
1568 | c4iw_get_ep(&qhp->ep->com); | |
cfdda9d7 | 1569 | ret = rdma_init(rhp, qhp); |
cfdda9d7 SW |
1570 | if (ret) |
1571 | goto err; | |
1572 | break; | |
1573 | case C4IW_QP_STATE_ERROR: | |
2f5b48c3 SW |
1574 | set_state(qhp, C4IW_QP_STATE_ERROR); |
1575 | flush_qp(qhp); | |
cfdda9d7 SW |
1576 | break; |
1577 | default: | |
1578 | ret = -EINVAL; | |
1579 | goto out; | |
1580 | } | |
1581 | break; | |
1582 | case C4IW_QP_STATE_RTS: | |
1583 | switch (attrs->next_state) { | |
1584 | case C4IW_QP_STATE_CLOSING: | |
2c935bc5 | 1585 | BUG_ON(kref_read(&qhp->ep->com.kref) < 2); |
b4e2901c | 1586 | t4_set_wq_in_error(&qhp->wq); |
2f5b48c3 | 1587 | set_state(qhp, C4IW_QP_STATE_CLOSING); |
73d6fcad | 1588 | ep = qhp->ep; |
cfdda9d7 SW |
1589 | if (!internal) { |
1590 | abort = 0; | |
1591 | disconnect = 1; | |
2f5b48c3 | 1592 | c4iw_get_ep(&qhp->ep->com); |
cfdda9d7 | 1593 | } |
73d6fcad | 1594 | ret = rdma_fini(rhp, qhp, ep); |
8da7e7a5 | 1595 | if (ret) |
cfdda9d7 | 1596 | goto err; |
cfdda9d7 SW |
1597 | break; |
1598 | case C4IW_QP_STATE_TERMINATE: | |
b4e2901c | 1599 | t4_set_wq_in_error(&qhp->wq); |
2f5b48c3 | 1600 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
d2fe99e8 KS |
1601 | qhp->attr.layer_etype = attrs->layer_etype; |
1602 | qhp->attr.ecode = attrs->ecode; | |
be4c9bad | 1603 | ep = qhp->ep; |
cc18b939 SW |
1604 | if (!internal) { |
1605 | c4iw_get_ep(&qhp->ep->com); | |
0e42c1f4 | 1606 | terminate = 1; |
cc18b939 SW |
1607 | disconnect = 1; |
1608 | } else { | |
1609 | terminate = qhp->attr.send_term; | |
09992579 SW |
1610 | ret = rdma_fini(rhp, qhp, ep); |
1611 | if (ret) | |
1612 | goto err; | |
1613 | } | |
cfdda9d7 SW |
1614 | break; |
1615 | case C4IW_QP_STATE_ERROR: | |
1cf24dce | 1616 | t4_set_wq_in_error(&qhp->wq); |
b4e2901c | 1617 | set_state(qhp, C4IW_QP_STATE_ERROR); |
cfdda9d7 SW |
1618 | if (!internal) { |
1619 | abort = 1; | |
1620 | disconnect = 1; | |
1621 | ep = qhp->ep; | |
2f5b48c3 | 1622 | c4iw_get_ep(&qhp->ep->com); |
cfdda9d7 SW |
1623 | } |
1624 | goto err; | |
1625 | break; | |
1626 | default: | |
1627 | ret = -EINVAL; | |
1628 | goto out; | |
1629 | } | |
1630 | break; | |
1631 | case C4IW_QP_STATE_CLOSING: | |
4fe7c296 SW |
1632 | |
1633 | /* | |
1634 | * Allow kernel users to move to ERROR for qp draining. | |
1635 | */ | |
1636 | if (!internal && (qhp->ibqp.uobject || attrs->next_state != | |
1637 | C4IW_QP_STATE_ERROR)) { | |
cfdda9d7 SW |
1638 | ret = -EINVAL; |
1639 | goto out; | |
1640 | } | |
1641 | switch (attrs->next_state) { | |
1642 | case C4IW_QP_STATE_IDLE: | |
2f5b48c3 SW |
1643 | flush_qp(qhp); |
1644 | set_state(qhp, C4IW_QP_STATE_IDLE); | |
cfdda9d7 SW |
1645 | qhp->attr.llp_stream_handle = NULL; |
1646 | c4iw_put_ep(&qhp->ep->com); | |
1647 | qhp->ep = NULL; | |
1648 | wake_up(&qhp->wait); | |
1649 | break; | |
1650 | case C4IW_QP_STATE_ERROR: | |
1651 | goto err; | |
1652 | default: | |
1653 | ret = -EINVAL; | |
1654 | goto err; | |
1655 | } | |
1656 | break; | |
1657 | case C4IW_QP_STATE_ERROR: | |
1658 | if (attrs->next_state != C4IW_QP_STATE_IDLE) { | |
1659 | ret = -EINVAL; | |
1660 | goto out; | |
1661 | } | |
1662 | if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { | |
1663 | ret = -EINVAL; | |
1664 | goto out; | |
1665 | } | |
2f5b48c3 | 1666 | set_state(qhp, C4IW_QP_STATE_IDLE); |
cfdda9d7 SW |
1667 | break; |
1668 | case C4IW_QP_STATE_TERMINATE: | |
1669 | if (!internal) { | |
1670 | ret = -EINVAL; | |
1671 | goto out; | |
1672 | } | |
1673 | goto err; | |
1674 | break; | |
1675 | default: | |
700456bd | 1676 | pr_err("%s in a bad state %d\n", __func__, qhp->attr.state); |
cfdda9d7 SW |
1677 | ret = -EINVAL; |
1678 | goto err; | |
1679 | break; | |
1680 | } | |
1681 | goto out; | |
1682 | err: | |
a9a42886 JP |
1683 | pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, |
1684 | qhp->wq.sq.qid); | |
cfdda9d7 SW |
1685 | |
1686 | /* disassociate the LLP connection */ | |
1687 | qhp->attr.llp_stream_handle = NULL; | |
af93fb5d SW |
1688 | if (!ep) |
1689 | ep = qhp->ep; | |
cfdda9d7 | 1690 | qhp->ep = NULL; |
2f5b48c3 | 1691 | set_state(qhp, C4IW_QP_STATE_ERROR); |
cfdda9d7 | 1692 | free = 1; |
91e9c071 | 1693 | abort = 1; |
cfdda9d7 | 1694 | BUG_ON(!ep); |
2f5b48c3 | 1695 | flush_qp(qhp); |
5b341808 | 1696 | wake_up(&qhp->wait); |
cfdda9d7 | 1697 | out: |
2f5b48c3 | 1698 | mutex_unlock(&qhp->mutex); |
cfdda9d7 SW |
1699 | |
1700 | if (terminate) | |
be4c9bad | 1701 | post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); |
cfdda9d7 SW |
1702 | |
1703 | /* | |
1704 | * If disconnect is 1, then we need to initiate a disconnect | |
1705 | * on the EP. This can be a normal close (RTS->CLOSING) or | |
1706 | * an abnormal close (RTS/CLOSING->ERROR). | |
1707 | */ | |
1708 | if (disconnect) { | |
be4c9bad RD |
1709 | c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : |
1710 | GFP_KERNEL); | |
cfdda9d7 SW |
1711 | c4iw_put_ep(&ep->com); |
1712 | } | |
1713 | ||
1714 | /* | |
1715 | * If free is 1, then we've disassociated the EP from the QP | |
1716 | * and we need to dereference the EP. | |
1717 | */ | |
1718 | if (free) | |
1719 | c4iw_put_ep(&ep->com); | |
a9a42886 | 1720 | pr_debug("%s exit state %d\n", __func__, qhp->attr.state); |
cfdda9d7 SW |
1721 | return ret; |
1722 | } | |
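The attrs/mask pair is how both verbs consumers and the driver's own connection-management code drive this state machine. A hedged sketch of a driver-internal caller forcing an established QP into ERROR, mirroring the pattern c4iw_destroy_qp() uses below; the wrapper name is illustrative.

    /* Illustrative wrapper, not a driver symbol. */
    static void example_qp_to_error(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
    {
            struct c4iw_qp_attributes attrs;

            memset(&attrs, 0, sizeof(attrs));
            attrs.next_state = C4IW_QP_STATE_ERROR;

            /*
             * internal=1: driver-initiated. From RTS this takes the err:
             * path above, which flushes the QP and wakes anyone blocked
             * in wait_event(qhp->wait, ...).
             */
            c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
    }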
1723 | ||
1724 | int c4iw_destroy_qp(struct ib_qp *ib_qp) | |
1725 | { | |
1726 | struct c4iw_dev *rhp; | |
1727 | struct c4iw_qp *qhp; | |
1728 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
1729 | |
1730 | qhp = to_c4iw_qp(ib_qp); | |
1731 | rhp = qhp->rhp; | |
1732 | ||
1733 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
d2fe99e8 KS |
1734 | if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) |
1735 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1736 | else | |
1737 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
cfdda9d7 SW |
1738 | wait_event(qhp->wait, !qhp->ep); |
1739 | ||
05eb2389 | 1740 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); |
cfdda9d7 | 1741 | |
05eb2389 SW |
1742 | spin_lock_irq(&rhp->lock); |
1743 | if (!list_empty(&qhp->db_fc_entry)) | |
1744 | list_del_init(&qhp->db_fc_entry); | |
1745 | spin_unlock_irq(&rhp->lock); | |
4c2c5763 | 1746 | free_ird(rhp, qhp->attr.max_ird); |
05eb2389 | 1747 | |
ad61a4c7 SW |
1748 | c4iw_qp_rem_ref(ib_qp); |
1749 | ||
a9a42886 | 1750 | pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); |
cfdda9d7 SW |
1751 | return 0; |
1752 | } | |
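The wait_event() above is the other half of the wake_up(&qhp->wait) calls in c4iw_modify_qp(): destruction cannot proceed until the state machine has detached the endpoint. Reduced to its two sides (wrapper names illustrative, locking elided):

    /* State-machine side (CLOSING->IDLE, or the err: path above). */
    static void example_detach_ep(struct c4iw_qp *qhp)
    {
            c4iw_put_ep(&qhp->ep->com);     /* drop the ref taken at RTS */
            qhp->ep = NULL;
            wake_up(&qhp->wait);            /* releases the destroyer */
    }

    /* Destroy side: sleep until qhp->ep has been cleared. */
    static void example_wait_detach(struct c4iw_qp *qhp)
    {
            wait_event(qhp->wait, !qhp->ep);
    }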
1753 | ||
1754 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |
1755 | struct ib_udata *udata) | |
1756 | { | |
1757 | struct c4iw_dev *rhp; | |
1758 | struct c4iw_qp *qhp; | |
1759 | struct c4iw_pd *php; | |
1760 | struct c4iw_cq *schp; | |
1761 | struct c4iw_cq *rchp; | |
1762 | struct c4iw_create_qp_resp uresp; | |
ff1706f4 | 1763 | unsigned int sqsize, rqsize; |
cfdda9d7 SW |
1764 | struct c4iw_ucontext *ucontext; |
1765 | int ret; | |
a6054df3 H |
1766 | struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; |
1767 | struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; | |
cfdda9d7 | 1768 | |
a9a42886 | 1769 | pr_debug("%s ib_pd %p\n", __func__, pd); |
cfdda9d7 SW |
1770 | |
1771 | if (attrs->qp_type != IB_QPT_RC) | |
1772 | return ERR_PTR(-EINVAL); | |
1773 | ||
1774 | php = to_c4iw_pd(pd); | |
1775 | rhp = php->rhp; | |
1776 | schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); | |
1777 | rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); | |
1778 | if (!schp || !rchp) | |
1779 | return ERR_PTR(-EINVAL); | |
1780 | ||
1781 | if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) | |
1782 | return ERR_PTR(-EINVAL); | |
1783 | ||
66eb19af | 1784 | if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) |
cfdda9d7 | 1785 | return ERR_PTR(-E2BIG); |
66eb19af HS |
1786 | rqsize = attrs->cap.max_recv_wr + 1; |
1787 | if (rqsize < 8) | |
1788 | rqsize = 8; | |
cfdda9d7 | 1789 | |
66eb19af | 1790 | if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) |
cfdda9d7 | 1791 | return ERR_PTR(-E2BIG); |
66eb19af HS |
1792 | sqsize = attrs->cap.max_send_wr + 1; |
1793 | if (sqsize < 8) | |
1794 | sqsize = 8; | |
cfdda9d7 SW |
1795 | |
1796 | ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; | |
1797 | ||
cfdda9d7 SW |
1798 | qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); |
1799 | if (!qhp) | |
1800 | return ERR_PTR(-ENOMEM); | |
1801 | qhp->wq.sq.size = sqsize; | |
66eb19af HS |
1802 | qhp->wq.sq.memsize = |
1803 | (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * | |
1804 | sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); | |
1cf24dce | 1805 | qhp->wq.sq.flush_cidx = -1; |
cfdda9d7 | 1806 | qhp->wq.rq.size = rqsize; |
66eb19af HS |
1807 | qhp->wq.rq.memsize = |
1808 | (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * | |
1809 | sizeof(*qhp->wq.rq.queue); | |
cfdda9d7 SW |
1810 | |
1811 | if (ucontext) { | |
1812 | qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); | |
1813 | qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); | |
1814 | } | |
1815 | ||
cfdda9d7 SW |
1816 | ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, |
1817 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
1818 | if (ret) | |
1819 | goto err1; | |
1820 | ||
1821 | attrs->cap.max_recv_wr = rqsize - 1; | |
1822 | attrs->cap.max_send_wr = sqsize - 1; | |
1823 | attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; | |
1824 | ||
1825 | qhp->rhp = rhp; | |
1826 | qhp->attr.pd = php->pdid; | |
1827 | qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; | |
1828 | qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; | |
1829 | qhp->attr.sq_num_entries = attrs->cap.max_send_wr; | |
1830 | qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; | |
1831 | qhp->attr.sq_max_sges = attrs->cap.max_send_sge; | |
1832 | qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; | |
1833 | qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; | |
1834 | qhp->attr.state = C4IW_QP_STATE_IDLE; | |
1835 | qhp->attr.next_state = C4IW_QP_STATE_IDLE; | |
1836 | qhp->attr.enable_rdma_read = 1; | |
1837 | qhp->attr.enable_rdma_write = 1; | |
1838 | qhp->attr.enable_bind = 1; | |
4c2c5763 HS |
1839 | qhp->attr.max_ord = 0; |
1840 | qhp->attr.max_ird = 0; | |
ba32de9d | 1841 | qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; |
cfdda9d7 | 1842 | spin_lock_init(&qhp->lock); |
2f5b48c3 | 1843 | mutex_init(&qhp->mutex); |
cfdda9d7 | 1844 | init_waitqueue_head(&qhp->wait); |
ad61a4c7 | 1845 | kref_init(&qhp->kref); |
c12a67fe | 1846 | INIT_WORK(&qhp->free_work, free_qp_work); |
cfdda9d7 | 1847 | |
05eb2389 | 1848 | ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); |
cfdda9d7 SW |
1849 | if (ret) |
1850 | goto err2; | |
1851 | ||
cfdda9d7 | 1852 | if (udata) { |
a6054df3 H |
1853 | sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL); |
1854 | if (!sq_key_mm) { | |
cfdda9d7 | 1855 | ret = -ENOMEM; |
30a6a62f | 1856 | goto err3; |
cfdda9d7 | 1857 | } |
a6054df3 H |
1858 | rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL); |
1859 | if (!rq_key_mm) { | |
cfdda9d7 | 1860 | ret = -ENOMEM; |
30a6a62f | 1861 | goto err4; |
cfdda9d7 | 1862 | } |
a6054df3 H |
1863 | sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL); |
1864 | if (!sq_db_key_mm) { | |
cfdda9d7 | 1865 | ret = -ENOMEM; |
30a6a62f | 1866 | goto err5; |
cfdda9d7 | 1867 | } |
a6054df3 H |
1868 | rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL); |
1869 | if (!rq_db_key_mm) { | |
cfdda9d7 | 1870 | ret = -ENOMEM; |
30a6a62f | 1871 | goto err6; |
cfdda9d7 | 1872 | } |
c6d7b267 | 1873 | if (t4_sq_onchip(&qhp->wq.sq)) { |
a6054df3 H |
1874 | ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm), |
1875 | GFP_KERNEL); | |
1876 | if (!ma_sync_key_mm) { | |
c6d7b267 SW |
1877 | ret = -ENOMEM; |
1878 | goto err7; | |
1879 | } | |
1880 | uresp.flags = C4IW_QPF_ONCHIP; | |
1881 | } else | |
1882 | uresp.flags = 0; | |
cfdda9d7 SW |
1883 | uresp.qid_mask = rhp->rdev.qpmask; |
1884 | uresp.sqid = qhp->wq.sq.qid; | |
1885 | uresp.sq_size = qhp->wq.sq.size; | |
1886 | uresp.sq_memsize = qhp->wq.sq.memsize; | |
1887 | uresp.rqid = qhp->wq.rq.qid; | |
1888 | uresp.rq_size = qhp->wq.rq.size; | |
1889 | uresp.rq_memsize = qhp->wq.rq.memsize; | |
1890 | spin_lock(&ucontext->mmap_lock); | |
a6054df3 | 1891 | if (ma_sync_key_mm) { |
c6d7b267 SW |
1892 | uresp.ma_sync_key = ucontext->key; |
1893 | ucontext->key += PAGE_SIZE; | |
ae1fe07f DC |
1894 | } else { |
1895 | uresp.ma_sync_key = 0; | |
c6d7b267 | 1896 | } |
cfdda9d7 SW |
1897 | uresp.sq_key = ucontext->key; |
1898 | ucontext->key += PAGE_SIZE; | |
1899 | uresp.rq_key = ucontext->key; | |
1900 | ucontext->key += PAGE_SIZE; | |
1901 | uresp.sq_db_gts_key = ucontext->key; | |
1902 | ucontext->key += PAGE_SIZE; | |
1903 | uresp.rq_db_gts_key = ucontext->key; | |
1904 | ucontext->key += PAGE_SIZE; | |
1905 | spin_unlock(&ucontext->mmap_lock); | |
1906 | ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | |
1907 | if (ret) | |
c6d7b267 | 1908 | goto err8; |
a6054df3 H |
1909 | sq_key_mm->key = uresp.sq_key; |
1910 | sq_key_mm->addr = qhp->wq.sq.phys_addr; | |
1911 | sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); | |
1912 | insert_mmap(ucontext, sq_key_mm); | |
1913 | rq_key_mm->key = uresp.rq_key; | |
1914 | rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); | |
1915 | rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); | |
1916 | insert_mmap(ucontext, rq_key_mm); | |
1917 | sq_db_key_mm->key = uresp.sq_db_gts_key; | |
1918 | sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; | |
1919 | sq_db_key_mm->len = PAGE_SIZE; | |
1920 | insert_mmap(ucontext, sq_db_key_mm); | |
1921 | rq_db_key_mm->key = uresp.rq_db_gts_key; | |
1922 | rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa; | |
1923 | rq_db_key_mm->len = PAGE_SIZE; | |
1924 | insert_mmap(ucontext, rq_db_key_mm); | |
1925 | if (ma_sync_key_mm) { | |
1926 | ma_sync_key_mm->key = uresp.ma_sync_key; | |
1927 | ma_sync_key_mm->addr = | |
1928 | (pci_resource_start(rhp->rdev.lldi.pdev, 0) + | |
1929 | PCIE_MA_SYNC_A) & PAGE_MASK; | |
1930 | ma_sync_key_mm->len = PAGE_SIZE; | |
1931 | insert_mmap(ucontext, ma_sync_key_mm); | |
c6d7b267 | 1932 | } |
c12a67fe SW |
1933 | |
1934 | c4iw_get_ucontext(ucontext); | |
1935 | qhp->ucontext = ucontext; | |
cfdda9d7 SW |
1936 | } |
1937 | qhp->ibqp.qp_num = qhp->wq.sq.qid; | |
1938 | init_timer(&qhp->timer); | |
05eb2389 | 1939 | INIT_LIST_HEAD(&qhp->db_fc_entry); |
a9a42886 JP |
1940 | pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n", |
1941 | __func__, | |
1942 | qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, | |
1943 | attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, | |
1944 | qhp->wq.rq.memsize, attrs->cap.max_recv_wr); | |
cfdda9d7 | 1945 | return &qhp->ibqp; |
c6d7b267 | 1946 | err8: |
a6054df3 | 1947 | kfree(ma_sync_key_mm); |
cfdda9d7 | 1948 | err7: |
a6054df3 | 1949 | kfree(rq_db_key_mm); |
cfdda9d7 | 1950 | err6: |
a6054df3 | 1951 | kfree(sq_db_key_mm); |
cfdda9d7 | 1952 | err5: |
a6054df3 | 1953 | kfree(rq_key_mm); |
cfdda9d7 | 1954 | err4: |
a6054df3 | 1955 | kfree(sq_key_mm); |
cfdda9d7 SW |
1956 | err3: |
1957 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); | |
1958 | err2: | |
1959 | destroy_qp(&rhp->rdev, &qhp->wq, | |
1960 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | |
1961 | err1: | |
1962 | kfree(qhp); | |
1963 | return ERR_PTR(ret); | |
1964 | } | |
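Each *_key handed back in uresp is a per-context mmap offset: the kernel's mmap handler matches the page offset against the keys recorded via insert_mmap() and maps the corresponding queue memory or BAR2 doorbell/GTS page. A userspace-side sketch of consuming those keys; the command-fd plumbing and names are assumptions about the provider library, not taken from this file.

    #include <stdint.h>
    #include <sys/mman.h>

    /* Map one queue or doorbell page by the key the kernel returned. */
    static void *map_queue(int cmd_fd, uint64_t key, size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                           cmd_fd, (off_t)key);

            return p == MAP_FAILED ? NULL : p;
    }

    /* e.g.  sq    = map_queue(cmd_fd, resp.sq_key, resp.sq_memsize);
     *       sq_db = map_queue(cmd_fd, resp.sq_db_gts_key, page_size);
     */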
1965 | ||
1966 | int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1967 | int attr_mask, struct ib_udata *udata) | |
1968 | { | |
1969 | struct c4iw_dev *rhp; | |
1970 | struct c4iw_qp *qhp; | |
1971 | enum c4iw_qp_attr_mask mask = 0; | |
1972 | struct c4iw_qp_attributes attrs; | |
1973 | ||
a9a42886 | 1974 | pr_debug("%s ib_qp %p\n", __func__, ibqp); |
cfdda9d7 SW |
1975 | |
1976 | /* iWARP does not support the RTR state */ |
1977 | if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) | |
1978 | attr_mask &= ~IB_QP_STATE; | |
1979 | ||
1980 | /* Make sure we still have something left to do */ | |
1981 | if (!attr_mask) | |
1982 | return 0; | |
1983 | ||
1984 | memset(&attrs, 0, sizeof(attrs)); | |
1985 | qhp = to_c4iw_qp(ibqp); | |
1986 | rhp = qhp->rhp; | |
1987 | ||
1988 | attrs.next_state = c4iw_convert_state(attr->qp_state); | |
1989 | attrs.enable_rdma_read = (attr->qp_access_flags & | |
1990 | IB_ACCESS_REMOTE_READ) ? 1 : 0; | |
1991 | attrs.enable_rdma_write = (attr->qp_access_flags & | |
1992 | IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | |
1993 | attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; | |
1994 | ||
1995 | ||
1996 | mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0; | |
1997 | mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? | |
1998 | (C4IW_QP_ATTR_ENABLE_RDMA_READ | | |
1999 | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | | |
2000 | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; | |
2001 | ||
2c974781 VP |
2002 | /* |
2003 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for | |
2004 | * ringing the queue db when we're in DB_FULL mode. | |
c2f9da92 | 2005 | * Only allow this on T4 devices. |
2c974781 VP |
2006 | */ |
2007 | attrs.sq_db_inc = attr->sq_psn; | |
2008 | attrs.rq_db_inc = attr->rq_psn; | |
2009 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; | |
2010 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; | |
963cab50 | 2011 | if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && |
c2f9da92 SW |
2012 | (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) |
2013 | return -EINVAL; | |
2c974781 | 2014 | |
cfdda9d7 SW |
2015 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); |
2016 | } | |
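iWARP has no use for packet sequence numbers, so the IB_QP_SQ_PSN/IB_QP_RQ_PSN attributes are free to carry the doorbell index-increment values described in the comment above. A hedged userspace sketch of that T4 recovery handshake using the standard libibverbs call; the wrapper and the increment value are illustrative.

    #include <string.h>
    #include <infiniband/verbs.h>

    /* Ask the kernel to ring the SQ doorbell with index increment 'inc'
     * while the adapter is recovering from a DB-full condition. */
    static int ring_sq_db(struct ibv_qp *qp, uint32_t inc)
    {
            struct ibv_qp_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.sq_psn = inc;      /* lands in attrs.sq_db_inc above */
            return ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
    }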
2017 | ||
2018 | struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) | |
2019 | { | |
a9a42886 | 2020 | pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); |
cfdda9d7 SW |
2021 | return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); |
2022 | } | |
67bbc055 VP |
2023 | |
2024 | int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
2025 | int attr_mask, struct ib_qp_init_attr *init_attr) | |
2026 | { | |
2027 | struct c4iw_qp *qhp = to_c4iw_qp(ibqp); | |
2028 | ||
2029 | memset(attr, 0, sizeof(*attr)); | |
2030 | memset(init_attr, 0, sizeof(*init_attr)); | |
2031 | attr->qp_state = to_ib_qp_state(qhp->attr.state); | |
3e5c02c9 HS |
2032 | init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; |
2033 | init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; | |
2034 | init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; | |
2035 | init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; | |
2036 | init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; | |
2037 | init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; | |
67bbc055 VP |
2038 | return 0; |
2039 | } |
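And the consumer-side view of what this reports, via the matching verbs call; note that everything beyond qp_state is served from the cached qhp->attr values rather than read back from hardware.

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Illustrative: read back the state and capacities reported above. */
    static void dump_qp_caps(struct ibv_qp *qp)
    {
            struct ibv_qp_attr attr;
            struct ibv_qp_init_attr init_attr;

            if (ibv_query_qp(qp, &attr, IBV_QP_STATE, &init_attr))
                    return;
            printf("state %d send_wr %u recv_wr %u inline %u\n",
                   attr.qp_state, init_attr.cap.max_send_wr,
                   init_attr.cap.max_recv_wr, init_attr.cap.max_inline_data);
    }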