/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

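/* states of the completer state machine. rxe_completer() steps through
 * these states for each response packet (or retransmit timeout) until it
 * either finishes the current iteration (COMPST_DONE) or asks the task
 * to stop and wait for the next event (COMPST_EXIT).
 */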
enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK]	= "GET ACK",
	[COMPST_GET_WQE]	= "GET WQE",
	[COMPST_COMP_WQE]	= "COMP WQE",
	[COMPST_COMP_ACK]	= "COMP ACK",
	[COMPST_CHECK_PSN]	= "CHECK PSN",
	[COMPST_CHECK_ACK]	= "CHECK ACK",
	[COMPST_READ]		= "READ",
	[COMPST_ATOMIC]		= "ATOMIC",
	[COMPST_WRITE_SEND]	= "WRITE/SEND",
	[COMPST_UPDATE_COMP]	= "UPDATE COMP",
	[COMPST_ERROR_RETRY]	= "ERROR RETRY",
	[COMPST_RNR_RETRY]	= "RNR RETRY",
	[COMPST_ERROR]		= "ERROR",
	[COMPST_EXIT]		= "EXIT",
	[COMPST_DONE]		= "DONE",
};

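/* RNR NAK delay in microseconds, indexed by the 5-bit timer code carried
 * in the AETH syndrome of an RNR NAK
 */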
static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};

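/* convert an RNR NAK timer code into a delay in jiffies, rounded up to
 * at least one jiffy
 */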
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

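/* map a send WR opcode to the WC opcode reported in its completion
 * (0xff for anything unexpected)
 */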
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}

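/* retransmit timer callback: flag a completer timeout and schedule the
 * completer task so it can decide whether to retry
 */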
void retransmit_timer(unsigned long data)
{
	struct rxe_qp *qp = (struct rxe_qp *)data;

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}

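/* queue a response packet for the completer and run the completer task,
 * deferring to the tasklet if responses are already backed up
 */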
void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
}

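/* compare the PSN of a response packet against the oldest outstanding
 * WQE and against the PSN the completer expects next
 */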
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is, complete
	 * send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet: if it matches an
		 * uncompleted WQE complete it, else ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}

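/* validate a response packet against the current WQE: check that it
 * continues the expected response sequence, that its opcode matches the
 * WR being completed, and decode any NAK carried in the AETH syndrome
 */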
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		/* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
		 * doesn't have an AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 1);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}

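/* copy the payload of a read response packet into the memory described
 * by the WQE's DMA state
 */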
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;

	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;
	else
		return COMPST_UPDATE_COMP;
}

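/* copy the original value carried in an atomic acknowledge packet back
 * into the memory described by the WQE's DMA state
 */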
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;
	else
		return COMPST_COMP_ACK;
}

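/* build a completion queue entry for the WQE, using the kernel or user
 * space WC layout depending on who owns the QP
 */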
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}

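/* retire the WQE from the send queue, posting a completion to the send
 * CQ if the QP signals all WRs, the WQE is signaled, or the QP is in
 * error, and wake the requester if it is waiting on a fence
 */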
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    (qp->req.state == QP_STATE_ERROR)) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 1);
	}
}

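/* handle an ack that completes the current WQE: release any rd/atomic
 * resource it held, finish a pending SQ drain if one is in progress,
 * then generate the completion
 */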
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}

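/* complete a WQE that does not need an ack or that a later response has
 * implicitly acknowledged, advance the completer PSN and return to fetch
 * the next WQE
 */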
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	qp->comp.opcode = -1;

	if (pkt) {
		if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
			qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}

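/* main completer task: flush queued response packets when the QP is
 * invalid, in error, or reset, otherwise drive the state machine above
 * for each response packet (or retransmit timeout). Returns 0 to be
 * called again and -EAGAIN to stop until the next event.
 */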
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid) {
		while ((skb = skb_dequeue(&qp->resp_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}
		skb = NULL;
		pkt = NULL;

		while (queue_head(qp->sq.queue))
			advance_consumer(qp->sq.queue);

		goto exit;
	}

	if (qp->req.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->resp_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}
		skb = NULL;
		pkt = NULL;

		while ((wqe = queue_head(qp->sq.queue))) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		}

		goto exit;
	}

	if (qp->req.state == QP_STATE_RESET) {
		while ((skb = skb_dequeue(&qp->resp_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}
		skb = NULL;
		pkt = NULL;

		while (queue_head(qp->sq.queue))
			advance_consumer(qp->sq.queue);

		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* restart the retransmit timer if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted))
				goto exit;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					qp->req.need_retry = 1;
					rxe_run_task(&qp->req.task, 1);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
				}

				goto exit;

			} else {
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				goto exit;
			} else {
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
			}

			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet and want the task to
	 * call us again to see if there is anything else to do
	 */
	rxe_drop_ref(qp);
	return 0;
}