/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ips_common.h"

/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the QP
 * @ss: the SGE state
 * @length: the length of the data to send
 * @wr: the work request
 * @wc: the work completion entry
 *
 * This is called from ipath_post_ud_send() to forward a WQE addressed
 * to the same HCA.
 */
void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
                       u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ib_ah_attr *ah_attr;
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_sge_state rsge;
        struct ipath_sge *sge;
        struct ipath_rwqe *wqe;

        qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
        if (!qp)
                return;

        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
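        /* The (int) cast below tests the high bit: a negative value
         * means the WR's qkey has its top bit set. */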
        if (unlikely(qp->ibqp.qp_num &&
                     ((int) wr->wr.ud.remote_qkey < 0
                      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
                /* XXX OK to lose a count once in a while. */
                dev->qkey_violations++;
                dev->n_pkt_drops++;
                goto done;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc->byte_len = length + sizeof(struct ib_grh);

        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                wc->wc_flags = IB_WC_WITH_IMM;
                wc->imm_data = wr->imm_data;
        } else {
                wc->wc_flags = 0;
                wc->imm_data = 0;
        }

        /*
         * Get the next work request entry to find where to put the data.
         * Note that it is safe to drop the lock after changing rq->tail
         * since ipath_post_receive() won't fill the empty slot.
         */
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                rq = &srq->rq;
        } else {
                srq = NULL;
                rq = &qp->r_rq;
        }
        spin_lock_irqsave(&rq->lock, flags);
        if (rq->tail == rq->head) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto done;
        }
        /* Silently drop packets which are too big. */
        wqe = get_rwqe_ptr(rq, rq->tail);
        if (wc->byte_len > wqe->length) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto done;
        }
        wc->wr_id = wqe->wr_id;
        rsge.sge = wqe->sg_list[0];
        rsge.sg_list = wqe->sg_list + 1;
        rsge.num_sge = wqe->num_sge;
        if (++rq->tail >= rq->size)
                rq->tail = 0;
        if (srq && srq->ibsrq.event_handler) {
                u32 n;

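                /* Count the receive WQEs still posted (the ring indices
                 * wrap) and fire the SRQ limit event if the count has
                 * dropped below the armed threshold. */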
                if (rq->head < rq->tail)
                        n = rq->size + rq->head - rq->tail;
                else
                        n = rq->head - rq->tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        srq->ibsrq.event_handler(&ev,
                                                 srq->ibsrq.srq_context);
                } else
                        spin_unlock_irqrestore(&rq->lock, flags);
        } else
                spin_unlock_irqrestore(&rq->lock, flags);
        ah_attr = &to_iah(wr->wr.ud.ah)->attr;
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
                wc->wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&rsge, sizeof(struct ib_grh));
        sge = &ss->sge;
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                BUG_ON(len == 0);
                ipath_copy_sge(&rsge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->vendor_err = 0;
        wc->qp_num = qp->ibqp.qp_num;
        wc->src_qp = sqp->ibqp.qp_num;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc->pkey_index = 0;
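        /* The low LMC bits of the SLID carry the sender's path bits;
         * (1 << LMC) - 1 is the path-bit mask. */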
        wc->slid = ipath_layer_get_lid(dev->dd) |
                (ah_attr->src_path_bits &
                 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
        wc->sl = ah_attr->sl;
        wc->dlid_path_bits =
                ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
                       wr->send_flags & IB_SEND_SOLICITED);

done:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * ipath_post_ud_send - post a UD send on QP
 * @qp: the QP
 * @wr: the work request
 *
 * Note that we actually send the data as it is posted instead of putting
 * the request into a ring buffer.  If we wanted to use a ring buffer,
 * we would need to save a reference to the destination address in the SWQE.
 */
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
        struct ipath_sge_state ss;
        struct ipath_sge *sg_list = NULL;       /* freed at bail */
        struct ib_wc wc;
        u32 hwords;
        u32 nwords;
        u32 len;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int i;
        int ret;

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
                ret = 0;
                goto bail;
        }

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge) {
                ret = -EINVAL;
                goto bail;
        }

        if (wr->num_sge > 1) {
                sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
                                  GFP_ATOMIC);
                if (!sg_list) {
                        ret = -ENOMEM;
                        goto bail;
                }
        }

        /* Check the buffer to send. */
        ss.sg_list = sg_list;
        ss.sge.mr = NULL;
        ss.sge.vaddr = NULL;
        ss.sge.length = 0;
        ss.sge.sge_length = 0;
        ss.num_sge = 0;
        len = 0;
        for (i = 0; i < wr->num_sge; i++) {
                /* Check LKEY */
                if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
                        ret = -EINVAL;
                        goto bail;
                }

                if (wr->sg_list[i].length == 0)
                        continue;
                if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
                                   sg_list + ss.num_sge - 1 : &ss.sge,
                                   &wr->sg_list[i], 0)) {
                        ret = -EINVAL;
                        goto bail;
                }
                len += wr->sg_list[i].length;
                ss.num_sge++;
        }
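        /* Round the payload up to a multiple of 4 bytes; the pad count
         * is advertised in BTH bits 20-21 below. */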
        extra_bytes = (4 - len) & 3;
        nwords = (len + extra_bytes) >> 2;

        /* Construct the header. */
        ah_attr = &to_iah(wr->wr.ud.ah)->attr;
        if (ah_attr->dlid == 0) {
                ret = -EINVAL;
                goto bail;
        }
        if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
                if (ah_attr->dlid != IPS_PERMISSIVE_LID)
                        dev->n_multicast_xmit++;
                else
                        dev->n_unicast_xmit++;
        } else {
                dev->n_unicast_xmit++;
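                /* Mask off the path bits; if the base LID is our own
                 * LID, the destination is this HCA and the send is
                 * looped back in software. */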
                lid = ah_attr->dlid &
                        ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
                if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
                        /*
                         * Pass in an uninitialized ib_wc to save stack
                         * space.
                         */
                        ipath_ud_loopback(qp, &ss, len, wr, &wc);
                        goto done;
                }
        }
        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
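                /* LRH (2) + GRH (10) + BTH (3) + DETH (2) = 17 words. */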
                hwords = 17;
                lrh0 = IPS_LRH_GRH;
                ohdr = &qp->s_hdr.u.l.oth;
                qp->s_hdr.u.l.grh.version_tclass_flow =
                        cpu_to_be32((6 << 28) |
                                    (ah_attr->grh.traffic_class << 20) |
                                    ah_attr->grh.flow_label);
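                /* paylen covers everything after the GRH: BTH (3) +
                 * DETH (2) [+ immediate data (1)] + payload + ICRC,
                 * expressed in bytes. */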
                qp->s_hdr.u.l.grh.paylen =
                        cpu_to_be16(((wr->opcode ==
                                      IB_WR_SEND_WITH_IMM ? 6 : 5) +
                                     nwords + SIZE_OF_CRC) << 2);
                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
                qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
                /* The SGID is 32-bit aligned. */
                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
                        dev->gid_prefix;
                qp->s_hdr.u.l.grh.sgid.global.interface_id =
                        ipath_layer_get_guid(dev->dd);
                qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
                /*
                 * Don't worry about sending to locally attached multicast
                 * QPs; the spec leaves the behavior unspecified.
                 */
        } else {
                /* Header size in 32-bit words. */
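                /* LRH (2) + BTH (3) + DETH (2) = 7 words. */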
                hwords = 7;
                lrh0 = IPS_LRH_BTH;
                ohdr = &qp->s_hdr.u.oth;
        }
        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                ohdr->u.ud.imm_data = wr->imm_data;
                wc.imm_data = wr->imm_data;
                hwords += 1;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else if (wr->opcode == IB_WR_SEND) {
                wc.imm_data = 0;
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        } else {
                ret = -EINVAL;
                goto bail;
        }
        lrh0 |= ah_attr->sl << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
        lid = ipath_layer_get_lid(dev->dd);
        if (lid) {
                lid |= ah_attr->src_path_bits &
                        ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
                qp->s_hdr.lrh[3] = cpu_to_be16(lid);
        } else
                qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
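        /* BTH bit 23 is the solicited-event bit. */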
        if (wr->send_flags & IB_SEND_SOLICITED)
                bth0 |= 1 << 23;
        bth0 |= extra_bytes << 20;
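        /* SMPs always go out with the default partition key. */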
        bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
                ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
        ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
                ah_attr->dlid != IPS_PERMISSIVE_LID ?
                __constant_cpu_to_be32(IPS_MULTICAST_QPN) :
                cpu_to_be32(wr->wr.ud.remote_qpn);
        /* XXX Could lose a PSN count but not worth locking */
        ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        ohdr->u.ud.deth[0] = cpu_to_be32((int) wr->wr.ud.remote_qkey < 0 ?
                                         qp->qkey : wr->wr.ud.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
        if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
                             len, &ss))
                dev->n_no_piobuf++;

done:
        /* Queue the completion status entry. */
        if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
            (wr->send_flags & IB_SEND_SIGNALED)) {
                wc.wr_id = wr->wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.vendor_err = 0;
                wc.opcode = IB_WC_SEND;
                wc.byte_len = len;
                wc.qp_num = qp->ibqp.qp_num;
                wc.src_qp = 0;
                wc.wc_flags = 0;
                /* XXX initialize other fields? */
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
        }

        ret = 0;

bail:
        /* kfree(NULL) is a no-op, so this also frees sg_list on the
         * error paths above. */
        kfree(sg_list);
        return ret;
}

/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 pad;
        unsigned long flags;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        u16 dlid;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
                qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12 + 8;      /* LRH + GRH + BTH + DETH */
                /*
                 * The header with GRH is 68 bytes and the core driver sets
                 * the eager header buffer size to 56 bytes, so the last 12
                 * bytes of the IB header are in the data buffer.
                 */
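                /* Those 12 bytes are the last BTH word plus the two
                 * DETH words (qkey and source QP). */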
                header_in_data =
                        ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
                if (header_in_data) {
                        qkey = be32_to_cpu(((__be32 *) data)[1]);
                        src_qp = be32_to_cpu(((__be32 *) data)[2]);
                        data += 12;
                } else {
                        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                }
        }
        src_qp &= IPS_QPN_MASK;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE)) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                if (unlikely(qkey != qp->qkey)) {
                        /* XXX OK to lose a count once in a while. */
                        dev->qkey_violations++;
                        dev->n_pkt_drops++;
                        goto bail;
                }
        } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
                   hdr->lrh[3] == IB_LID_PERMISSIVE) {
                struct ib_smp *smp = (struct ib_smp *) data;

                if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
        }

        /* Get the number of bytes the message was padded by. */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
        if (unlikely(tlen < (hdrsize + pad + 4))) {
                /* Drop incomplete packets. */
                dev->n_pkt_drops++;
                goto bail;
        }
        tlen -= hdrsize + pad + 4;

        /* Drop invalid MAD packets (see 13.5.3.1). */
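        /* SMPs (QP0) must carry 256 bytes of payload and arrive on
         * VL15; GSI MADs (QP1) must also be 256 bytes but must not
         * arrive on VL15. */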
        if (unlikely((qp->ibqp.qp_num == 0 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
                     (qp->ibqp.qp_num == 1 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
                dev->n_pkt_drops++;
                goto bail;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else
                        wc.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                hdrsize += sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.imm_data = 0;
                wc.wc_flags = 0;
        } else {
                dev->n_pkt_drops++;
                goto bail;
        }

        /*
         * Get the next work request entry to find where to put the data.
         * Note that it is safe to drop the lock after changing rq->tail
         * since ipath_post_receive() won't fill the empty slot.
         */
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                rq = &srq->rq;
        } else {
                srq = NULL;
                rq = &qp->r_rq;
        }
        spin_lock_irqsave(&rq->lock, flags);
        if (rq->tail == rq->head) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        /* Silently drop packets which are too big. */
        wqe = get_rwqe_ptr(rq, rq->tail);
        if (wc.byte_len > wqe->length) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        wc.wr_id = wqe->wr_id;
        qp->r_sge.sge = wqe->sg_list[0];
        qp->r_sge.sg_list = wqe->sg_list + 1;
        qp->r_sge.num_sge = wqe->num_sge;
        if (++rq->tail >= rq->size)
                rq->tail = 0;
        if (srq && srq->ibsrq.event_handler) {
                u32 n;

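                /* Same SRQ limit check as in ipath_ud_loopback(): count
                 * the RWQEs still posted and fire the limit event if we
                 * dropped below the armed threshold. */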
                if (rq->head < rq->tail)
                        n = rq->size + rq->head - rq->tail;
                else
                        n = rq->head - rq->tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        srq->ibsrq.event_handler(&ev,
                                                 srq->ibsrq.srq_context);
                } else
                        spin_unlock_irqrestore(&rq->lock, flags);
        } else
                spin_unlock_irqrestore(&rq->lock, flags);
        if (has_grh) {
                ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                               sizeof(struct ib_grh));
                wc.wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
        ipath_copy_sge(&qp->r_sge, data,
                       wc.byte_len - sizeof(struct ib_grh));
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp_num = qp->ibqp.qp_num;
        wc.src_qp = src_qp;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc.pkey_index = 0;
        wc.slid = be16_to_cpu(hdr->lrh[3]);
        wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
        dlid = be16_to_cpu(hdr->lrh[1]);
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
                dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       (ohdr->bth[0] &
                        __constant_cpu_to_be32(1 << 23)) != 0);

bail:;
}