/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

/* Free frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
	__free_page(sg_page(&frag->f_sg));
	kmem_cache_free(rds_ib_frag_slab, frag);
}
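
/*
 * Initialize every recv work request in the ring: a fixed wr_id per
 * slot and a two-entry scatter list, entry 0 pointing at that slot's
 * rds_header in the DMA'd header array and entry 1 reserved for the
 * RDS_FRAG_SIZE data fragment that is attached at refill time.
 */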
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_mr->lkey;
	}
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			goto out;
		}
		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
		if (!recv->r_ibinc) {
			atomic_dec(&rds_ib_allocation);
			goto out;
		}
		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
	if (!recv->r_frag)
		goto out;
	INIT_LIST_HEAD(&recv->r_frag->f_item);
	sg_init_table(&recv->r_frag->f_sg, 1);
	ret = rds_page_remainder_alloc(&recv->r_frag->f_sg,
				       RDS_FRAG_SIZE, GFP_NOWAIT);
	if (ret) {
		kmem_cache_free(rds_ib_frag_slab, recv->r_frag);
		recv->r_frag = NULL;
		goto out;
	}

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv);
		if (ret) {
			ret = -1;
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}
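
/* Unlink and free every fragment still queued on an incoming message */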
static void rds_ib_inc_purge(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);

	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(frag);
	}
}

void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	rds_ib_inc_purge(inc);
	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	BUG_ON(!list_empty(&ibinc->ii_frags));
	kmem_cache_free(rds_ib_incoming_slab, ibinc);
	atomic_dec(&rds_ib_allocation);
	BUG_ON(atomic_read(&rds_ib_allocation) < 0);
}
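
/*
 * Copy message data out to userspace, walking the fragment list and the
 * caller's iovec in parallel and copying the smaller of the remaining
 * fragment and iovec pieces at each step.
 */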
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
					    frag->f_sg.offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif
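
/*
 * Post the single, preallocated ACK WR, carrying the most recent ack
 * sequence and any credits being advertised; if the post fails, force
 * another ACK attempt.
 */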
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue).  If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms.  Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly.  When we
 * reconnect, we may be seeing duplicate packets.  The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them.  It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, i.e.
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.  Copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
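
/*
 * Drain completed receives from the CQ one at a time, unmapping each
 * fragment and handing it to rds_ib_process_recv() while ack state
 * accumulates across the whole batch.
 */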
static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_ib_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_ib_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}
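
/*
 * Tasklet bottom half: poll the CQ, re-arm the completion notification,
 * then poll once more to close the race with completions that arrive
 * between the two, before applying the gathered ack state and topping
 * up the ring if it has run low.
 */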
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}

int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}

int __init rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;
	/* Default to roughly a third of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
825 | ||
826 | rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming", | |
827 | sizeof(struct rds_ib_incoming), | |
828 | 0, 0, NULL); | |
8690bfa1 | 829 | if (!rds_ib_incoming_slab) |
1e23b3ee AG |
830 | goto out; |
831 | ||
832 | rds_ib_frag_slab = kmem_cache_create("rds_ib_frag", | |
833 | sizeof(struct rds_page_frag), | |
834 | 0, 0, NULL); | |
8690bfa1 | 835 | if (!rds_ib_frag_slab) |
1e23b3ee AG |
836 | kmem_cache_destroy(rds_ib_incoming_slab); |
837 | else | |
838 | ret = 0; | |
839 | out: | |
840 | return ret; | |
841 | } | |
842 | ||
843 | void rds_ib_recv_exit(void) | |
844 | { | |
845 | kmem_cache_destroy(rds_ib_incoming_slab); | |
846 | kmem_cache_destroy(rds_ib_frag_slab); | |
847 | } |