/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
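
/*
 * Note: with 0444 permissions the parameter is read-only at runtime; it is
 * visible under /sys/module/rds/parameters/send_batch_count and can only be
 * set at module load time (or, for a built-in rds, on the kernel command
 * line, e.g. "rds.send_batch_count=2048").
 */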

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

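/*
 * acquire_in_xmit()/release_in_xmit() are a simple bit lock on RDS_IN_XMIT
 * in cp->cp_flags: only one caller at a time may work the send queue for a
 * given connection path.  test_and_set_bit_lock()/clear_bit_unlock() provide
 * the acquire/release ordering.
 */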
static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;
	int same_rm = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (rds_destroy_pending(cp->cp_conn)) {
		release_in_xmit(cp);
		ret = -ENETUNREACH; /* don't requeue send work */
		goto out;
	}

	/*
	 * We record the send generation after doing the xmit acquire.
	 * If someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
	WRITE_ONCE(cp->cp_send_gen, send_gen);

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_path_prepare)
		conn->c_trans->xmit_path_prepare(cp);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		if (!rm) {
			same_rm = 0;
		} else {
			same_rm++;
			if (same_rm >= 4096) {
				rds_stats_inc(s_send_stuck_rm);
				ret = -EAGAIN;
				break;
			}
		}

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
			    (rm->rdma.op_active &&
			     test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op.  Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_path_complete)
		conn->c_trans->xmit_path_complete(cp);
	release_in_xmit(cp);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto
	 */
	if (ret == 0) {
		bool raced;

		smp_mb();
		raced = send_gen != READ_ONCE(cp->cp_send_gen);

		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&cp->cp_send_queue)) && !raced) {
			if (batch_count < send_batch_count)
				goto restart;
			rcu_read_lock();
			if (rds_destroy_pending(cp->cp_conn))
				ret = -ENETUNREACH;
			else
				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
			rcu_read_unlock();
		} else if (raced) {
			rds_stats_inc(s_send_lock_queue_raced);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

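/*
 * Drop a message's payload bytes from the socket's sndbuf accounting;
 * called with rs->rs_lock held (asserted below).  This accounting is what
 * rds_send_queue_rm() checks against rds_sk_sndbuf() when admitting new
 * sends.
 */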
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

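/*
 * A message counts as acked either when the transport's is_acked callback
 * says so or, by default, when its header sequence number is at or below
 * the cumulative ack the peer gave us.
 */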
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

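/*
 * Tear down queued sends from this socket to 'dest', or every queued send
 * when 'dest' is NULL (e.g. on socket close or RDS_CANCEL_SENT_TO).
 */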
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest &&
		    (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
		     dest->sin6_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
		 * Taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is only a little space left in sndbuf, we don't queue
	 * anything, and userspace gets -EAGAIN.  But poll() indicates there's
	 * send room.  This can lead to bad behavior (spinning) if snd_bytes
	 * isn't freed up by incoming acks.  So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		sock_hold(rds_rs_to_sk(rs));
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int num_sgs,
		       struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;
	bool zcopy_cookie = false;
	struct rds_iov_vector *iov, *tmp_iov;

	if (num_sgs < 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (vct->indx >= vct->len) {
				vct->len += vct->incr;
				tmp_iov =
					krealloc(vct->vec,
						 vct->len *
						 sizeof(struct rds_iov_vector),
						 GFP_KERNEL);
				if (!tmp_iov) {
					vct->len -= vct->incr;
					return -ENOMEM;
				}
				vct->vec = tmp_iov;
			}
			iov = &vct->vec[vct->indx];
			memset(iov, 0, sizeof(struct rds_iov_vector));
			vct->indx++;
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			zcopy_cookie = true;
			fallthrough;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
		return -EINVAL;

	size += num_sgs * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

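/*
 * Record the zerocopy cookie from RDS_CMSG_ZCOPY_COOKIE on the message's
 * znotifier so completion can later be reported back to the application
 * once the transport is done with the user pages.
 */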
static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	u32 *cookie;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
	    !rm->data.op_mmp_znotifier)
		return -EINVAL;
	cookie = CMSG_DATA(cmsg);
	rm->data.op_mmp_znotifier->z_cookie = *cookie;
	return 0;
}

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr,
			 struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int ret = 0, ind = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (ind >= vct->indx)
				return -ENOMEM;
			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
			ind++;
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			else if (ret == -ENODEV)
				/* Accommodate the get_mr() case which can fail
				 * if the connection isn't established yet.
				 */
				ret = -EAGAIN;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			ret = rds_cmsg_zcopy(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

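/*
 * Pick the connection path for a multipath (MPRDS) send.  Until the
 * handshake ping tells us how many paths the peer supports, non-blocking
 * senders fall back to path 0 rather than sleeping on c_hs_waitq.
 */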
static int rds_send_mprds_hash(struct rds_sock *rs,
			       struct rds_connection *conn, int nonblock)
{
	int hash;

	if (conn->c_npaths == 0)
		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
	else
		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
	if (conn->c_npaths == 0 && hash != 0) {
		rds_send_ping(conn, 0);

		/* The underlying connection is not up yet.  Need to wait
		 * until it is up to be sure that the non-zero c_path can be
		 * used.  But if we are interrupted, we have to use the zero
		 * c_path in case the connection ends up being non-MP capable.
		 */
		if (conn->c_npaths == 0) {
			/* Cannot wait for the connection to be made, so just
			 * use the base c_path.
			 */
			if (nonblock)
				return 0;
			if (wait_event_interruptible(conn->c_hs_waitq,
						     conn->c_npaths != 0))
				hash = 0;
		}
		if (conn->c_npaths == 1)
			hash = 0;
	}
	return hash;
}

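/*
 * Tally the RDMA payload bytes requested via RDS_CMSG_RDMA_ARGS control
 * messages so the total can be checked against RDS_MAX_MSG_SIZE in
 * rds_sendmsg().
 */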
static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
{
	struct rds_rdma_args *args;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
			if (cmsg->cmsg_len <
			    CMSG_LEN(sizeof(struct rds_rdma_args)))
				return -EINVAL;
			args = CMSG_DATA(cmsg);
			*rdma_bytes += args->remote_vec.bytes;
		}
	}
	return 0;
}

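/*
 * Userspace reaches this entry point through sendmsg(2) on an RDS socket,
 * optionally attaching SOL_RDS control messages.  An illustrative sketch
 * (not a definitive API reference; struct layout per uapi linux/rds.h,
 * error handling omitted):
 *
 *	struct rds_rdma_args args = { 0 };	// fill per uapi linux/rds.h
 *	char cbuf[CMSG_SPACE(sizeof(args))];
 *	struct msghdr mh = { .msg_name = &daddr, .msg_namelen = sizeof(daddr),
 *			     .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *	c->cmsg_level = SOL_RDS;
 *	c->cmsg_type = RDS_CMSG_RDMA_ARGS;
 *	c->cmsg_len = CMSG_LEN(sizeof(args));
 *	memcpy(CMSG_DATA(c), &args, sizeof(args));
 *	sendmsg(fd, &mh, 0);
 */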
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;
	struct in6_addr daddr;
	__u32 scope_id = 0;
	size_t rdma_payload_len = 0;
	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
	int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
	int namelen;
	struct rds_iov_vector_arr vct;
	int ind;

	memset(&vct, 0, sizeof(vct));

	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
	vct.incr = 1;

	/* Mirror Linux UDP's handling of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	namelen = msg->msg_namelen;
	if (namelen != 0) {
		if (namelen < sizeof(*usin)) {
			ret = -EINVAL;
			goto out;
		}
		switch (usin->sin_family) {
		case AF_INET:
			if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
			    usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
			    ipv4_is_multicast(usin->sin_addr.s_addr)) {
				ret = -EINVAL;
				goto out;
			}
			ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
			dport = usin->sin_port;
			break;

#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6: {
			int addr_type;

			if (namelen < sizeof(*sin6)) {
				ret = -EINVAL;
				goto out;
			}
			addr_type = ipv6_addr_type(&sin6->sin6_addr);
			if (!(addr_type & IPV6_ADDR_UNICAST)) {
				__be32 addr4;

				if (!(addr_type & IPV6_ADDR_MAPPED)) {
					ret = -EINVAL;
					goto out;
				}

				/* It is a mapped address.  Need to do some
				 * sanity checks.
				 */
				addr4 = sin6->sin6_addr.s6_addr32[3];
				if (addr4 == htonl(INADDR_ANY) ||
				    addr4 == htonl(INADDR_BROADCAST) ||
				    ipv4_is_multicast(addr4)) {
					ret = -EINVAL;
					goto out;
				}
			}
			if (addr_type & IPV6_ADDR_LINKLOCAL) {
				if (sin6->sin6_scope_id == 0) {
					ret = -EINVAL;
					goto out;
				}
				scope_id = sin6->sin6_scope_id;
			}

			daddr = sin6->sin6_addr;
			dport = sin6->sin6_port;
			break;
		}
#endif

		default:
			ret = -EINVAL;
			goto out;
		}
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		scope_id = rs->rs_bound_scope_id;
		release_sock(sk);
	}

	lock_sock(sk);
	if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
		release_sock(sk);
		ret = -ENOTCONN;
		goto out;
	} else if (namelen != 0) {
		/* Cannot send to an IPv4 address using an IPv6 source
		 * address and cannot send to an IPv6 address using an
		 * IPv4 source address.
		 */
		if (ipv6_addr_v4mapped(&daddr) ^
		    ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
			release_sock(sk);
			ret = -EOPNOTSUPP;
			goto out;
		}
		/* If the socket is already bound to a link local address,
		 * it can only send to peers on the same link.  But allow
		 * communicating between link local and non-link local
		 * addresses.
		 */
		if (scope_id != rs->rs_bound_scope_id) {
			if (!scope_id) {
				scope_id = rs->rs_bound_scope_id;
			} else if (rs->rs_bound_scope_id) {
				release_sock(sk);
				ret = -EINVAL;
				goto out;
			}
		}
	}
	release_sock(sk);

	ret = rds_rdma_bytes(msg, &rdma_payload_len);
	if (ret)
		goto out;

	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (zcopy) {
		if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
	}
	/* size of rm including all sgs */
	ret = rds_rm_size(msg, num_sgs, &vct);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
		if (IS_ERR(rm->data.op_sg)) {
			ret = PTR_ERR(rm->data.op_sg);
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
	    rs->rs_tos == rs->rs_conn->c_tos) {
		conn = rs->rs_conn;
	} else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						&rs->rs_bound_addr, &daddr,
						rs->rs_transport, rs->rs_tos,
						sock->sk->sk_allocation,
						scope_id);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	if (conn->c_trans->t_mp_capable)
		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
	else
		cpath = &conn->c_path[0];

	rm->m_conn_path = cpath;

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rds_destroy_pending(conn)) {
		ret = -EAGAIN;
		goto out;
	}

	if (rds_conn_path_down(cpath))
		rds_check_all_paths(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN) {
		ret = 0;
		rcu_read_lock();
		if (rds_destroy_pending(cpath->cp_conn))
			ret = -ENETUNREACH;
		else
			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
		rcu_read_unlock();
	}
	if (ret)
		goto out;
	rds_message_put(rm);

	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	return payload_len;

out:
	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * send out a probe. Can be shared by rds_send_ping,
 * rds_send_pong, rds_send_hb.
 * rds_send_hb should use h_flags
 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
 * or
 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
 */
static int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
	       __be16 dport, u8 h_flags)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
				    cp->cp_next_tx_seq);
	rm->m_inc.i_hdr.h_flags |= h_flags;
	cp->cp_next_tx_seq++;

	if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
	    cp->cp_conn->c_trans->t_mp_capable) {
		u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
		u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);

		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_NPATHS, &npaths,
					  sizeof(npaths));
		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_GEN_NUM,
					  &my_gen_num,
					  sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
	rcu_read_unlock();

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}

int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	return rds_send_probe(cp, 0, dport, 0);
}

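/*
 * Send the MPRDS handshake ping at most once per connection; the probe uses
 * the RDS_FLAG_PROBE_PORT source port and carries the NPATHS/GEN_NUM
 * extension headers added in rds_send_probe().
 */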
void
rds_send_ping(struct rds_connection *conn, int cp_index)
{
	unsigned long flags;
	struct rds_conn_path *cp = &conn->c_path[cp_index];

	spin_lock_irqsave(&cp->cp_lock, flags);
	if (conn->c_ping_triggered) {
		spin_unlock_irqrestore(&cp->cp_lock, flags);
		return;
	}
	conn->c_ping_triggered = 1;
	spin_unlock_irqrestore(&cp->cp_lock, flags);
	rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
}
EXPORT_SYMBOL_GPL(rds_send_ping);