// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 *   Author: Zach Brown <zab@zabbo.net>
 *   Author: Peter J. Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 *   This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 *   Portals is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Portals is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 */

#include "socklnd.h"

struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
        struct ksock_tx *tx = NULL;

        if (type == KSOCK_MSG_NOOP) {
                LASSERT(size == KSOCK_NOOP_TX_SIZE);

                /* searching for a noop tx in free list */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                        tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
                                        struct ksock_tx, tx_list);
                        LASSERT(tx->tx_desc_size == size);
                        list_del(&tx->tx_list);
                }

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        }

        if (!tx)
                LIBCFS_ALLOC(tx, size);

        if (!tx)
                return NULL;

        atomic_set(&tx->tx_refcount, 1);
        tx->tx_zc_aborted = 0;
        tx->tx_zc_capable = 0;
        tx->tx_zc_checked = 0;
        tx->tx_desc_size  = size;

        atomic_inc(&ksocknal_data.ksnd_nactive_txs);

        return tx;
}

struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
        struct ksock_tx *tx;

        tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
        if (!tx) {
                CERROR("Can't allocate noop tx desc\n");
                return NULL;
        }

        tx->tx_conn    = NULL;
        tx->tx_lnetmsg = NULL;
        tx->tx_kiov    = NULL;
        tx->tx_nkiov   = 0;
        tx->tx_iov     = tx->tx_frags.virt.iov;
        tx->tx_niov    = 1;
        tx->tx_nonblk  = nonblk;

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = cookie;

        return tx;
}

void
ksocknal_free_tx(struct ksock_tx *tx)
{
        atomic_dec(&ksocknal_data.ksnd_nactive_txs);

        if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                /* it's a noop tx */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        } else {
                LIBCFS_FREE(tx, tx->tx_desc_size);
        }
}

static int
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
        struct kvec *iov = tx->tx_iov;
        int nob;
        int rc;

        LASSERT(tx->tx_niov > 0);

        /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
        rc = ksocknal_lib_send_iov(conn, tx);

        if (rc <= 0)                        /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" iov */
        do {
                LASSERT(tx->tx_niov > 0);

                if (nob < (int)iov->iov_len) {
                        iov->iov_base = (void *)((char *)iov->iov_base + nob);
                        iov->iov_len -= nob;
                        return rc;
                }

                nob -= iov->iov_len;
                tx->tx_iov = ++iov;
                tx->tx_niov--;
        } while (nob);

        return rc;
}
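
/*
 * Worked example of the "consume" loop above: if ksocknal_lib_send_iov()
 * reports rc = 5 against kvec lengths {3, 4}, the first kvec is consumed
 * whole (nob 5 -> 2, tx_niov 2 -> 1) and the second is advanced in place
 * to base + 2, len 2, so the next call resumes exactly where the partial
 * send stopped.
 */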

static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
        struct bio_vec *kiov = tx->tx_kiov;
        int nob;
        int rc;

        LASSERT(!tx->tx_niov);
        LASSERT(tx->tx_nkiov > 0);

        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
        rc = ksocknal_lib_send_kiov(conn, tx);

        if (rc <= 0)                        /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" kiov */
        do {
                LASSERT(tx->tx_nkiov > 0);

                if (nob < (int)kiov->bv_len) {
                        kiov->bv_offset += nob;
                        kiov->bv_len -= nob;
                        return rc;
                }

                nob -= (int)kiov->bv_len;
                tx->tx_kiov = ++kiov;
                tx->tx_nkiov--;
        } while (nob);

        return rc;
}

static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
        int rc;
        int bufnob;

        if (ksocknal_data.ksnd_stall_tx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
        }

        LASSERT(tx->tx_resid);

        rc = ksocknal_connsock_addref(conn);
        if (rc) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        do {
                if (ksocknal_data.ksnd_enomem_tx > 0) {
                        /* testing... */
                        ksocknal_data.ksnd_enomem_tx--;
                        rc = -EAGAIN;
                } else if (tx->tx_niov) {
                        rc = ksocknal_send_iov(conn, tx);
                } else {
                        rc = ksocknal_send_kiov(conn, tx);
                }

                bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
                if (rc > 0)                  /* sent something? */
                        conn->ksnc_tx_bufnob += rc; /* account it */

                if (bufnob < conn->ksnc_tx_bufnob) {
                        /*
                         * allocated send buffer bytes < computed; infer
                         * something got ACKed
                         */
                        conn->ksnc_tx_deadline =
                                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                        conn->ksnc_tx_bufnob = bufnob;
                        mb();
                }

                if (rc <= 0) { /* Didn't write anything? */

                        if (!rc) /* some stacks return 0 instead of -EAGAIN */
                                rc = -EAGAIN;

                        /* Check if EAGAIN is due to memory pressure */
                        if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
                                rc = -ENOMEM;

                        break;
                }

                /* socket's wmem_queued now includes 'rc' bytes */
                atomic_sub(rc, &conn->ksnc_tx_nob);
                rc = 0;

        } while (tx->tx_resid);

        ksocknal_connsock_decref(conn);
        return rc;
}
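
/*
 * Note on the ACK inference above: sk_wmem_queued is the number of bytes
 * the socket still holds in its send queue, while ksnc_tx_bufnob counts
 * the bytes we have pushed since the last reset.  TCP frees queued skbs
 * only as the peer ACKs them, so seeing bufnob < ksnc_tx_bufnob implies
 * some previously queued data was ACKed, which is why the deadline and
 * ksnp_last_alive are refreshed at that point.
 */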

static int
ksocknal_recv_iter(struct ksock_conn *conn)
{
        int nob;
        int rc;

        /*
         * Never touch conn->ksnc_rx_to or change connection
         * status inside ksocknal_lib_recv
         */
        rc = ksocknal_lib_recv(conn);

        if (rc <= 0)
                return rc;

        /* received something... */
        nob = rc;

        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
        conn->ksnc_rx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        mb();                  /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_left -= nob;

        iov_iter_advance(&conn->ksnc_rx_to, nob);
        if (iov_iter_count(&conn->ksnc_rx_to))
                return -EAGAIN;

        return 1;
}

static int
ksocknal_receive(struct ksock_conn *conn)
{
        /*
         * Return 1 on success, 0 on EOF, < 0 on error.
         * Caller checks ksnc_rx_to to determine
         * progress/completion.
         */
        int rc;

        if (ksocknal_data.ksnd_stall_rx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
        }

        rc = ksocknal_connsock_addref(conn);
        if (rc) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        for (;;) {
                rc = ksocknal_recv_iter(conn);
                if (rc <= 0) {
                        /* error/EOF or partial receive */
                        if (rc == -EAGAIN) {
                                rc = 1;
                        } else if (!rc && conn->ksnc_rx_started) {
                                /* EOF in the middle of a message */
                                rc = -EPROTO;
                        }
                        break;
                }

                /* Completed a fragment */

                if (!iov_iter_count(&conn->ksnc_rx_to)) {
                        rc = 1;
                        break;
                }
        }

        ksocknal_connsock_decref(conn);
        return rc;
}

void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx)
{
        struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
        int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;

        LASSERT(ni || tx->tx_conn);

        /* look the NI up before the conn ref is dropped below */
        if (!ni && tx->tx_conn)
                ni = tx->tx_conn->ksnc_peer->ksnp_ni;

        if (tx->tx_conn)
                ksocknal_conn_decref(tx->tx_conn);

        ksocknal_free_tx(tx);
        if (lnetmsg) /* KSOCK_MSG_NOOP txs go without lnetmsg */
                lnet_finalize(ni, lnetmsg, rc);
}

void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
        struct ksock_tx *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, struct ksock_tx, tx_list);

                if (error && tx->tx_lnetmsg) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
                } else if (error) {
                        CNETERR("Deleting noop packet\n");
                }

                list_del(&tx->tx_list);

                LASSERT(atomic_read(&tx->tx_refcount) == 1);
                ksocknal_tx_done(ni, tx);
        }
}

static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
        struct ksock_conn *conn = tx->tx_conn;
        struct ksock_peer *peer = conn->ksnc_peer;

        /*
         * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
         * to ksnp_zc_req_list if some fragment of this message should be sent
         * zero-copy.  The peer will send an ACK containing this cookie when
         * it has received the message, telling us we can signal completion.
         * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
         * ksnp_zc_req_list.
         */
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 1;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
            !conn->ksnc_zc_capable)
                return;

        /*
         * Assign a cookie and queue tx on the pending list; it will be
         * released when a matching ACK is received.  See
         * ksocknal_handle_zcack().
         */
        ksocknal_tx_addref(tx);

        spin_lock(&peer->ksnp_lock);

        /* ZC_REQ is going to be pinned to the peer */
        tx->tx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

        LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);

        tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

        if (!peer->ksnp_zc_next_cookie)
                peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

        spin_unlock(&peer->ksnp_lock);
}
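
/*
 * Cookie lifecycle sketch: ksocknal_check_zc_req() assigns
 * ksm_zc_cookies[0] = N and leaves tx on ksnp_zc_req_list holding an
 * extra ref.  The peer later sends a ZC-ACK naming cookie N; the
 * ksocknal_handle_zcack() path matches it, removes tx from the list and
 * drops the extra ref, completing the zero-copy send.  If no ACK arrives
 * before tx_deadline, the timeout path aborts the tx instead.
 */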

static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
        struct ksock_peer *peer = tx->tx_conn->ksnc_peer;

        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 0;

        spin_lock(&peer->ksnp_lock);

        if (!tx->tx_msg.ksm_zc_cookies[0]) {
                /* Not waiting for an ACK */
                spin_unlock(&peer->ksnp_lock);
                return;
        }

        tx->tx_msg.ksm_zc_cookies[0] = 0;
        list_del(&tx->tx_zc_list);

        spin_unlock(&peer->ksnp_lock);

        ksocknal_tx_decref(tx);
}

static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
        int rc;

        if (tx->tx_zc_capable && !tx->tx_zc_checked)
                ksocknal_check_zc_req(tx);

        rc = ksocknal_transmit(conn, tx);

        CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

        if (!tx->tx_resid) {
                /* Sent everything OK */
                LASSERT(!rc);

                return 0;
        }

        if (rc == -EAGAIN)
                return rc;

        if (rc == -ENOMEM) {
                static int counter;

                counter++;   /* exponential backoff warnings */
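                /*
                 * (counter & -counter) isolates the lowest set bit, so the
                 * test below is true only when counter is a power of two:
                 * warnings fire on the 1st, 2nd, 4th, 8th, ... occurrence.
                 */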
                if ((counter & (-counter)) == counter)
                        CWARN("%u ENOMEM conn %p\n", counter, conn);

                /* Queue on ksnd_enomem_conns for retry after a timeout */
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

                /* enomem list takes over scheduler's ref... */
                LASSERT(conn->ksnc_tx_scheduled);
                list_add_tail(&conn->ksnc_tx_list,
                              &ksocknal_data.ksnd_enomem_conns);
                if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
                                                   SOCKNAL_ENOMEM_RETRY),
                                      ksocknal_data.ksnd_reaper_waketime))
                        wake_up(&ksocknal_data.ksnd_reaper_waitq);

                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
                return rc;
        }

        /* Actual error */
        LASSERT(rc < 0);

        if (!conn->ksnc_closing) {
                switch (rc) {
                case -ECONNRESET:
                        LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
                                      &conn->ksnc_ipaddr);
                        break;
                default:
                        LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
                                      &conn->ksnc_ipaddr, rc);
                        break;
                }
                CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
                       conn, rc,
                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                       &conn->ksnc_ipaddr,
                       conn->ksnc_port);
        }

        if (tx->tx_zc_checked)
                ksocknal_uncheck_zc_req(tx);

        /* it's not an error if conn is being closed */
        ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);

        return rc;
}

static void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
        /* called holding write lock on ksnd_global_lock */

        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);

        route->ksnr_scheduled = 1;          /* scheduling conn for connd */
        ksocknal_route_addref(route);       /* extra ref for connd */

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        list_add_tail(&route->ksnr_connd_list,
                      &ksocknal_data.ksnd_connd_routes);
        wake_up(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}

void
ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
{
        struct ksock_route *route;

        /* called holding write lock on ksnd_global_lock */
        for (;;) {
                /* launch any/all connections that need it */
                route = ksocknal_find_connectable_route_locked(peer);
                if (!route)
                        return;

                ksocknal_launch_connection_locked(route);
        }
}

struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
                          int nonblk)
{
        struct list_head *tmp;
        struct ksock_conn *conn;
        struct ksock_conn *typed = NULL;
        struct ksock_conn *fallback = NULL;
        int tnob = 0;
        int fnob = 0;

        list_for_each(tmp, &peer->ksnp_conns) {
                struct ksock_conn *c;
                int nob, rc;

                c = list_entry(tmp, struct ksock_conn, ksnc_list);
                nob = atomic_read(&c->ksnc_tx_nob) +
                      c->ksnc_sock->sk->sk_wmem_queued;

                LASSERT(!c->ksnc_closing);
                LASSERT(c->ksnc_proto &&
                        c->ksnc_proto->pro_match_tx);

                rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

                switch (rc) {
                default:
                        LBUG();
                case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
                        continue;

                case SOCKNAL_MATCH_YES: /* typed connection */
                        if (!typed || tnob > nob ||
                            (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
                                typed = c;
                                tnob  = nob;
                        }
                        break;

                case SOCKNAL_MATCH_MAY: /* fallback connection */
                        if (!fallback || fnob > nob ||
                            (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
                                fallback = c;
                                fnob = nob;
                        }
                        break;
                }
        }

        /* prefer the typed selection */
        conn = (typed) ? typed : fallback;

        if (conn)
                conn->ksnc_tx_last_post = cfs_time_current();

        return conn;
}
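
/*
 * Selection policy note: among connections whose protocol accepts this tx,
 * prefer a SOCKNAL_MATCH_YES ("typed") connection over a SOCKNAL_MATCH_MAY
 * fallback, and within each class pick the one with the fewest bytes
 * already queued (nob); the ksnd_round_robin tunable breaks ties in favour
 * of the least recently used connection.
 */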

void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
        conn->ksnc_proto->pro_pack(tx);

        atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
        ksocknal_conn_addref(conn); /* +1 ref for tx */
        tx->tx_conn = conn;
}

void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
        struct ksock_sched *sched = conn->ksnc_scheduler;
        struct ksock_msg *msg = &tx->tx_msg;
        struct ksock_tx *ztx = NULL;
        int bufnob = 0;

        /*
         * called holding global lock (read or irq-write) and caller may
         * not have dropped this lock between finding conn and calling me,
         * so we don't need the {get,put}connsock dance to deref
         * ksnc_sock...
         */
        LASSERT(!conn->ksnc_closing);

        CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
               libcfs_id2str(conn->ksnc_peer->ksnp_id),
               &conn->ksnc_ipaddr, conn->ksnc_port);

        ksocknal_tx_prep(conn, tx);

        /*
         * Ensure the frags we've been given EXACTLY match the number of
         * bytes we want to send.  Many TCP/IP stacks disregard any total
         * size parameters passed to them and just look at the frags.
         *
         * We always expect at least 1 mapped fragment containing the
         * complete ksocknal message header.
         */
        LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
                lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
                (unsigned int)tx->tx_nob);
        LASSERT(tx->tx_niov >= 1);
        LASSERT(tx->tx_resid == tx->tx_nob);

        CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
               tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
                                      KSOCK_MSG_NOOP,
               tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

        /*
         * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
         * but they're used inside spinlocks a lot.
         */
        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
        spin_lock_bh(&sched->kss_lock);

        if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
                        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                conn->ksnc_tx_bufnob = 0;
                mb(); /* order with adding to tx_queue */
        }

        if (msg->ksm_type == KSOCK_MSG_NOOP) {
                /*
                 * The packet is a noop ZC-ACK; try to piggyback the
                 * ack_cookie on a normal packet so I don't need to send it
                 */
                LASSERT(msg->ksm_zc_cookies[1]);
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);

                /* ZC-ACK piggybacked on ztx; release this tx later */
                if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
                        ztx = tx;
        } else {
                /*
                 * It's a normal packet - can it piggyback a noop zc-ack that
                 * has been queued already?
                 */
                LASSERT(!msg->ksm_zc_cookies[1]);
                LASSERT(conn->ksnc_proto->pro_queue_tx_msg);

                ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
                /* ztx will be released later */
        }

        if (ztx) {
                atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
                list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
        }

        if (conn->ksnc_tx_ready &&      /* able to send */
            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                /* +1 ref for scheduler */
                ksocknal_conn_addref(conn);
                list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);
}

struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{
        unsigned long now = cfs_time_current();
        struct list_head *tmp;
        struct ksock_route *route;

        list_for_each(tmp, &peer->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                /* connections being established */
                if (route->ksnr_scheduled)
                        continue;

                /* all route types connected? */
                if (!(ksocknal_route_mask() & ~route->ksnr_connected))
                        continue;

                if (!(!route->ksnr_retry_interval || /* first attempt */
                      cfs_time_aftereq(now, route->ksnr_timeout))) {
                        CDEBUG(D_NET,
                               "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
                               &route->ksnr_ipaddr,
                               route->ksnr_connected,
                               route->ksnr_retry_interval,
                               cfs_duration_sec(route->ksnr_timeout - now));
                        continue;
                }

                return route;
        }

        return NULL;
}

struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
{
        struct list_head *tmp;
        struct ksock_route *route;

        list_for_each(tmp, &peer->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                if (route->ksnr_scheduled)
                        return route;
        }

        return NULL;
}

int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
                       struct lnet_process_id id)
{
        struct ksock_peer *peer;
        struct ksock_conn *conn;
        rwlock_t *g_lock;
        int retry;
        int rc;

        LASSERT(!tx->tx_conn);

        g_lock = &ksocknal_data.ksnd_global_lock;

        for (retry = 0;; retry = 1) {
                read_lock(g_lock);
                peer = ksocknal_find_peer_locked(ni, id);
                if (peer) {
                        if (!ksocknal_find_connectable_route_locked(peer)) {
                                conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
                                if (conn) {
                                        /*
                                         * I've got no routes that need to be
                                         * connecting and I do have an actual
                                         * connection...
                                         */
                                        ksocknal_queue_tx_locked(tx, conn);
                                        read_unlock(g_lock);
                                        return 0;
                                }
                        }
                }

                /* I'll need a write lock... */
                read_unlock(g_lock);

                write_lock_bh(g_lock);

                peer = ksocknal_find_peer_locked(ni, id);
                if (peer)
                        break;

                write_unlock_bh(g_lock);

                if (id.pid & LNET_PID_USERFLAG) {
                        CERROR("Refusing to create a connection to userspace process %s\n",
                               libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                if (retry) {
                        CERROR("Can't find peer %s\n", libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                rc = ksocknal_add_peer(ni, id,
                                       LNET_NIDADDR(id.nid),
                                       lnet_acceptor_port());
                if (rc) {
                        CERROR("Can't add peer %s: %d\n",
                               libcfs_id2str(id), rc);
                        return rc;
                }
        }

        ksocknal_launch_all_connections_locked(peer);

        conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
        if (conn) {
                /* Connection exists; queue message on it */
                ksocknal_queue_tx_locked(tx, conn);
                write_unlock_bh(g_lock);
                return 0;
        }

        if (peer->ksnp_accepting > 0 ||
            ksocknal_find_connecting_route_locked(peer)) {
                /* the message is going to be pinned to the peer */
                tx->tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

                /* Queue the message until a connection is established */
                list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
                write_unlock_bh(g_lock);
                return 0;
        }

        write_unlock_bh(g_lock);

        /* NB Routes may be ignored if connections to them failed recently */
        CNETERR("No usable routes to %s\n", libcfs_id2str(id));
        return -EHOSTUNREACH;
}
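
/*
 * Control flow summary: the loop above takes the read lock for the fast
 * path (peer known, connection ready), upgrades to the write lock when
 * connections or the peer itself must be created, and retries at most
 * once after ksocknal_add_peer(); a second failed lookup is reported as
 * -EHOSTUNREACH rather than looping forever.
 */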

int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
        int mpflag = 1;
        int type = lntmsg->msg_type;
        struct lnet_process_id target = lntmsg->msg_target;
        unsigned int payload_niov = lntmsg->msg_niov;
        struct kvec *payload_iov = lntmsg->msg_iov;
        struct bio_vec *payload_kiov = lntmsg->msg_kiov;
        unsigned int payload_offset = lntmsg->msg_offset;
        unsigned int payload_nob = lntmsg->msg_len;
        struct ksock_tx *tx;
        int desc_size;
        int rc;

        /*
         * NB 'private' is different depending on what we're sending.
         * Just ignore it...
         */
        CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT(!payload_nob || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
        /* payload is either all vaddrs or all pages */
        LASSERT(!(payload_kiov && payload_iov));
        LASSERT(!in_interrupt());

        if (payload_iov)
                desc_size = offsetof(struct ksock_tx,
                                     tx_frags.virt.iov[1 + payload_niov]);
        else
                desc_size = offsetof(struct ksock_tx,
                                     tx_frags.paged.kiov[payload_niov]);

        if (lntmsg->msg_vmflush)
                mpflag = cfs_memory_pressure_get_and_set();
        tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
        if (!tx) {
                CERROR("Can't allocate tx desc type %d size %d\n",
                       type, desc_size);
                if (lntmsg->msg_vmflush)
                        cfs_memory_pressure_restore(mpflag);
                return -ENOMEM;
        }

        tx->tx_conn = NULL;                  /* set when assigned a conn */
        tx->tx_lnetmsg = lntmsg;

        if (payload_iov) {
                tx->tx_kiov = NULL;
                tx->tx_nkiov = 0;
                tx->tx_iov = tx->tx_frags.virt.iov;
                tx->tx_niov = 1 +
                              lnet_extract_iov(payload_niov, &tx->tx_iov[1],
                                               payload_niov, payload_iov,
                                               payload_offset, payload_nob);
        } else {
                tx->tx_niov = 1;
                tx->tx_iov = &tx->tx_frags.paged.iov;
                tx->tx_kiov = tx->tx_frags.paged.kiov;
                tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
                                                 payload_niov, payload_kiov,
                                                 payload_offset, payload_nob);

                if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
                        tx->tx_zc_capable = 1;
        }

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = 0;

        /* The first fragment will be set later in pro_pack */
        rc = ksocknal_launch_packet(ni, tx, target);
        if (!mpflag)
                cfs_memory_pressure_restore(mpflag);

        if (!rc)
                return 0;

        ksocknal_free_tx(tx);
        return -EIO;
}
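
/*
 * Descriptor sizing sketch: the offsetof() expressions above size the
 * flexible tx_frags tail exactly.  For an iovec payload, frag 0 is
 * reserved for the ksock message header packed later by pro_pack(), hence
 * "1 + payload_niov" kvec slots; for a paged payload the header lives in
 * the single tx_frags.paged.iov and the payload occupies payload_niov
 * bio_vec slots.
 */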

int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
        struct task_struct *task = kthread_run(fn, arg, "%s", name);

        if (IS_ERR(task))
                return PTR_ERR(task);

        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads++;
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return 0;
}

void
ksocknal_thread_fini(void)
{
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads--;
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}

int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
        static char ksocknal_slop_buffer[4096];
        struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;

        int nob;
        unsigned int niov;
        int skipped;

        LASSERT(conn->ksnc_proto);

        if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
                /* Remind the socket to ack eagerly... */
                ksocknal_lib_eager_ack(conn);
        }

        if (!nob_to_skip) {      /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                mb();                  /* racing with timeout thread */

                switch (conn->ksnc_proto->pro_version) {
                case KSOCK_PROTO_V2:
                case KSOCK_PROTO_V3:
                        conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
                        kvec->iov_base = &conn->ksnc_msg;
                        kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
                        conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
                        iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
                                      1, offsetof(struct ksock_msg, ksm_u));
                        break;

                case KSOCK_PROTO_V1:
                        /* Receiving bare struct lnet_hdr */
                        conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                        kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
                        kvec->iov_len = sizeof(struct lnet_hdr);
                        conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
                        iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
                                      1, sizeof(struct lnet_hdr));
                        break;

                default:
                        LBUG();
                }
                conn->ksnc_rx_csum = ~0;
                return 1;
        }

        /*
         * Set up to skip as much as possible now.  If there's more left
         * (ran out of iov entries) we'll get called again
         */
        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
        conn->ksnc_rx_nob_left = nob_to_skip;
        skipped = 0;
        niov = 0;

        do {
                nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

                kvec[niov].iov_base = ksocknal_slop_buffer;
                kvec[niov].iov_len  = nob;
                niov++;
                skipped += nob;
                nob_to_skip -= nob;

        } while (nob_to_skip &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));

        iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
        return 0;
}
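
/*
 * Slop handling note: when a packet must be discarded, the loop above
 * points as many kvec entries as fit in ksnc_rx_iov_space at one shared
 * 4096-byte slop buffer.  The data lands there and is simply overwritten;
 * if nob_to_skip exceeds what the iov can describe, the function returns
 * 0 so the caller comes back for the remainder.
 */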

static int
ksocknal_process_receive(struct ksock_conn *conn)
{
        struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
        struct lnet_hdr *lhdr;
        struct lnet_process_id *id;
        int rc;

        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

        /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
        LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
        if (iov_iter_count(&conn->ksnc_rx_to)) {
                rc = ksocknal_receive(conn);

                if (rc <= 0) {
                        LASSERT(rc != -EAGAIN);

                        if (!rc)
                                CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
                                       conn,
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);
                        else if (!conn->ksnc_closing)
                                CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
                                       conn, rc,
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);

                        /* it's not an error if conn is being closed */
                        ksocknal_close_conn_and_siblings(conn,
                                                         (conn->ksnc_closing) ? 0 : rc);
                        return (!rc ? -ESHUTDOWN : rc);
                }

                if (iov_iter_count(&conn->ksnc_rx_to)) {
                        /* short read */
                        return -EAGAIN;
                }
        }
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_KSM_HEADER:
                if (conn->ksnc_flip) {
                        __swab32s(&conn->ksnc_msg.ksm_type);
                        __swab32s(&conn->ksnc_msg.ksm_csum);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
                }

                if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
                        CERROR("%s: Unknown message type: %x\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_type);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EPROTO;
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_csum &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        /* NOOP Checksum error */
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EIO;
                }

                if (conn->ksnc_msg.ksm_zc_cookies[1]) {
                        __u64 cookie = 0;

                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
                                cookie = conn->ksnc_msg.ksm_zc_cookies[0];

                        rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
                                               conn->ksnc_msg.ksm_zc_cookies[1]);

                        if (rc) {
                                CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
                                ksocknal_new_packet(conn, 0);
                                ksocknal_close_conn_and_siblings(conn, -EPROTO);
                                return rc;
                        }
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
                        ksocknal_new_packet(conn, 0);
                        return 0;       /* NOOP is done and just return */
                }

                conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

                kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
                kvec->iov_len = sizeof(struct ksock_lnet_msg);

                iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
                              1, sizeof(struct ksock_lnet_msg));

                goto again;     /* read lnet header now */

        case SOCKNAL_RX_LNET_HEADER:
                /* unpack message header */
                conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

                if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
                        /* Userspace peer */
                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        /* Substitute process ID assigned at connection time */
                        lhdr->src_pid = cpu_to_le32(id->pid);
                        lhdr->src_nid = cpu_to_le64(id->nid);
                }

                conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
                ksocknal_conn_addref(conn);     /* ++ref while parsing */

                rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
                                &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
                                conn->ksnc_peer->ksnp_id.nid, conn, 0);
                if (rc < 0) {
                        /* I just received garbage: give up on this conn */
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        ksocknal_conn_decref(conn);
                        return -EPROTO;
                }

                /* I'm racing with ksocknal_recv() */
                LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
                        conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

                if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
                        return 0;

                /* ksocknal_recv() got called */
                goto again;

        case SOCKNAL_RX_LNET_PAYLOAD:
                /* payload all received */
                rc = 0;

                if (!conn->ksnc_rx_nob_left &&   /* not truncating */
                    conn->ksnc_msg.ksm_csum &&  /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        rc = -EIO;
                }

                if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        rc = conn->ksnc_proto->pro_handle_zcreq(conn,
                                        conn->ksnc_msg.ksm_zc_cookies[0],
                                        *ksocknal_tunables.ksnd_nonblk_zcack ||
                                        le64_to_cpu(lhdr->src_nid) != id->nid);
                }

                lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

                if (rc) {
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        return -EPROTO;
                }
                /* Fall through */

        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
                if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
                        return 0;       /* come back later */
                goto again;          /* try to finish reading slop now */

        default:
                break;
        }

        /* Not Reached */
        LBUG();
        return -EINVAL;                /* keep gcc happy */
}
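
/*
 * Receive state machine summary:
 *   SOCKNAL_RX_KSM_HEADER   -> ksock_msg header read; NOOPs finish here
 *   SOCKNAL_RX_LNET_HEADER  -> lnet header read, handed to lnet_parse()
 *   SOCKNAL_RX_PARSE(_WAIT) -> waiting for LNet to call ksocknal_recv()
 *   SOCKNAL_RX_LNET_PAYLOAD -> payload read, lnet_finalize() then recycle
 *   SOCKNAL_RX_SLOP         -> discarding bytes of a rejected packet
 */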

int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
              int delayed, struct iov_iter *to, unsigned int rlen)
{
        struct ksock_conn *conn = private;
        struct ksock_sched *sched = conn->ksnc_scheduler;

        LASSERT(iov_iter_count(to) <= rlen);
        LASSERT(to->nr_segs <= LNET_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_left = rlen;

        conn->ksnc_rx_to = *to;

        LASSERT(conn->ksnc_rx_scheduled);

        spin_lock_bh(&sched->kss_lock);

        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_PARSE_WAIT:
                list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
                wake_up(&sched->kss_waitq);
                LASSERT(conn->ksnc_rx_ready);
                break;

        case SOCKNAL_RX_PARSE:
                /* scheduler hasn't noticed I'm parsing yet */
                break;
        }

        conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

        spin_unlock_bh(&sched->kss_lock);
        ksocknal_conn_decref(conn);
        return 0;
}
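
/*
 * Handshake with the scheduler: if the scheduler already saw
 * SOCKNAL_RX_PARSE and parked the conn, it is in SOCKNAL_RX_PARSE_WAIT
 * here and must be requeued and woken explicitly; if the scheduler has
 * not noticed yet, flipping the state to SOCKNAL_RX_LNET_PAYLOAD under
 * kss_lock is enough for it to continue on its own.
 */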

static inline int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
        int rc;

        spin_lock_bh(&sched->kss_lock);

        rc = !ksocknal_data.ksnd_shuttingdown &&
             list_empty(&sched->kss_rx_conns) &&
             list_empty(&sched->kss_tx_conns);

        spin_unlock_bh(&sched->kss_lock);
        return rc;
}

int ksocknal_scheduler(void *arg)
{
        struct ksock_sched_info *info;
        struct ksock_sched *sched;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
        int rc;
        int nloops = 0;
        long id = (long)arg;

        info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
        sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

        cfs_block_allsigs();

        rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
        if (rc) {
                CWARN("Can't set CPU partition affinity to %d: %d\n",
                      info->ksi_cpt, rc);
        }

        spin_lock_bh(&sched->kss_lock);

        while (!ksocknal_data.ksnd_shuttingdown) {
                int did_something = 0;

                /* Ensure I progress everything semi-fairly */

                if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          struct ksock_conn, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);

                        LASSERT(conn->ksnc_rx_scheduled);
                        LASSERT(conn->ksnc_rx_ready);

                        /*
                         * clear rx_ready in case receive isn't complete.
                         * Do it BEFORE we call process_recv, since
                         * data_ready can set it any time after we release
                         * kss_lock.
                         */
                        conn->ksnc_rx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);

                        rc = ksocknal_process_receive(conn);

                        spin_lock_bh(&sched->kss_lock);

                        /* I'm the only one that can clear this flag */
                        LASSERT(conn->ksnc_rx_scheduled);

                        /* Did process_receive get everything it wanted? */
                        if (!rc)
                                conn->ksnc_rx_ready = 1;

                        if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
                                /*
                                 * Conn blocked waiting for ksocknal_recv();
                                 * I change its state (under lock) to signal
                                 * it can be rescheduled
                                 */
                                conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
                        } else if (conn->ksnc_rx_ready) {
                                /* reschedule for rx */
                                list_add_tail(&conn->ksnc_rx_list,
                                              &sched->kss_rx_conns);
                        } else {
                                conn->ksnc_rx_scheduled = 0;
                                /* drop my ref */
                                ksocknal_conn_decref(conn);
                        }

                        did_something = 1;
                }

1390                 if (!list_empty(&sched->kss_tx_conns)) {
1391                         LIST_HEAD(zlist);
1392
1393                         if (!list_empty(&sched->kss_zombie_noop_txs)) {
1394                                 list_add(&zlist, &sched->kss_zombie_noop_txs);
1395                                 list_del_init(&sched->kss_zombie_noop_txs);
1396                         }
1397
1398                         conn = list_entry(sched->kss_tx_conns.next,
1399                                           struct ksock_conn, ksnc_tx_list);
1400                         list_del(&conn->ksnc_tx_list);
1401
1402                         LASSERT(conn->ksnc_tx_scheduled);
1403                         LASSERT(conn->ksnc_tx_ready);
1404                         LASSERT(!list_empty(&conn->ksnc_tx_queue));
1405
1406                         tx = list_entry(conn->ksnc_tx_queue.next,
1407                                         struct ksock_tx, tx_list);
1408
1409                         if (conn->ksnc_tx_carrier == tx)
1410                                 ksocknal_next_tx_carrier(conn);
1411
1412                         /* dequeue now so empty list => more to send */
1413                         list_del(&tx->tx_list);
1414
1415                         /*
1416                          * Clear tx_ready in case send isn't complete.  Do
1417                          * it BEFORE we call process_transmit, since
1418                          * write_space can set it any time after we release
1419                          * kss_lock.
1420                          */
1421                         conn->ksnc_tx_ready = 0;
1422                         spin_unlock_bh(&sched->kss_lock);
1423
1424                         if (!list_empty(&zlist)) {
1425                                 /*
1426                                  * free zombie noop txs, it's fast because
1427                                  * noop txs are just put in freelist
1428                                  */
1429                                 ksocknal_txlist_done(NULL, &zlist, 0);
1430                         }
1431
1432                         rc = ksocknal_process_transmit(conn, tx);
1433
1434                         if (rc == -ENOMEM || rc == -EAGAIN) {
1435                                 /*
1436                                  * Incomplete send: replace tx on HEAD of
1437                                  * tx_queue
1438                                  */
1439                                 spin_lock_bh(&sched->kss_lock);
1440                                 list_add(&tx->tx_list, &conn->ksnc_tx_queue);
1441                         } else {
1442                                 /* Complete send; tx -ref */
1443                                 ksocknal_tx_decref(tx);
1444
1445                                 spin_lock_bh(&sched->kss_lock);
1446                                 /* assume space for more */
1447                                 conn->ksnc_tx_ready = 1;
1448                         }
1449
1450                         if (rc == -ENOMEM) {
1451                                 /*
1452                                  * Do nothing; after a short timeout, this
1453                                  * conn will be reposted on kss_tx_conns.
1454                                  */
1455                         } else if (conn->ksnc_tx_ready &&
1456                                    !list_empty(&conn->ksnc_tx_queue)) {
1457                                 /* reschedule for tx */
1458                                 list_add_tail(&conn->ksnc_tx_list,
1459                                               &sched->kss_tx_conns);
1460                         } else {
1461                                 conn->ksnc_tx_scheduled = 0;
1462                                 /* drop my ref */
1463                                 ksocknal_conn_decref(conn);
1464                         }
1465
1466                         did_something = 1;
1467                 }
1468                 if (!did_something ||      /* nothing to do */
1469                     ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
1470                         spin_unlock_bh(&sched->kss_lock);
1471
1472                         nloops = 0;
1473
1474                         if (!did_something) {   /* wait for something to do */
1475                                 rc = wait_event_interruptible_exclusive(
1476                                         sched->kss_waitq,
1477                                         !ksocknal_sched_cansleep(sched));
1478                                 LASSERT(!rc);
1479                         } else {
1480                                 cond_resched();
1481                         }
1482
1483                         spin_lock_bh(&sched->kss_lock);
1484                 }
1485         }
1486
1487         spin_unlock_bh(&sched->kss_lock);
1488         ksocknal_thread_fini();
1489         return 0;
1490 }
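
/*
 * Illustrative sketch (not part of the driver): the scheduler loop above
 * follows a common kernel worker pattern - batch work under a spinlock,
 * then drop the lock to sleep or yield every SOCKNAL_RESCHED iterations.
 * All example_* names below are hypothetical; only the locking/yielding
 * skeleton mirrors ksocknal_scheduler().
 */
#if 0	/* example only */
static int example_worker(struct example_sched *s)
{
	int nloops = 0;
	int rc;

	spin_lock_bh(&s->lock);
	while (!s->shutting_down) {
		int did_something = example_do_one_unit(s);

		if (!did_something ||		       /* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) {     /* hogging CPU? */
			spin_unlock_bh(&s->lock);
			nloops = 0;

			if (!did_something) {
				/* block until there is work again */
				rc = wait_event_interruptible_exclusive(
					s->waitq, example_has_work(s));
				(void)rc;	/* sketch ignores signals */
			} else {
				cond_resched();	/* just yield the CPU */
			}

			spin_lock_bh(&s->lock);
		}
	}
	spin_unlock_bh(&s->lock);
	return 0;
}
#endif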
1491
1492 /*
1493  * Add connection to kss_rx_conns of scheduler
1494  * and wake up the scheduler.
1495  */
1496 void ksocknal_read_callback(struct ksock_conn *conn)
1497 {
1498         struct ksock_sched *sched;
1499
1500         sched = conn->ksnc_scheduler;
1501
1502         spin_lock_bh(&sched->kss_lock);
1503
1504         conn->ksnc_rx_ready = 1;
1505
1506         if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
1507                 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
1508                 conn->ksnc_rx_scheduled = 1;
1509                 /* extra ref for scheduler */
1510                 ksocknal_conn_addref(conn);
1511
1512                 wake_up(&sched->kss_waitq);
1513         }
1514         spin_unlock_bh(&sched->kss_lock);
1515 }
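
/*
 * For context: ksocknal_read_callback() is driven from the socket's
 * sk_data_ready hook.  A minimal sketch of that hookup, assuming (as the
 * glue in socklnd_lib.c does) that the conn pointer is stashed in
 * sk->sk_user_data and protected by ksnd_global_lock:
 */
#if 0	/* example only */
static void example_data_ready(struct sock *sk)
{
	struct ksock_conn *conn;

	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	if (conn)	/* not racing with connection teardown */
		ksocknal_read_callback(conn);

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
#endif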
1516
1517 /*
1518  * Add connection to kss_tx_conns of scheduler
1519  * and wake up the scheduler.
1520  */
1521 void ksocknal_write_callback(struct ksock_conn *conn)
1522 {
1523         struct ksock_sched *sched;
1524
1525         sched = conn->ksnc_scheduler;
1526
1527         spin_lock_bh(&sched->kss_lock);
1528
1529         conn->ksnc_tx_ready = 1;
1530
1531         if (!conn->ksnc_tx_scheduled && /* not being progressed */
1532             !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
1533                 list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
1534                 conn->ksnc_tx_scheduled = 1;
1535                 /* extra ref for scheduler */
1536                 ksocknal_conn_addref(conn);
1537
1538                 wake_up(&sched->kss_waitq);
1539         }
1540
1541         spin_unlock_bh(&sched->kss_lock);
1542 }
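
/*
 * Note the asymmetry with the read side: a write-space event only
 * reschedules the conn when ksnc_tx_queue is non-empty, since there is
 * nothing to transmit otherwise; ksnc_tx_ready is still recorded, so the
 * next queued tx sees the available space immediately.
 */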
1543
1544 static struct ksock_proto *
1545 ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
1546 {
1547         __u32 version = 0;
1548
1549         if (hello->kshm_magic == LNET_PROTO_MAGIC)
1550                 version = hello->kshm_version;
1551         else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
1552                 version = __swab32(hello->kshm_version);
1553
1554         if (version) {
1555 #if SOCKNAL_VERSION_DEBUG
1556                 if (*ksocknal_tunables.ksnd_protocol == 1)
1557                         return NULL;
1558
1559                 if (*ksocknal_tunables.ksnd_protocol == 2 &&
1560                     version == KSOCK_PROTO_V3)
1561                         return NULL;
1562 #endif
1563                 if (version == KSOCK_PROTO_V2)
1564                         return &ksocknal_protocol_v2x;
1565
1566                 if (version == KSOCK_PROTO_V3)
1567                         return &ksocknal_protocol_v3x;
1568
1569                 return NULL;
1570         }
1571
1572         if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
1573                 struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello;
1574
1575                 BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
1576                              offsetof(struct ksock_hello_msg, kshm_src_nid));
1577
1578                 if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
1579                     hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
1580                         return &ksocknal_protocol_v1x;
1581         }
1582
1583         return NULL;
1584 }
1585
1586 int
1587 ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
1588                     lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
1589 {
1590         /* CAVEAT EMPTOR: this byte-flips 'ipaddrs' */
1591         struct ksock_net *net = (struct ksock_net *)ni->ni_data;
1592
1593         LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
1594
1595         /* rely on caller to hold a ref on the socket so it won't disappear */
1596         LASSERT(conn->ksnc_proto);
1597
1598         hello->kshm_src_nid = ni->ni_nid;
1599         hello->kshm_dst_nid = peer_nid;
1600         hello->kshm_src_pid = the_lnet.ln_pid;
1601
1602         hello->kshm_src_incarnation = net->ksnn_incarnation;
1603         hello->kshm_ctype = conn->ksnc_type;
1604
1605         return conn->ksnc_proto->pro_send_hello(conn, hello);
1606 }
1607
1608 static int
1609 ksocknal_invert_type(int type)
1610 {
1611         switch (type) {
1612         case SOCKLND_CONN_ANY:
1613         case SOCKLND_CONN_CONTROL:
1614                 return type;
1615         case SOCKLND_CONN_BULK_IN:
1616                 return SOCKLND_CONN_BULK_OUT;
1617         case SOCKLND_CONN_BULK_OUT:
1618                 return SOCKLND_CONN_BULK_IN;
1619         default:
1620                 return SOCKLND_CONN_NONE;
1621         }
1622 }
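
/*
 * E.g. a passive connection whose peer advertised kshm_ctype ==
 * SOCKLND_CONN_BULK_OUT is recorded locally as SOCKLND_CONN_BULK_IN:
 * each side labels the connection from its own point of view, while
 * CONTROL and ANY are symmetric and map to themselves.
 */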
1623
1624 int
1625 ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
1626                     struct ksock_hello_msg *hello,
1627                     struct lnet_process_id *peerid,
1628                     __u64 *incarnation)
1629 {
1630         /* Return < 0        fatal error
1631          *        0          success
1632          *        EALREADY   lost connection race
1633          *        EPROTO     protocol version mismatch
1634          */
1635         struct socket *sock = conn->ksnc_sock;
1636         int active = !!conn->ksnc_proto;
1637         int timeout;
1638         int proto_match;
1639         int rc;
1640         struct ksock_proto *proto;
1641         struct lnet_process_id recv_id;
1642
1643         /* socket type set on active connections - not set on passive */
1644         LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
1645
1646         timeout = active ? *ksocknal_tunables.ksnd_timeout :
1647                             lnet_acceptor_timeout();
1648
1649         rc = lnet_sock_read(sock, &hello->kshm_magic,
1650                             sizeof(hello->kshm_magic), timeout);
1651         if (rc) {
1652                 CERROR("Error %d reading HELLO from %pI4h\n",
1653                        rc, &conn->ksnc_ipaddr);
1654                 LASSERT(rc < 0);
1655                 return rc;
1656         }
1657
1658         if (hello->kshm_magic != LNET_PROTO_MAGIC &&
1659             hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
1660             hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
1661                 /* Unexpected magic! */
1662                 CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
1663                        __cpu_to_le32(hello->kshm_magic),
1664                        LNET_PROTO_TCP_MAGIC,
1665                        &conn->ksnc_ipaddr);
1666                 return -EPROTO;
1667         }
1668
1669         rc = lnet_sock_read(sock, &hello->kshm_version,
1670                             sizeof(hello->kshm_version), timeout);
1671         if (rc) {
1672                 CERROR("Error %d reading HELLO from %pI4h\n",
1673                        rc, &conn->ksnc_ipaddr);
1674                 LASSERT(rc < 0);
1675                 return rc;
1676         }
1677
1678         proto = ksocknal_parse_proto_version(hello);
1679         if (!proto) {
1680                 if (!active) {
1681                         /* unknown protocol from peer, tell peer my protocol */
1682                         conn->ksnc_proto = &ksocknal_protocol_v3x;
1683 #if SOCKNAL_VERSION_DEBUG
1684                         if (*ksocknal_tunables.ksnd_protocol == 2)
1685                                 conn->ksnc_proto = &ksocknal_protocol_v2x;
1686                         else if (*ksocknal_tunables.ksnd_protocol == 1)
1687                                 conn->ksnc_proto = &ksocknal_protocol_v1x;
1688 #endif
1689                         hello->kshm_nips = 0;
1690                         ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
1691                 }
1692
1693                 CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
1694                        conn->ksnc_proto->pro_version,
1695                        &conn->ksnc_ipaddr);
1696
1697                 return -EPROTO;
1698         }
1699
1700         proto_match = (conn->ksnc_proto == proto);
1701         conn->ksnc_proto = proto;
1702
1703         /* receive the rest of hello message anyway */
1704         rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
1705         if (rc) {
1706                 CERROR("Error %d reading or checking hello from %pI4h\n",
1707                        rc, &conn->ksnc_ipaddr);
1708                 LASSERT(rc < 0);
1709                 return rc;
1710         }
1711
1712         *incarnation = hello->kshm_src_incarnation;
1713
1714         if (hello->kshm_src_nid == LNET_NID_ANY) {
1715                 CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
1716                        &conn->ksnc_ipaddr);
1717                 return -EPROTO;
1718         }
1719
1720         if (!active &&
1721             conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
1722                 /* Userspace NAL assigns peer process ID from socket */
1723                 recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
1724                 recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
1725                                          conn->ksnc_ipaddr);
1726         } else {
1727                 recv_id.nid = hello->kshm_src_nid;
1728                 recv_id.pid = hello->kshm_src_pid;
1729         }
1730
1731         if (!active) {
1732                 *peerid = recv_id;
1733
1734                 /* peer determines type */
1735                 conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
1736                 if (conn->ksnc_type == SOCKLND_CONN_NONE) {
1737                         CERROR("Unexpected type %d from %s ip %pI4h\n",
1738                                hello->kshm_ctype, libcfs_id2str(*peerid),
1739                                &conn->ksnc_ipaddr);
1740                         return -EPROTO;
1741                 }
1742
1743                 return 0;
1744         }
1745
1746         if (peerid->pid != recv_id.pid ||
1747             peerid->nid != recv_id.nid) {
1748                 LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
1749                                    libcfs_id2str(*peerid),
1750                                    &conn->ksnc_ipaddr,
1751                                    libcfs_id2str(recv_id));
1752                 return -EPROTO;
1753         }
1754
1755         if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
1756                 /* Possible protocol mismatch or I lost the connection race */
1757                 return proto_match ? EALREADY : EPROTO;
1758         }
1759
1760         if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
1761                 CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
1762                        conn->ksnc_type, libcfs_id2str(*peerid),
1763                        &conn->ksnc_ipaddr, hello->kshm_ctype);
1764                 return -EPROTO;
1765         }
1766
1767         return 0;
1768 }
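
/*
 * Illustrative caller sketch: ksocknal_recv_hello() deliberately mixes
 * negative errnos (fatal) with positive EALREADY/EPROTO (recoverable),
 * so callers must check the sign.  The labels below are hypothetical:
 */
#if 0	/* example only */
	rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
	if (rc < 0)
		goto failed;		/* fatal error */
	if (rc == EALREADY)
		goto retry_later;	/* lost a connection race */
	if (rc == EPROTO)
		goto renegotiate;	/* protocol version mismatch */
#endif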
1769
1770 static int
1771 ksocknal_connect(struct ksock_route *route)
1772 {
1773         LIST_HEAD(zombies);
1774         struct ksock_peer *peer = route->ksnr_peer;
1775         int type;
1776         int wanted;
1777         struct socket *sock;
1778         unsigned long deadline;
1779         int retry_later = 0;
1780         int rc = 0;
1781
1782         deadline = cfs_time_add(cfs_time_current(),
1783                                 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
1784
1785         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1786
1787         LASSERT(route->ksnr_scheduled);
1788         LASSERT(!route->ksnr_connecting);
1789
1790         route->ksnr_connecting = 1;
1791
1792         for (;;) {
1793                 wanted = ksocknal_route_mask() & ~route->ksnr_connected;
1794
1795                 /*
1796                  * stop connecting if peer/route got closed under me, or
1797                  * route got connected while queued
1798                  */
1799                 if (peer->ksnp_closing || route->ksnr_deleted ||
1800                     !wanted) {
1801                         retry_later = 0;
1802                         break;
1803                 }
1804
1805                 /* reschedule if peer is connecting to me */
1806                 if (peer->ksnp_accepting > 0) {
1807                         CDEBUG(D_NET,
1808                                "peer %s(%d) already connecting to me, retry later.\n",
1809                                libcfs_nid2str(peer->ksnp_id.nid),
1810                                peer->ksnp_accepting);
1811                         retry_later = 1;
1812                 }
1813
1814                 if (retry_later) /* needs reschedule */
1815                         break;
1816
1817                 if (wanted & BIT(SOCKLND_CONN_ANY)) {
1818                         type = SOCKLND_CONN_ANY;
1819                 } else if (wanted & BIT(SOCKLND_CONN_CONTROL)) {
1820                         type = SOCKLND_CONN_CONTROL;
1821                 } else if (wanted & BIT(SOCKLND_CONN_BULK_IN)) {
1822                         type = SOCKLND_CONN_BULK_IN;
1823                 } else {
1824                         LASSERT(wanted & BIT(SOCKLND_CONN_BULK_OUT));
1825                         type = SOCKLND_CONN_BULK_OUT;
1826                 }
1827
1828                 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1829
1830                 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
1831                         rc = -ETIMEDOUT;
1832                         lnet_connect_console_error(rc, peer->ksnp_id.nid,
1833                                                    route->ksnr_ipaddr,
1834                                                    route->ksnr_port);
1835                         goto failed;
1836                 }
1837
1838                 rc = lnet_connect(&sock, peer->ksnp_id.nid,
1839                                   route->ksnr_myipaddr,
1840                                   route->ksnr_ipaddr, route->ksnr_port);
1841                 if (rc)
1842                         goto failed;
1843
1844                 rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
1845                 if (rc < 0) {
1846                         lnet_connect_console_error(rc, peer->ksnp_id.nid,
1847                                                    route->ksnr_ipaddr,
1848                                                    route->ksnr_port);
1849                         goto failed;
1850                 }
1851
1852                 /*
1853                  * A +ve RC means I have to retry because I lost the connection
1854                  * race or I have to renegotiate the protocol version
1855                  */
1856                 retry_later = rc;
1857                 if (retry_later)
1858                         CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
1859                                libcfs_nid2str(peer->ksnp_id.nid));
1860
1861                 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1862         }
1863
1864         route->ksnr_scheduled = 0;
1865         route->ksnr_connecting = 0;
1866
1867         if (retry_later) {
1868                 /*
1869                  * re-queue for attention; this frees me up to handle
1870                  * the peer's incoming connection request
1871                  */
1872                 if (rc == EALREADY ||
1873                     (!rc && peer->ksnp_accepting > 0)) {
1874                         /*
1875                          * We want to introduce a delay before the next
1876                          * attempt to connect if we lost the connection
1877                          * race, but the race is usually resolved quickly,
1878                          * so min_reconnectms should be a good heuristic
1879                          */
1880                         route->ksnr_retry_interval =
1881                                 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
1882                         route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1883                                                            route->ksnr_retry_interval);
1884                 }
1885
1886                 ksocknal_launch_connection_locked(route);
1887         }
1888
1889         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1890         return retry_later;
1891
1892  failed:
1893         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1894
1895         route->ksnr_scheduled = 0;
1896         route->ksnr_connecting = 0;
1897
1898         /* This is a retry rather than a new connection */
1899         route->ksnr_retry_interval *= 2;
1900         route->ksnr_retry_interval =
1901                 max(route->ksnr_retry_interval,
1902                     cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
1903         route->ksnr_retry_interval =
1904                 min(route->ksnr_retry_interval,
1905                     cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
1906
1907         LASSERT(route->ksnr_retry_interval);
1908         route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1909                                            route->ksnr_retry_interval);
1910
1911         if (!list_empty(&peer->ksnp_tx_queue) &&
1912             !peer->ksnp_accepting &&
1913             !ksocknal_find_connecting_route_locked(peer)) {
1914                 struct ksock_conn *conn;
1915
1916                 /*
1917                  * ksnp_tx_queue is queued on a conn on successful
1918                  * connection for V1.x and V2.x
1919                  */
1920                 if (!list_empty(&peer->ksnp_conns)) {
1921                         conn = list_entry(peer->ksnp_conns.next,
1922                                           struct ksock_conn, ksnc_list);
1923                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1924                 }
1925
1926                 /*
1927                  * take all the blocked packets while I've got the lock and
1928                  * complete below...
1929                  */
1930                 list_splice_init(&peer->ksnp_tx_queue, &zombies);
1931         }
1932
1933         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1934
1935         ksocknal_peer_failed(peer);
1936         ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
1937         return 0;
1938 }
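
/*
 * Backoff example (values are illustrative only; the real bounds come
 * from the min_reconnectms/max_reconnectms module parameters): with
 * min=1000ms and max=64000ms, each successive failure doubles
 * ksnr_retry_interval and clamps it, giving delays of roughly
 * 1, 2, 4, 8, 16, 32, 64, 64, ... seconds.
 */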
1939
1940 /*
1941  * Check whether we need to create more connds.
1942  * It will try to create a new thread if necessary; @timeout can
1943  * be updated if creation fails, so the caller won't keep retrying
1944  * while running out of resources.
1945  */
1946 static int
1947 ksocknal_connd_check_start(time64_t sec, long *timeout)
1948 {
1949         char name[16];
1950         int rc;
1951         int total = ksocknal_data.ksnd_connd_starting +
1952                     ksocknal_data.ksnd_connd_running;
1953
1954         if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
1955                 /* still initializing */
1956                 return 0;
1957         }
1958
1959         if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
1960             total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
1961                 /*
1962                  * can't create more connds, or we still have enough
1963                  * threads to handle more connection attempts
1964                  */
1965                 return 0;
1966         }
1967
1968         if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
1969                 /* no pending connection requests */
1970                 return 0;
1971         }
1972
1973         if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
1974                 /* we may have run out of resources, retry later */
1975                 *timeout = cfs_time_seconds(1);
1976                 return 0;
1977         }
1978
1979         if (ksocknal_data.ksnd_connd_starting > 0) {
1980                 /* serialize thread starts to avoid a flood */
1981                 return 0;
1982         }
1983
1984         ksocknal_data.ksnd_connd_starting_stamp = sec;
1985         ksocknal_data.ksnd_connd_starting++;
1986         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1987
1988         /* NB: total is the next id */
1989         snprintf(name, sizeof(name), "socknal_cd%02d", total);
1990         rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
1991
1992         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1993         if (!rc)
1994                 return 1;
1995
1996         /* we tried ... */
1997         LASSERT(ksocknal_data.ksnd_connd_starting > 0);
1998         ksocknal_data.ksnd_connd_starting--;
1999         ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
2000
2001         return 1;
2002 }
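
/*
 * NB: both paths that follow the spin_unlock above return 1, so the
 * return value really means "connd_lock was dropped", not "a thread was
 * started"; the caller only uses it to note that it may have missed
 * state changes while unlocked.
 */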
2003
2004 /*
2005  * Check whether the current thread can exit: it will return 1 if there are
2006  * too many threads and none was created in the past 120 seconds.
2007  * Also, this function may update @timeout to make the caller come back
2008  * again to recheck these conditions.
2009  */
2010 static int
2011 ksocknal_connd_check_stop(time64_t sec, long *timeout)
2012 {
2013         int val;
2014
2015         if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2016                 /* still initializing */
2017                 return 0;
2018         }
2019
2020         if (ksocknal_data.ksnd_connd_starting > 0) {
2021                 /* a new thread is being started */
2022                 return 0;
2023         }
2024
2025         if (ksocknal_data.ksnd_connd_running <=
2026             *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
2027                 return 0;
2028         }
2029
2030         /* was a thread created in the past 120 seconds? */
2031         val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
2032                     SOCKNAL_CONND_TIMEOUT - sec);
2033
2034         *timeout = (val > 0) ? cfs_time_seconds(val) :
2035                                cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
2036         if (val > 0)
2037                 return 0;
2038
2039         /* no thread created in the past 120 seconds */
2040
2041         return ksocknal_data.ksnd_connd_running >
2042                ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
2043 }
2044
2045 /*
2046  * Go through the connd_routes queue looking for a route that we can process
2047  * right now; @timeout_p can be updated if we need to come back later
2048  */
2049 static struct ksock_route *
2050 ksocknal_connd_get_route_locked(signed long *timeout_p)
2051 {
2052         struct ksock_route *route;
2053         unsigned long now;
2054
2055         now = cfs_time_current();
2056
2057         /* connd_routes can contain both pending and ordinary routes */
2058         list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
2059                             ksnr_connd_list) {
2060                 if (!route->ksnr_retry_interval ||
2061                     cfs_time_aftereq(now, route->ksnr_timeout))
2062                         return route;
2063
2064                 if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
2065                     (int)*timeout_p > (int)(route->ksnr_timeout - now))
2066                         *timeout_p = (int)(route->ksnr_timeout - now);
2067         }
2068
2069         return NULL;
2070 }
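
/*
 * E.g. if every queued route is still backing off and the earliest one
 * times out three seconds from now, *timeout_p is trimmed to roughly
 * three seconds' worth of jiffies, so ksocknal_connd() wakes up just in
 * time to retry that route.
 */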
2071
2072 int
2073 ksocknal_connd(void *arg)
2074 {
2075         spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
2076         struct ksock_connreq *cr;
2077         wait_queue_entry_t wait;
2078         int nloops = 0;
2079         int cons_retry = 0;
2080
2081         cfs_block_allsigs();
2082
2083         init_waitqueue_entry(&wait, current);
2084
2085         spin_lock_bh(connd_lock);
2086
2087         LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2088         ksocknal_data.ksnd_connd_starting--;
2089         ksocknal_data.ksnd_connd_running++;
2090
2091         while (!ksocknal_data.ksnd_shuttingdown) {
2092                 struct ksock_route *route = NULL;
2093                 time64_t sec = ktime_get_real_seconds();
2094                 long timeout = MAX_SCHEDULE_TIMEOUT;
2095                 int dropped_lock = 0;
2096
2097                 if (ksocknal_connd_check_stop(sec, &timeout)) {
2098                         /* wake up another one to check stop */
2099                         wake_up(&ksocknal_data.ksnd_connd_waitq);
2100                         break;
2101                 }
2102
2103                 if (ksocknal_connd_check_start(sec, &timeout)) {
2104                         /* dropped the lock to start a new thread */
2105                         dropped_lock = 1;
2106                 }
2107
2108                 if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
2109                         /* Connection accepted by the listener */
2110                         cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
2111                                         struct ksock_connreq, ksncr_list);
2112
2113                         list_del(&cr->ksncr_list);
2114                         spin_unlock_bh(connd_lock);
2115                         dropped_lock = 1;
2116
2117                         ksocknal_create_conn(cr->ksncr_ni, NULL,
2118                                              cr->ksncr_sock, SOCKLND_CONN_NONE);
2119                         lnet_ni_decref(cr->ksncr_ni);
2120                         LIBCFS_FREE(cr, sizeof(*cr));
2121
2122                         spin_lock_bh(connd_lock);
2123                 }
2124
2125                 /*
2126                  * Only handle an outgoing connection request if there
2127                  * is a thread left to handle incoming connections and
2128                  * to create new connds
2129                  */
2130                 if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
2131                     ksocknal_data.ksnd_connd_running) {
2132                         route = ksocknal_connd_get_route_locked(&timeout);
2133                 }
2134                 if (route) {
2135                         list_del(&route->ksnr_connd_list);
2136                         ksocknal_data.ksnd_connd_connecting++;
2137                         spin_unlock_bh(connd_lock);
2138                         dropped_lock = 1;
2139
2140                         if (ksocknal_connect(route)) {
2141                                 /* consecutive retry */
2142                                 if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
2143                                         CWARN("massive consecutive re-connecting to %pI4h\n",
2144                                               &route->ksnr_ipaddr);
2145                                         cons_retry = 0;
2146                                 }
2147                         } else {
2148                                 cons_retry = 0;
2149                         }
2150
2151                         ksocknal_route_decref(route);
2152
2153                         spin_lock_bh(connd_lock);
2154                         ksocknal_data.ksnd_connd_connecting--;
2155                 }
2156
2157                 if (dropped_lock) {
2158                         if (++nloops < SOCKNAL_RESCHED)
2159                                 continue;
2160                         spin_unlock_bh(connd_lock);
2161                         nloops = 0;
2162                         cond_resched();
2163                         spin_lock_bh(connd_lock);
2164                         continue;
2165                 }
2166
2167                 /* Nothing to do; sleep for 'timeout' */
2168                 set_current_state(TASK_INTERRUPTIBLE);
2169                 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
2170                                          &wait);
2171                 spin_unlock_bh(connd_lock);
2172
2173                 nloops = 0;
2174                 schedule_timeout(timeout);
2175
2176                 remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
2177                 spin_lock_bh(connd_lock);
2178         }
2179         ksocknal_data.ksnd_connd_running--;
2180         spin_unlock_bh(connd_lock);
2181
2182         ksocknal_thread_fini();
2183         return 0;
2184 }
2185
2186 static struct ksock_conn *
2187 ksocknal_find_timed_out_conn(struct ksock_peer *peer)
2188 {
2189         /* We're called with a shared lock on ksnd_global_lock */
2190         struct ksock_conn *conn;
2191         struct list_head *ctmp;
2192
2193         list_for_each(ctmp, &peer->ksnp_conns) {
2194                 int error;
2195
2196                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
2197
2198                 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
2199                 LASSERT(!conn->ksnc_closing);
2200
2201                 /*
2202                  * SOCK_ERROR will reset the error code of the socket
2203                  * on some platforms (like Darwin 8.x)
2204                  */
2205                 error = conn->ksnc_sock->sk->sk_err;
2206                 if (error) {
2207                         ksocknal_conn_addref(conn);
2208
2209                         switch (error) {
2210                         case ECONNRESET:
2211                                 CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
2212                                         libcfs_id2str(peer->ksnp_id),
2213                                         &conn->ksnc_ipaddr,
2214                                         conn->ksnc_port);
2215                                 break;
2216                         case ETIMEDOUT:
2217                                 CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
2218                                         libcfs_id2str(peer->ksnp_id),
2219                                         &conn->ksnc_ipaddr,
2220                                         conn->ksnc_port);
2221                                 break;
2222                         default:
2223                                 CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
2224                                         error,
2225                                         libcfs_id2str(peer->ksnp_id),
2226                                         &conn->ksnc_ipaddr,
2227                                         conn->ksnc_port);
2228                                 break;
2229                         }
2230
2231                         return conn;
2232                 }
2233
2234                 if (conn->ksnc_rx_started &&
2235                     cfs_time_aftereq(cfs_time_current(),
2236                                      conn->ksnc_rx_deadline)) {
2237                         /* Timed out incomplete incoming message */
2238                         ksocknal_conn_addref(conn);
2239                         CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
2240                                 libcfs_id2str(peer->ksnp_id),
2241                                 &conn->ksnc_ipaddr,
2242                                 conn->ksnc_port,
2243                                 conn->ksnc_rx_state,
2244                                 iov_iter_count(&conn->ksnc_rx_to),
2245                                 conn->ksnc_rx_nob_left);
2246                         return conn;
2247                 }
2248
2249                 if ((!list_empty(&conn->ksnc_tx_queue) ||
2250                      conn->ksnc_sock->sk->sk_wmem_queued) &&
2251                     cfs_time_aftereq(cfs_time_current(),
2252                                      conn->ksnc_tx_deadline)) {
2253                         /*
2254                          * Timed out messages queued for sending or
2255                          * buffered in the socket's send buffer
2256                          */
2257                         ksocknal_conn_addref(conn);
2258                         CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
2259                                 libcfs_id2str(peer->ksnp_id),
2260                                 &conn->ksnc_ipaddr,
2261                                 conn->ksnc_port);
2262                         return conn;
2263                 }
2264         }
2265
2266         return NULL;
2267 }
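
/*
 * NB: every non-NULL return above has taken a reference on the conn via
 * ksocknal_conn_addref(); the caller owns that ref and must drop it with
 * ksocknal_conn_decref() once the conn has been dealt with.
 */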
2268
2269 static inline void
2270 ksocknal_flush_stale_txs(struct ksock_peer *peer)
2271 {
2272         struct ksock_tx *tx;
2273         struct ksock_tx *tmp;
2274         LIST_HEAD(stale_txs);
2275
2276         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2277
2278         list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
2279                 if (!cfs_time_aftereq(cfs_time_current(),
2280                                       tx->tx_deadline))
2281                         break;
2282
2283                 list_move_tail(&tx->tx_list, &stale_txs);
2285         }
2286
2287         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2288
2289         ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
2290 }
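
/*
 * The pattern above - move timed-out txs onto a private list under the
 * write lock, then complete them after unlocking - keeps the hold time
 * on ksnd_global_lock short: ksocknal_txlist_done() may do real work per
 * tx and should not run under the lock.
 */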
2291
2292 static int
2293 ksocknal_send_keepalive_locked(struct ksock_peer *peer)
2294         __must_hold(&ksocknal_data.ksnd_global_lock)
2295 {
2296         struct ksock_sched *sched;
2297         struct ksock_conn *conn;
2298         struct ksock_tx *tx;
2299
2300         /* last_alive will be updated by create_conn */
2301         if (list_empty(&peer->ksnp_conns))
2302                 return 0;
2303
2304         if (peer->ksnp_proto != &ksocknal_protocol_v3x)
2305                 return 0;
2306
2307         if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
2308             time_before(cfs_time_current(),
2309                         cfs_time_add(peer->ksnp_last_alive,
2310                                      cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
2311                 return 0;
2312
2313         if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
2314                 return 0;
2315
2316         /*
2317          * retry 10 seconds later, so we don't put pressure
2318          * on this peer if we fail to send a keepalive this time
2319          */
2320         peer->ksnp_send_keepalive = cfs_time_shift(10);
2321
2322         conn = ksocknal_find_conn_locked(peer, NULL, 1);
2323         if (conn) {
2324                 sched = conn->ksnc_scheduler;
2325
2326                 spin_lock_bh(&sched->kss_lock);
2327                 if (!list_empty(&conn->ksnc_tx_queue)) {
2328                         spin_unlock_bh(&sched->kss_lock);
2329                         /* there is a queued ACK, no keepalive needed */
2330                         return 0;
2331                 }
2332
2333                 spin_unlock_bh(&sched->kss_lock);
2334         }
2335
2336         read_unlock(&ksocknal_data.ksnd_global_lock);
2337
2338         /* cookie = 1 is reserved for keepalive PING */
2339         tx = ksocknal_alloc_tx_noop(1, 1);
2340         if (!tx) {
2341                 read_lock(&ksocknal_data.ksnd_global_lock);
2342                 return -ENOMEM;
2343         }
2344
2345         if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
2346                 read_lock(&ksocknal_data.ksnd_global_lock);
2347                 return 1;
2348         }
2349
2350         ksocknal_free_tx(tx);
2351         read_lock(&ksocknal_data.ksnd_global_lock);
2352
2353         return -EIO;
2354 }
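
/*
 * NB: despite the __must_hold annotation, this function drops and
 * retakes ksnd_global_lock around the noop-tx allocation and launch; any
 * non-zero return therefore tells the caller its peer-list iteration is
 * stale, which is why ksocknal_check_peer_timeouts() restarts its scan
 * in that case.
 */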
2355
2356 static void
2357 ksocknal_check_peer_timeouts(int idx)
2358 {
2359         struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
2360         struct ksock_peer *peer;
2361         struct ksock_conn *conn;
2362         struct ksock_tx *tx;
2363
2364  again:
2365         /*
2366          * NB. We expect to have a look at all the peers and not find any
2367          * connections to time out, so we just use a shared lock while we
2368          * take a look...
2369          */
2370         read_lock(&ksocknal_data.ksnd_global_lock);
2371
2372         list_for_each_entry(peer, peers, ksnp_list) {
2373                 unsigned long deadline = 0;
2374                 struct ksock_tx *tx_stale;
2375                 int resid = 0;
2376                 int n = 0;
2377
2378                 if (ksocknal_send_keepalive_locked(peer)) {
2379                         read_unlock(&ksocknal_data.ksnd_global_lock);
2380                         goto again;
2381                 }
2382
2383                 conn = ksocknal_find_timed_out_conn(peer);
2384
2385                 if (conn) {
2386                         read_unlock(&ksocknal_data.ksnd_global_lock);
2387
2388                         ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
2389
2390                         /*
2391                          * NB we won't find this one again, but we can't
2392                          * just proceed with the next peer, since we dropped
2393                          * ksnd_global_lock and it might be dead already!
2394                          */
2395                         ksocknal_conn_decref(conn);
2396                         goto again;
2397                 }
2398
2399                 /*
2400                  * we can't process stale txs right here because we're
2401                  * holding only a shared lock
2402                  */
2403                 if (!list_empty(&peer->ksnp_tx_queue)) {
2404                         tx = list_entry(peer->ksnp_tx_queue.next,
2405                                         struct ksock_tx, tx_list);
2406
2407                         if (cfs_time_aftereq(cfs_time_current(),
2408                                              tx->tx_deadline)) {
2409                                 ksocknal_peer_addref(peer);
2410                                 read_unlock(&ksocknal_data.ksnd_global_lock);
2411
2412                                 ksocknal_flush_stale_txs(peer);
2413
2414                                 ksocknal_peer_decref(peer);
2415                                 goto again;
2416                         }
2417                 }
2418
2419                 if (list_empty(&peer->ksnp_zc_req_list))
2420                         continue;
2421
2422                 tx_stale = NULL;
2423                 spin_lock(&peer->ksnp_lock);
2424                 list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
2425                         if (!cfs_time_aftereq(cfs_time_current(),
2426                                               tx->tx_deadline))
2427                                 break;
2428                         /* ignore the TX if connection is being closed */
2429                         if (tx->tx_conn->ksnc_closing)
2430                                 continue;
2431                         if (!tx_stale)
2432                                 tx_stale = tx;
2433                         n++;
2434                 }
2435
2436                 if (!tx_stale) {
2437                         spin_unlock(&peer->ksnp_lock);
2438                         continue;
2439                 }
2440
2441                 deadline = tx_stale->tx_deadline;
2442                 resid = tx_stale->tx_resid;
2443                 conn = tx_stale->tx_conn;
2444                 ksocknal_conn_addref(conn);
2445
2446                 spin_unlock(&peer->ksnp_lock);
2447                 read_unlock(&ksocknal_data.ksnd_global_lock);
2448
2449                 CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest (%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
2450                        n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
2451                        cfs_duration_sec(cfs_time_current() - deadline),
2452                        resid, conn->ksnc_sock->sk->sk_wmem_queued);
2453
2454                 ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
2455                 ksocknal_conn_decref(conn);
2456                 goto again;
2457         }
2458
2459         read_unlock(&ksocknal_data.ksnd_global_lock);
2460 }
2461
2462 int
2463 ksocknal_reaper(void *arg)
2464 {
2465         wait_queue_entry_t wait;
2466         struct ksock_conn *conn;
2467         struct ksock_sched *sched;
2468         struct list_head enomem_conns;
2469         int nenomem_conns;
2470         long timeout;
2471         int i;
2472         int peer_index = 0;
2473         unsigned long deadline = cfs_time_current();
2474
2475         cfs_block_allsigs();
2476
2477         INIT_LIST_HEAD(&enomem_conns);
2478         init_waitqueue_entry(&wait, current);
2479
2480         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2481
2482         while (!ksocknal_data.ksnd_shuttingdown) {
2483                 if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
2484                         conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
2485                                           struct ksock_conn, ksnc_list);
2486                         list_del(&conn->ksnc_list);
2487
2488                         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2489
2490                         ksocknal_terminate_conn(conn);
2491                         ksocknal_conn_decref(conn);
2492
2493                         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2494                         continue;
2495                 }
2496
2497                 if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
2498                         conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
2499                                           struct ksock_conn, ksnc_list);
2500                         list_del(&conn->ksnc_list);
2501
2502                         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2503
2504                         ksocknal_destroy_conn(conn);
2505
2506                         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2507                         continue;
2508                 }
2509
2510                 if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
2511                         list_add(&enomem_conns,
2512                                  &ksocknal_data.ksnd_enomem_conns);
2513                         list_del_init(&ksocknal_data.ksnd_enomem_conns);
2514                 }
2515
2516                 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2517
2518                 /* reschedule all the connections that stalled with ENOMEM... */
2519                 nenomem_conns = 0;
2520                 while (!list_empty(&enomem_conns)) {
2521                         conn = list_entry(enomem_conns.next, struct ksock_conn,
2522                                           ksnc_tx_list);
2523                         list_del(&conn->ksnc_tx_list);
2524
2525                         sched = conn->ksnc_scheduler;
2526
2527                         spin_lock_bh(&sched->kss_lock);
2528
2529                         LASSERT(conn->ksnc_tx_scheduled);
2530                         conn->ksnc_tx_ready = 1;
2531                         list_add_tail(&conn->ksnc_tx_list,
2532                                       &sched->kss_tx_conns);
2533                         wake_up(&sched->kss_waitq);
2534
2535                         spin_unlock_bh(&sched->kss_lock);
2536                         nenomem_conns++;
2537                 }
2538
2539                 /* careful with the jiffy wrap... */
2540                 while ((timeout = cfs_time_sub(deadline,
2541                                                cfs_time_current())) <= 0) {
2542                         const int n = 4;
2543                         const int p = 1;
2544                         int chunk = ksocknal_data.ksnd_peer_hash_size;
2545
2546                         /*
2547                          * Time to check for timeouts on a few more peers: I do
2548                          * checks every 'p' seconds on a proportion of the peer
2549                          * table and I need to check every connection 'n' times
2550                          * within a timeout interval, to ensure I detect a
2551                          * timeout on any connection within (n+1)/n times the
2552                          * timeout interval.
2553                          */
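                        /*
                         * Worked example: with ksnd_timeout = 50s and
                         * n = 4, p = 1, chunk becomes hash_size * 4/50
                         * buckets per wakeup, so the whole peer table is
                         * scanned every 12.5s - i.e. n times per timeout
                         * interval.
                         */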
2554                         if (*ksocknal_tunables.ksnd_timeout > n * p)
2555                                 chunk = (chunk * n * p) /
2556                                         *ksocknal_tunables.ksnd_timeout;
2557                         if (!chunk)
2558                                 chunk = 1;
2559
2560                         for (i = 0; i < chunk; i++) {
2561                                 ksocknal_check_peer_timeouts(peer_index);
2562                                 peer_index = (peer_index + 1) %
2563                                              ksocknal_data.ksnd_peer_hash_size;
2564                         }
2565
2566                         deadline = cfs_time_add(deadline, cfs_time_seconds(p));
2567                 }
2568
2569                 if (nenomem_conns) {
2570                         /*
2571                          * Reduce my timeout if I rescheduled ENOMEM conns.
2572                          * This also prevents me from being woken immediately
2573                          * if any go back on my enomem list.
2574                          */
2575                         timeout = SOCKNAL_ENOMEM_RETRY;
2576                 }
2577                 ksocknal_data.ksnd_reaper_waketime =
2578                         cfs_time_add(cfs_time_current(), timeout);
2579
2580                 set_current_state(TASK_INTERRUPTIBLE);
2581                 add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
2582
2583                 if (!ksocknal_data.ksnd_shuttingdown &&
2584                     list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
2585                     list_empty(&ksocknal_data.ksnd_zombie_conns))
2586                         schedule_timeout(timeout);
2587
2588                 set_current_state(TASK_RUNNING);
2589                 remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
2590
2591                 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2592         }
2593
2594         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2595
2596         ksocknal_thread_fini();
2597         return 0;
2598 }