// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
ksocknal_alloc_tx(int type, int size)
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					struct ksock_tx, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (!tx)
		LIBCFS_ALLOC(tx, size);

	if (!tx)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
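/*
 * Note: NOOP txs are fixed-size (KSOCK_NOOP_TX_SIZE), so instead of going
 * back to the allocator they are parked on ksnd_idle_noop_txs and reused by
 * ksocknal_alloc_tx() above; ksocknal_free_tx() below is the matching
 * recycle path.
 */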
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (!tx) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_lnetmsg = NULL;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_nonblk = nonblk;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
ksocknal_free_tx(struct ksock_tx *tx)
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);
		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx);
	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob);

	return rc;
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
	struct bio_vec *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(!tx->tx_niov);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx);
	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return rc;
		}

		nob -= (int)kiov->bv_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob);

	return rc;
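/*
 * Both send paths above consume what the socket accepted in place: a
 * partially-sent fragment keeps its slot in the frag list with its
 * base/offset advanced and its length reduced, so the next call resumes
 * exactly where the kernel stopped copying.
 */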
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT(tx->tx_resid);

	rc = ksocknal_connsock_addref(conn);
	if (rc) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov) {
			rc = ksocknal_send_iov(conn, tx);
		} else {
			rc = ksocknal_send_kiov(conn, tx);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0) /* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/*
			 * allocated send buffer bytes < computed; infer
			 * something got ACKed
			 */
			conn->ksnc_tx_deadline =
				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
			conn->ksnc_tx_bufnob = bufnob;
		}

		if (rc <= 0) { /* Didn't write anything? */
			if (!rc) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
	} while (tx->tx_resid);

	ksocknal_connsock_decref(conn);
	return rc;
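/*
 * The sk_wmem_queued sampling in ksocknal_transmit() doubles as a cheap ACK
 * detector: ksnc_tx_bufnob counts the bytes we have queued since the last
 * sample, so when the socket's actual queued byte count drops below it, the
 * peer must have ACKed something and the tx deadline can safely be pushed
 * out.
 */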
ksocknal_recv_iter(struct ksock_conn *conn)
	int nob;
	int rc;

	/*
	 * Never touch conn->ksnc_rx_to or change connection
	 * status inside ksocknal_lib_recv
	 */
	rc = ksocknal_lib_recv(conn);
	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_left -= nob;

	iov_iter_advance(&conn->ksnc_rx_to, nob);
	if (iov_iter_count(&conn->ksnc_rx_to))
		return -EAGAIN;

	return 1;
ksocknal_receive(struct ksock_conn *conn)
	/*
	 * Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_to to determine
	 * progress/completion.
	 */
	int rc;

	if (ksocknal_data.ksnd_stall_rx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		rc = ksocknal_recv_iter(conn);
		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (!rc && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */
		if (!iov_iter_count(&conn->ksnc_rx_to)) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx)
	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
	int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;

	LASSERT(ni || tx->tx_conn);

	if (tx->tx_conn)
		ksocknal_conn_decref(tx->tx_conn);

	if (!ni && tx->tx_conn)
		ni = tx->tx_conn->ksnc_peer->ksnp_ni;

	ksocknal_free_tx(tx);
	if (lnetmsg) /* KSOCK_MSG_NOOP txs go without lnetmsg */
		lnet_finalize(ni, lnetmsg, rc);
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
	struct ksock_tx *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, struct ksock_tx, tx_list);

		if (error && tx->tx_lnetmsg) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx);
	}
ksocknal_check_zc_req(struct ksock_tx *tx)
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer *peer = conn->ksnc_peer;

	/*
	 * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list.
	 */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/*
	 * assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack()
	 */
	ksocknal_tx_addref(tx);

	spin_lock(&peer->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer */
	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);

	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

	if (!peer->ksnp_zc_next_cookie)
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

	spin_unlock(&peer->ksnp_lock);
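/*
 * Cookie allocation note: ksnp_zc_next_cookie wraps past zero to
 * SOCKNAL_KEEPALIVE_PING + 1, so a live ZC request can never carry 0
 * ("no cookie") or the reserved keepalive cookie.
 */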
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
	struct ksock_peer *peer = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer->ksnp_lock);

	if (!tx->tx_msg.ksm_zc_cookies[0]) {
		/* Not waiting for an ACK */
		spin_unlock(&peer->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer->ksnp_lock);

	ksocknal_tx_decref(tx);
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
	int rc;

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx);
	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (!tx->tx_resid) {
		/* Sent everything OK */
		LASSERT(!rc);
		return 0;
	}
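	/*
	 * (counter & -counter) == counter holds only when counter is a
	 * power of two, so the ENOMEM warning below fires on the 1st, 2nd,
	 * 4th, 8th, ... occurrence: exponential backoff of the log noise,
	 * not of the retries themselves.
	 */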
	if (rc == -ENOMEM) {
		static int counter;

		counter++; /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p\n", counter, conn);

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
						   SOCKNAL_ENOMEM_RETRY),
				      ksocknal_data.ksnd_reaper_waketime))
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
		return rc;
	}

	/* Actual error */
	LASSERT(rc < 0);

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc,
		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr, conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);

	return rc;
ksocknal_launch_connection_locked(struct ksock_route *route)
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);

	route->ksnr_scheduled = 1;	/* scheduling conn for connd */
	ksocknal_route_addref(route);	/* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
	struct ksock_route *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer);
		if (!route)
			return;

		ksocknal_launch_connection_locked(route);
	}
ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
			  int nonblk)
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer->ksnp_conns) {
		struct ksock_conn *c;
		int nob;
		int rc;

		c = list_entry(tmp, struct ksock_conn, ksnc_list);
		nob = atomic_read(&c->ksnc_tx_nob) +
		      c->ksnc_sock->sk->sk_wmem_queued;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto &&
			c->ksnc_proto->pro_match_tx);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (!typed || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (!fallback || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed) ? typed : fallback;

	if (conn)
		conn->ksnc_tx_last_post = cfs_time_current();

	return conn;
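/*
 * Selection policy above: among connections the protocol accepts, prefer
 * the one with the least untransmitted data (ksnc_tx_nob plus socket wmem);
 * with round_robin enabled, ties go to the least recently posted
 * connection. A typed match (SOCKNAL_MATCH_YES) always beats a
 * SOCKNAL_MATCH_MAY fallback.
 */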
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
	struct ksock_sched *sched = conn->ksnc_scheduler;
	struct ksock_msg *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/*
	 * called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock...
	 */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr, conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/*
	 * Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header.
	 */
	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
	       KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	/*
	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
	 * but they're used inside spinlocks a lot.
	 */
	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
		conn->ksnc_tx_bufnob = 0;
		mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/*
		 * The packet is a noop ZC ACK; try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it
		 */
		LASSERT(msg->ksm_zc_cookies[1]);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);

		/* ZC ACK piggybacked on ztx; release tx later */
		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx;
	} else {
		/*
		 * It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already?
		 */
		LASSERT(!msg->ksm_zc_cookies[1]);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&	/* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
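/*
 * Piggyback summary for ksocknal_queue_tx_locked(): a NOOP carrying only a
 * ZC ACK may be absorbed into an already-queued normal packet
 * (pro_queue_tx_zcack), and a normal packet may absorb the cookie of an
 * already-queued NOOP (pro_queue_tx_msg); either way the displaced NOOP
 * lands on kss_zombie_noop_txs and is reaped by the scheduler.
 */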
ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
	unsigned long now = cfs_time_current();
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		/* connections being established */
		if (route->ksnr_scheduled)
			continue;

		/* all route types connected ? */
		if (!(ksocknal_route_mask() & ~route->ksnr_connected))
			continue;

		if (!(!route->ksnr_retry_interval || /* first attempt */
		      cfs_time_aftereq(now, route->ksnr_timeout))) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h (connected %d, interval %ld, %ld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
			continue;
		}

		return route;
	}

	return NULL;
ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
		       struct lnet_process_id id)
	struct ksock_peer *peer;
	struct ksock_conn *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(!tx->tx_conn);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer = ksocknal_find_peer_locked(ni, id);
		if (peer) {
			if (!ksocknal_find_connectable_route_locked(peer)) {
				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
				if (conn) {
					/*
					 * I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection...
					 */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer = ksocknal_find_peer_locked(ni, id);
		if (peer)
			break;

		write_unlock_bh(g_lock);

		if (id.pid & LNET_PID_USERFLAG) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc) {
			CERROR("Can't add peer %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer);

	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
	if (conn) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer)) {
		/* the message is going to be pinned to the peer */
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
	int mpflag = 1;
	int type = lntmsg->msg_type;
	struct lnet_process_id target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	struct bio_vec *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;

	/*
	 * NB 'private' is different depending on what we're sending.
	 * Just ignore it.
	 */
	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(!payload_nob || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov && payload_iov));
	LASSERT(!in_interrupt());

	if (payload_iov)
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.paged.kiov[payload_niov]);
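	/*
	 * The offsetof() sizing above exploits the tx_frags union at the
	 * tail of struct ksock_tx: only as many iov/kiov slots as this
	 * message needs are allocated. The "1 +" in the iov case reserves
	 * slot 0 for the socklnd message header packed later by pro_pack().
	 */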
	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (!tx) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (!rc)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;

ksocknal_thread_fini(void)
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
	static char ksocknal_slop_buffer[4096];
	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto);

	if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (!nob_to_skip) { /* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		mb(); /* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			kvec->iov_base = &conn->ksnc_msg;
			kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
				      1, offsetof(struct ksock_msg, ksm_u));
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			kvec->iov_len = sizeof(struct lnet_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
				      1, sizeof(struct lnet_hdr));
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/*
	 * Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again
	 */
	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		kvec[niov].iov_base = ksocknal_slop_buffer;
		kvec[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;
	} while (nob_to_skip &&	/* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));

	iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
	return 0;
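/*
 * Skipped bytes are received into a single shared 4K slop buffer; the data
 * is discarded unread, so it does not matter that several connections may
 * be scribbling over it concurrently.
 */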
ksocknal_process_receive(struct ksock_conn *conn)
	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
	struct lnet_hdr *lhdr;
	struct lnet_process_id *id;
	int rc;

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (iov_iter_count(&conn->ksnc_rx_to)) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
			LASSERT(rc != -EAGAIN);

			if (!rc)
				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
				       conn,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr, conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
				       conn, rc,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr, conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (!rc ? -ESHUTDOWN : rc);
		}

		if (iov_iter_count(&conn->ksnc_rx_to)) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1]) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
								conn->ksnc_msg.ksm_zc_cookies[1]);
			if (rc) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0; /* NOOP is done; just return */
		}
		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

		kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
		kvec->iov_len = sizeof(struct ksock_lnet_msg);

		iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
			      1, sizeof(struct ksock_lnet_msg));

		goto again; /* read lnet header now */

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
			/* Userspace peer */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn); /* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (!conn->ksnc_rx_nob_left &&	/* not truncating */
		    conn->ksnc_msg.ksm_csum &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

		if (rc) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}

		/* Fall through */
	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0; /* come back later */
		goto again;	/* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL; /* keep gcc happy */
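/*
 * Receive state machine: KSM_HEADER -> LNET_HEADER -> PARSE (blocked until
 * lnet_parse() calls back into ksocknal_recv() with the payload iov) ->
 * LNET_PAYLOAD, with SLOP soaking up any bytes left over from truncated or
 * unwanted messages before the next packet boundary.
 */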
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	      int delayed, struct iov_iter *to, unsigned int rlen)
	struct ksock_conn *conn = private;
	struct ksock_sched *sched = conn->ksnc_scheduler;

	LASSERT(iov_iter_count(to) <= rlen);
	LASSERT(to->nr_segs <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_left = rlen;

	conn->ksnc_rx_to = *to;

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
ksocknal_sched_cansleep(struct ksock_sched *sched)
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = !ksocknal_data.ksnd_shuttingdown &&
	     list_empty(&sched->kss_rx_conns) &&
	     list_empty(&sched->kss_tx_conns);

	spin_unlock_bh(&sched->kss_lock);
	return rc;
int ksocknal_scheduler(void *arg)
	struct ksock_sched_info *info;
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;

	info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

	cfs_block_allsigs();

	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc)
		CWARN("Can't set CPU partition affinity to %d: %d\n",
		      info->ksi_cpt, rc);

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		/* Ensure I progress everything semi-fairly */

		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  struct ksock_conn, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/*
			 * clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock.
			 */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (!rc)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/*
				 * Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled
				 */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}
		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			if (!list_empty(&sched->kss_zombie_noop_txs)) {
				list_add(&zlist, &sched->kss_zombie_noop_txs);
				list_del_init(&sched->kss_zombie_noop_txs);
			}

			conn = list_entry(sched->kss_tx_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					struct ksock_tx, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/*
			 * Clear tx_ready in case send isn't complete. Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock.
			 */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/*
				 * free zombie noop txs, it's fast because
				 * noop txs are just put in freelist
				 */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/*
				 * Incomplete send: replace tx on HEAD of
				 * tx_queue
				 */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list, &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/*
				 * Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns.
				 */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!did_something ||	/* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) { /* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(!rc);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_thread_fini();
	return 0;
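/*
 * Scheduler fairness: the rx and tx queues are serviced alternately, one
 * connection per pass, and kss_lock is dropped every SOCKNAL_RESCHED
 * iterations so a single busy connection cannot monopolize the thread.
 */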
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) { /* not being progressed */
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&	/* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
static struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello;

		BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
			     offsetof(struct ksock_hello_msg, kshm_src_nid));

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	struct ksock_net *net = (struct ksock_net *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
ksocknal_invert_type(int type)
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
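/*
 * Inversion table: ANY and CONTROL pair with themselves, BULK_IN pairs with
 * BULK_OUT (and vice versa); anything else is a protocol violation and maps
 * to SOCKLND_CONN_NONE.
 */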
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct ksock_hello_msg *hello,
		    struct lnet_process_id *peerid,
		    __u64 *incarnation)
	/*
	 * Return < 0	   fatal error
	 *	  0	   success
	 *	  EALREADY lost connection race
	 *	  EPROTO   protocol version mismatch
	 */
	struct socket *sock = conn->ksnc_sock;
	int active = !!conn->ksnc_proto;
	int timeout;
	int proto_match;
	int rc;
	struct ksock_proto *proto;
	struct lnet_process_id recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? *ksocknal_tunables.ksnd_timeout :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}
	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
		       __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC,
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (!proto) {
		if (!active) {
			/* unknown protocol from peer, tell peer my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
		       conn->ksnc_proto->pro_version,
		       &conn->ksnc_ipaddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc) {
		CERROR("Error %d reading or checking hello from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}
	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
					 conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pI4h\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_ipaddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_ipaddr, hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
ksocknal_connect(struct ksock_route *route)
	LIST_HEAD(zombies);
	struct ksock_peer *peer = route->ksnr_peer;
	int type;
	int wanted;
	struct socket *sock;
	unsigned long deadline;
	int retry_later = 0;
	int rc = 0;

	deadline = cfs_time_add(cfs_time_current(),
				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);

	route->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

		/*
		 * stop connecting if peer/route got closed under me, or
		 * route got connected while queued
		 */
		if (peer->ksnp_closing || route->ksnr_deleted ||
		    !wanted)
			break;

		/* reschedule if peer is connecting to me */
		if (peer->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer->ksnp_id.nid),
			       peer->ksnp_accepting);
			retry_later = 1;
		}

		if (retry_later) /* needs reschedule */
			break;

		if (wanted & BIT(SOCKLND_CONN_ANY)) {
			type = SOCKLND_CONN_ANY;
		} else if (wanted & BIT(SOCKLND_CONN_CONTROL)) {
			type = SOCKLND_CONN_CONTROL;
		} else if (wanted & BIT(SOCKLND_CONN_BULK_IN)) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT(wanted & BIT(SOCKLND_CONN_BULK_OUT));
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);
		if (cfs_time_aftereq(cfs_time_current(), deadline)) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(rc, peer->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		rc = lnet_connect(&sock, peer->ksnp_id.nid,
				  route->ksnr_myipaddr,
				  route->ksnr_ipaddr, route->ksnr_port);
		if (rc)
			goto failed;

		rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
		if (rc < 0) {
			lnet_connect_console_error(rc, peer->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		/*
		 * A +ve RC means I have to retry because I lost the connection
		 * race or I have to renegotiate protocol version
		 */
		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
			       libcfs_nid2str(peer->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	if (retry_later) {
		/*
		 * re-queue for attention; this frees me up to handle
		 * the peer's incoming connection request
		 */
		if (rc == EALREADY ||
		    (!rc && peer->ksnp_accepting > 0)) {
			/*
			 * We want to introduce a delay before the next
			 * attempt to connect if we lost the connection race,
			 * but the race is usually resolved quickly, so
			 * min_reconnectms is a good heuristic
			 */
			route->ksnr_retry_interval =
				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
			route->ksnr_timeout = cfs_time_add(cfs_time_current(),
							   route->ksnr_retry_interval);
		}

		ksocknal_launch_connection_locked(route);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

 failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	/* This is a retry rather than a new connection */
	route->ksnr_retry_interval *= 2;
	route->ksnr_retry_interval =
		max(route->ksnr_retry_interval,
		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
	route->ksnr_retry_interval =
		min(route->ksnr_retry_interval,
		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);

	LASSERT(route->ksnr_retry_interval);
	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
					   route->ksnr_retry_interval);

	if (!list_empty(&peer->ksnp_tx_queue) &&
	    !peer->ksnp_accepting &&
	    !ksocknal_find_connecting_route_locked(peer)) {
		struct ksock_conn *conn;

		/*
		 * ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x
		 */
		if (!list_empty(&peer->ksnp_conns)) {
			conn = list_entry(peer->ksnp_conns.next,
					  struct ksock_conn, ksnc_list);
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		/*
		 * take all the blocked packets while I've got the lock and
		 * complete below...
		 */
		list_splice_init(&peer->ksnp_tx_queue, &zombies);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer);
	ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
/*
 * check whether we need to create more connds.
 * It will try to create a new thread if necessary; @timeout can be updated
 * if we failed to create one, so the caller doesn't keep retrying while
 * short of resources.
 */
ksocknal_connd_check_start(time64_t sec, long *timeout)
	char name[16];
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
		/*
		 * can't create more connd, or still have enough
		 * threads to handle more connecting
		 */
		return 0;
	}

	if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
		/* no pending connecting request */
		return 0;
	}

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		/* may run out of resource, retry later */
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* serialize starting to avoid flood */
		return 0;
	}

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	/* NB: total is the next id */
	snprintf(name, sizeof(name), "socknal_cd%02d", total);
	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (!rc)
		return 1;

	/* we tried... */
	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return 1;

/*
 * Check whether the current thread can exit; return 1 if there are too many
 * threads and none were created in the past 120 seconds.
 * This function may also update @timeout to make the caller come back
 * later to recheck these conditions.
 */
ksocknal_connd_check_stop(time64_t sec, long *timeout)
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* in progress of starting new thread */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
		return 0;
	}

	/* created thread in past 120 seconds? */
	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	/* no thread created in the past 120 seconds */

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
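/*
 * Together check_start/check_stop give the connd pool simple elasticity:
 * grow (serialized, and rate-limited after a failed start) while queued
 * routes outnumber spare threads; shrink only above ksnd_nconnds once no
 * thread has been started for SOCKNAL_CONND_TIMEOUT seconds.
 */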
/*
 * Go through connd_routes queue looking for a route that we can process
 * right now; @timeout_p can be updated if we need to come back later
 */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
	struct ksock_route *route;
	unsigned long now;

	now = cfs_time_current();

	/* connd_routes can contain both pending and ordinary routes */
	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {
		if (!route->ksnr_retry_interval ||
		    cfs_time_aftereq(now, route->ksnr_timeout))
			return route;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    (int)*timeout_p > (int)(route->ksnr_timeout - now))
			*timeout_p = (int)(route->ksnr_timeout - now);
	}

	return NULL;
ksocknal_connd(void *arg)
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	struct ksock_connreq *cr;
	wait_queue_entry_t wait;
	int nloops = 0;
	int cons_retry = 0;

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		struct ksock_route *route = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		int dropped_lock = 0;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			/* wakeup another one to check stop */
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = 1;
		}

		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			/* Connection accepted by the listener */
			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
					struct ksock_connreq, ksncr_list);

			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		/*
		 * Only handle an outgoing connection request if there
		 * is a thread left to handle incoming connections and
		 * create new connd
		 */
		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running) {
			route = ksocknal_connd_get_route_locked(&timeout);
		}
		if (route) {
			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			if (ksocknal_connect(route)) {
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive re-connecting to %pI4h\n",
					      &route->ksnr_ipaddr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_route_decref(route);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (++nloops < SOCKNAL_RESCHED)
				continue;
			spin_unlock_bh(connd_lock);
			nloops = 0;
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
					 &wait);
		spin_unlock_bh(connd_lock);

		nloops = 0;
		schedule_timeout(timeout);

		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}

	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
static struct ksock_conn *
ksocknal_find_timed_out_conn(struct ksock_peer *peer)
	/* We're called with a shared lock on ksnd_global_lock */
	struct ksock_conn *conn;
	struct list_head *ctmp;

	list_for_each(ctmp, &peer->ksnp_conns) {
		int error;

		conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
		LASSERT(!conn->ksnc_closing);

		/*
		 * SOCK_ERROR will reset error code of socket in
		 * some platform (like Darwin8.x)
		 */
		error = conn->ksnc_sock->sk->sk_err;
		if (error) {
			ksocknal_conn_addref(conn);

			switch (error) {
			case ECONNRESET:
				CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr, conn->ksnc_port);
				break;
			case ETIMEDOUT:
				CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr, conn->ksnc_port);
				break;
			default:
				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
					error,
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr, conn->ksnc_port);
				break;
			}

			return conn;
		}

		if (conn->ksnc_rx_started &&
		    cfs_time_aftereq(cfs_time_current(),
				     conn->ksnc_rx_deadline)) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
				libcfs_id2str(peer->ksnp_id),
				&conn->ksnc_ipaddr, conn->ksnc_port,
				conn->ksnc_rx_state,
				iov_iter_count(&conn->ksnc_rx_to),
				conn->ksnc_rx_nob_left);
			return conn;
		}

		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued) &&
		    cfs_time_aftereq(cfs_time_current(),
				     conn->ksnc_tx_deadline)) {
			/*
			 * Timed out messages queued for sending or
			 * buffered in the socket's send buffer
			 */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
				libcfs_id2str(peer->ksnp_id),
				&conn->ksnc_ipaddr, conn->ksnc_port);
			return conn;
		}
	}

	return NULL;
ksocknal_flush_stale_txs(struct ksock_peer *peer)
	struct ksock_tx *tx;
	struct ksock_tx *tmp;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
		if (!cfs_time_aftereq(cfs_time_current(),
				      tx->tx_deadline))
			break;

		list_del(&tx->tx_list);
		list_add_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
ksocknal_send_keepalive_locked(struct ksock_peer *peer)
	__must_hold(&ksocknal_data.ksnd_global_lock)
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer->ksnp_conns))
		return 0;

	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    time_before(cfs_time_current(),
			cfs_time_add(peer->ksnp_last_alive,
				     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
		return 0;

	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
		return 0;

	/*
	 * retry 10 secs later, so we wouldn't put pressure
	 * on this peer if we failed to send keepalive this time
	 */
	peer->ksnp_send_keepalive = cfs_time_shift(10);

	conn = ksocknal_find_conn_locked(peer, NULL, 1);
	if (conn) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, don't need keepalive */
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (!tx) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);

	return -EIO;
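/*
 * Keepalives are NOOP txs sent with zc_cookies[1] == 1: cookie 1 is
 * reserved for the keepalive PING (see SOCKNAL_KEEPALIVE_PING), so the
 * peer's ZC-ACK handler can tell a keepalive apart from a real zero-copy
 * ACK.
 */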
ksocknal_check_peer_timeouts(int idx)
	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer *peer;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

 again:
	/*
	 * NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look...
	 */
	read_lock(&ksocknal_data.ksnd_global_lock);

	list_for_each_entry(peer, peers, ksnp_list) {
		unsigned long deadline = 0;
		struct ksock_tx *tx_stale;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer)) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer);

		if (conn) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/*
			 * NB we won't find this one again, but we can't
			 * just proceed with the next peer, since we dropped
			 * ksnd_global_lock and it might be dead already!
			 */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/*
		 * we can't process stale txs right here because we're
		 * holding only shared lock
		 */
		if (!list_empty(&peer->ksnp_tx_queue)) {
			tx = list_entry(peer->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);

			if (cfs_time_aftereq(cfs_time_current(),
					     tx->tx_deadline)) {
				ksocknal_peer_addref(peer);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer);

				ksocknal_peer_decref(peer);
				goto again;
			}
		}

		if (list_empty(&peer->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer->ksnp_lock);
		list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
			if (!cfs_time_aftereq(cfs_time_current(),
					      tx->tx_deadline))
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (!tx_stale)
				tx_stale = tx;
		}

		if (!tx_stale) {
			spin_unlock(&peer->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
		       cfs_duration_sec(cfs_time_current() - deadline),
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_reaper(void *arg)
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	struct list_head enomem_conns;
	int nenomem_conns;
	long timeout;
	int i;
	int peer_index = 0;
	unsigned long deadline = cfs_time_current();

	cfs_block_allsigs();

	INIT_LIST_HEAD(&enomem_conns);
	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
			list_add(&enomem_conns,
				 &ksocknal_data.ksnd_enomem_conns);
			list_del_init(&ksocknal_data.ksnd_enomem_conns);
		}

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next, struct ksock_conn,
					  ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}

		/* careful with the jiffy wrap... */
		while ((timeout = cfs_time_sub(deadline,
					       cfs_time_current())) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = ksocknal_data.ksnd_peer_hash_size;

			/*
			 * Time to check for timeouts on a few more peers: I do
			 * checks every 'p' seconds on a proportion of the peer
			 * table and I need to check every connection 'n' times
			 * within a timeout interval, to ensure I detect a
			 * timeout on any connection within (n+1)/n times the
			 * timeout interval.
			 */
			if (*ksocknal_tunables.ksnd_timeout > n * p)
				chunk = (chunk * n * p) /
					*ksocknal_tunables.ksnd_timeout;
			if (!chunk)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     ksocknal_data.ksnd_peer_hash_size;
			}

			deadline = cfs_time_add(deadline, cfs_time_seconds(p));
		}
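		/*
		 * Worked example of the chunk scaling above (with n = 4 and
		 * p = 1 as set in this loop): a 251-bucket peer table and a
		 * 50s timeout give chunk = 251 * 4 / 50 ~= 20 buckets per
		 * second, so every peer is visited roughly n times per
		 * timeout interval, as the comment requires.
		 */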
		if (nenomem_conns) {
			/*
			 * Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list.
			 */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime =
			cfs_time_add(cfs_time_current(), timeout);

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(timeout);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;