// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**  Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
*******************************************************************************
******************************************************************************/
/*
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level consists of two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 */
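
/*
 * Illustration only (assuming the configfs interface provided by
 * fs/dlm/config.c and a cluster that has not yet been started): the
 * protocol could be selected cluster-wide with something like
 *
 *   echo 1 > /sys/kernel/config/dlm/cluster/protocol	(1 = SCTP, 0 = TCP)
 *
 * In practice the cluster manager (e.g. dlm_controld) is expected to set
 * this consistently on every node.
 */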
#include <asm/ioctls.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>

#include <trace/events/dlm.h>
#include <trace/events/sock.h>

#include "dlm_internal.h"

#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000)
#define DLM_MAX_PROCESS_BUFFERS 24
#define NEEDED_RMEM (4*1024*1024)
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	/* this semaphore is used to allow parallel recv/send in read
	 * lock mode. When we release a sock we need to hold the write lock.
	 *
	 * However this is locking code and not nice. When we remove the
	 * othercon handling we can look into other mechanisms to synchronize
	 * io handling to call sock_release() at the right time.
	 */
	struct rw_semaphore sock_lock;
#define CF_APP_LIMITED 0
#define CF_RECV_PENDING 1
#define CF_SEND_PENDING 2
#define CF_RECV_INTR 3
#define CF_IS_OTHERCON 5
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;

	struct hlist_node list;
	/* due to some connect()/accept() races we can currently end up with
	 * a crossed-over second connection attempt for one node.
	 *
	 * There is a way to avoid the race by introducing a connect
	 * rule, e.g. only the side with our_nodeid > nodeid_to_connect is
	 * allowed to connect. The other side can still connect, but that is
	 * then only taken as a hint that it wants a reconnect.
	 *
	 * However, changing to this behaviour would break backwards
	 * compatibility. In a DLM protocol major version upgrade we should
	 * remove this!
	 */
	struct connection *othercon;
	struct work_struct rwork; /* receive worker */
	struct work_struct swork; /* send worker */
	wait_queue_head_t shutdown_wait;
	unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE];

	struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT];
	spinlock_t addrs_lock;

#define sock2con(x) ((struct connection *)(x)->sk_user_data)

struct listen_connection {
	struct work_struct rwork;

#define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
#define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;

	struct connection *con;
	struct list_head msgs;

	struct writequeue_entry *entry;
	struct dlm_msg *orig_msg;
	int idx; /* new()/commit() idx exchange */

	struct list_head list;

struct processqueue_entry {
	struct list_head list;

struct dlm_proto_ops {
	int (*connect)(struct connection *con, struct socket *sock,
		       struct sockaddr *addr, int addr_len);
	void (*sockopts)(struct socket *sock);
	int (*bind)(struct socket *sock);
	int (*listen_validate)(void);
	void (*listen_sockopts)(struct socket *sock);
	int (*listen_bind)(struct socket *sock);

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);

static struct listen_connection listen_con;
static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

static struct workqueue_struct *io_workqueue;
static struct workqueue_struct *process_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
DEFINE_STATIC_SRCU(connections_srcu);

static const struct dlm_proto_ops *dlm_proto_ops;
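
/*
 * Return codes of the low-level I/O helpers below (receive_from_sock(),
 * send_to_sock(), accept_from_sock()). Roughly: the io workers keep looping
 * while a helper returns DLM_IO_SUCCESS, requeue themselves on
 * DLM_IO_RESCHED, and on DLM_IO_FLUSH back off the receive path via the
 * ordered message processing workqueue first.
 */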
#define DLM_IO_SUCCESS 0
#define DLM_IO_RESCHED 3
#define DLM_IO_FLUSH 4

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
static void process_dlm_messages(struct work_struct *work);

static DECLARE_WORK(process_work, process_dlm_messages);
static DEFINE_SPINLOCK(processqueue_lock);
static bool process_dlm_messages_pending;
static atomic_t processqueue_count;
static LIST_HEAD(processqueue);

bool dlm_lowcomms_is_running(void)
	return !!listen_con.sock;
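
/*
 * Queue the send worker for @con unless I/O is stopped, the connection is
 * application-limited, or a send is already pending. Caller holds
 * con->writequeue_lock.
 */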
static void lowcomms_queue_swork(struct connection *con)
	assert_spin_locked(&con->writequeue_lock);

	if (!test_bit(CF_IO_STOP, &con->flags) &&
	    !test_bit(CF_APP_LIMITED, &con->flags) &&
	    !test_and_set_bit(CF_SEND_PENDING, &con->flags))
		queue_work(io_workqueue, &con->swork);
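
/*
 * Queue the receive worker for @con unless I/O is stopped or a receive is
 * already pending. The socket lock must be held (this is called from the
 * sk_data_ready callback path).
 */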
static void lowcomms_queue_rwork(struct connection *con)
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk));

	if (!test_bit(CF_IO_STOP, &con->flags) &&
	    !test_and_set_bit(CF_RECV_PENDING, &con->flags))
		queue_work(io_workqueue, &con->rwork);

static void writequeue_entry_ctor(void *data)
	struct writequeue_entry *entry = data;

	INIT_LIST_HEAD(&entry->msgs);

struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
	return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
				 0, 0, writequeue_entry_ctor);

struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
	return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
/* writequeue_lock must be held */
static struct writequeue_entry *con_next_wq(struct connection *con)
	struct writequeue_entry *e;

	e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
	/* if len is zero there is nothing to send; if there are users still
	 * filling buffers, wait until they are done so we can send more.
	 */
	if (!e || e->users || e->len == 0)
static struct connection *__find_con(int nodeid, int r)
	struct connection *con;

	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)

static void dlm_con_init(struct connection *con, int nodeid)
	con->nodeid = nodeid;
	init_rwsem(&con->sock_lock);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);
	spin_lock_init(&con->addrs_lock);
	init_waitqueue_head(&con->shutdown_wait);
/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
	struct connection *con, *tmp;

	r = nodeid_hash(nodeid);
	con = __find_con(nodeid, r);

	con = kzalloc(sizeof(*con), alloc);

	dlm_con_init(con, nodeid);

	spin_lock(&connections_lock);
	/* Because multiple workqueues/threads call this function it can
	 * race on multiple CPUs. Instead of locking the hot path
	 * __find_con() we just re-check, in the rare case of recently
	 * added nodes, under protection of connections_lock. If that is
	 * the case we abort our connection creation and return the
	 * existing connection.
	 */
	tmp = __find_con(nodeid, r);
		spin_unlock(&connections_lock);

	hlist_add_head_rcu(&con->list, &connection_hash[r]);
	spin_unlock(&connections_lock);
static int addr_compare(const struct sockaddr_storage *x,
			const struct sockaddr_storage *y)
	switch (x->ss_family) {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
		if (sinx->sin_port != siny->sin_port)

		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
		if (sinx->sin6_port != siny->sin6_port)

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr,
	struct sockaddr_storage sas;
	struct connection *con;

	if (!dlm_local_count)

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
		srcu_read_unlock(&connections_srcu, idx);

	spin_lock(&con->addrs_lock);
	if (!con->addr_count) {
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);

	memcpy(&sas, &con->addr[con->curr_addr_index],
	       sizeof(struct sockaddr_storage));

		con->curr_addr_index++;
		if (con->curr_addr_index == con->addr_count)
			con->curr_addr_index = 0;

	spin_unlock(&con->addrs_lock);

	memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	srcu_read_unlock(&connections_srcu, idx);

	if (dlm_local_addr[0].ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;

	srcu_read_unlock(&connections_srcu, idx);

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			WARN_ON_ONCE(!con->addr_count);

			spin_lock(&con->addrs_lock);
			for (addr_i = 0; addr_i < con->addr_count; addr_i++) {
				if (addr_compare(&con->addr[addr_i], addr)) {
					*nodeid = con->nodeid;
					spin_unlock(&con->addrs_lock);
					srcu_read_unlock(&connections_srcu, idx);

			spin_unlock(&con->addrs_lock);

	srcu_read_unlock(&connections_srcu, idx);
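
/*
 * Return true if @addr is already registered for @con; called from
 * dlm_lowcomms_addr() below with con->addrs_lock held.
 */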
static bool dlm_lowcomms_con_has_addr(const struct connection *con,
				      const struct sockaddr_storage *addr)
	for (i = 0; i < con->addr_count; i++) {
		if (addr_compare(&con->addr[i], addr))

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, GFP_NOFS);
		srcu_read_unlock(&connections_srcu, idx);

	spin_lock(&con->addrs_lock);
	if (!con->addr_count) {
		memcpy(&con->addr[0], addr, sizeof(*addr));
		con->mark = dlm_config.ci_mark;
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);

	ret = dlm_lowcomms_con_has_addr(con, addr);
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);

	if (con->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);

	memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr));
	srcu_read_unlock(&connections_srcu, idx);
	spin_unlock(&con->addrs_lock);
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
	struct connection *con = sock2con(sk);

	trace_sk_data_ready(sk);

	set_bit(CF_RECV_INTR, &con->flags);
	lowcomms_queue_rwork(con);

static void lowcomms_write_space(struct sock *sk)
	struct connection *con = sock2con(sk);

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	spin_lock_bh(&con->writequeue_lock);
	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);

	lowcomms_queue_swork(con);
	spin_unlock_bh(&con->writequeue_lock);
static void lowcomms_state_change(struct sock *sk)
	/* The SCTP layer does not call sk_data_ready when the connection
	 * is done, so we catch the signal here.
	 */
	if (sk->sk_shutdown == RCV_SHUTDOWN)
		lowcomms_data_ready(sk);

static void lowcomms_listen_data_ready(struct sock *sk)
	trace_sk_data_ready(sk);

	queue_work(io_workqueue, &listen_con.rwork);
int dlm_lowcomms_connect_node(int nodeid)
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);

	down_read(&con->sock_lock);
		spin_lock_bh(&con->writequeue_lock);
		lowcomms_queue_swork(con);
		spin_unlock_bh(&con->writequeue_lock);

	up_read(&con->sock_lock);
	srcu_read_unlock(&connections_srcu, idx);

int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
		srcu_read_unlock(&connections_srcu, idx);

	spin_lock(&con->addrs_lock);
	spin_unlock(&con->addrs_lock);
	srcu_read_unlock(&connections_srcu, idx);
static void lowcomms_error_report(struct sock *sk)
	struct connection *con = sock2con(sk);
	struct inet_sock *inet;

	switch (sk->sk_family) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, dport %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &inet->inet_daddr,
				   ntohs(inet->inet_dport), sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));
#if IS_ENABLED(CONFIG_IPV6)
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI6c, "
				   "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sk->sk_v6_daddr,
				   ntohs(inet->inet_dport), sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "invalid socket family %d set, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   sk->sk_family, sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));

	dlm_midcomms_unack_msg_resend(con->nodeid);

	listen_sock.sk_error_report(sk);

static void restore_callbacks(struct sock *sk)
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk));

	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
	struct sock *sk = sock->sk;

	sk->sk_user_data = con;
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	if (dlm_config.ci_protocol == DLM_PROTO_SCTP)
		sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_use_task_frag = false;
	sk->sk_error_report = lowcomms_error_report;
/* Add the port number to an IPv6 or 4 sockaddr and return the address
 * length.
 */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
	saddr->ss_family = dlm_local_addr[0].ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);

	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
static void dlm_page_release(struct kref *kref)
	struct writequeue_entry *e = container_of(kref, struct writequeue_entry,

	__free_page(e->page);
	dlm_free_writequeue(e);

static void dlm_msg_release(struct kref *kref)
	struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);

	kref_put(&msg->entry->ref, dlm_page_release);
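
/*
 * Drop every dlm_msg still attached to a writequeue entry (undoing any
 * pending retransmit reference as well) and put the entry itself.
 */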
static void free_entry(struct writequeue_entry *e)
	struct dlm_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
			msg->orig_msg->retransmit = false;
			kref_put(&msg->orig_msg->ref, dlm_msg_release);

		list_del(&msg->list);
		kref_put(&msg->ref, dlm_msg_release);

	kref_put(&e->ref, dlm_page_release);

static void dlm_close_sock(struct socket **sock)
	lock_sock((*sock)->sk);
	restore_callbacks((*sock)->sk);
	release_sock((*sock)->sk);
static void allow_connection_io(struct connection *con)
		clear_bit(CF_IO_STOP, &con->othercon->flags);
	clear_bit(CF_IO_STOP, &con->flags);

static void stop_connection_io(struct connection *con)
		stop_connection_io(con->othercon);

	spin_lock_bh(&con->writequeue_lock);
	set_bit(CF_IO_STOP, &con->flags);
	spin_unlock_bh(&con->writequeue_lock);

	down_write(&con->sock_lock);
		lock_sock(con->sock->sk);
		restore_callbacks(con->sock->sk);
		release_sock(con->sock->sk);

	up_write(&con->sock_lock);

	cancel_work_sync(&con->swork);
	cancel_work_sync(&con->rwork);

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
	struct writequeue_entry *e;

	if (con->othercon && and_other)
		close_connection(con->othercon, false);

	down_write(&con->sock_lock);
		up_write(&con->sock_lock);

	dlm_close_sock(&con->sock);
	/* if we have sent a writequeue entry only part way, drop the
	 * whole entry: after a reconnect we must not start in the
	 * middle of a message, which would confuse the other end.
	 *
	 * we can always drop messages because of retransmits, but what we
	 * cannot allow is to transmit half messages which may then be
	 * processed.
	 *
	 * our policy is to start from a clean state on disconnect; we don't
	 * know what has been sent/received at the transport layer in this
	 * case.
	 */
	spin_lock_bh(&con->writequeue_lock);
	if (!list_empty(&con->writequeue)) {
		e = list_first_entry(&con->writequeue, struct writequeue_entry,

	spin_unlock_bh(&con->writequeue_lock);

	con->rx_leftover = 0;

	clear_bit(CF_APP_LIMITED, &con->flags);
	clear_bit(CF_RECV_PENDING, &con->flags);
	clear_bit(CF_SEND_PENDING, &con->flags);
	up_write(&con->sock_lock);
static void shutdown_connection(struct connection *con, bool and_other)
	if (con->othercon && and_other)
		shutdown_connection(con->othercon, false);

	flush_workqueue(io_workqueue);
	down_read(&con->sock_lock);
	/* nothing to shutdown */
		up_read(&con->sock_lock);

	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
	up_read(&con->sock_lock);
		log_print("Connection %p failed to shutdown: %d will force close",

	ret = wait_event_timeout(con->shutdown_wait, !con->sock,
				 DLM_SHUTDOWN_WAIT_TIMEOUT);
		log_print("Connection %p shutdown timed out, will force close",

	close_connection(con, false);

static struct processqueue_entry *new_processqueue_entry(int nodeid,
	struct processqueue_entry *pentry;

	pentry = kmalloc(sizeof(*pentry), GFP_NOFS);

	pentry->buf = kmalloc(buflen, GFP_NOFS);

	pentry->nodeid = nodeid;
static void free_processqueue_entry(struct processqueue_entry *pentry)

struct dlm_processed_nodes {

	struct list_head list;

static void process_dlm_messages(struct work_struct *work)
	struct processqueue_entry *pentry;

	spin_lock(&processqueue_lock);
	pentry = list_first_entry_or_null(&processqueue,
					  struct processqueue_entry, list);
	if (WARN_ON_ONCE(!pentry)) {
		process_dlm_messages_pending = false;
		spin_unlock(&processqueue_lock);

	list_del(&pentry->list);
	atomic_dec(&processqueue_count);
	spin_unlock(&processqueue_lock);

	dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
	free_processqueue_entry(pentry);

	spin_lock(&processqueue_lock);
	pentry = list_first_entry_or_null(&processqueue,
					  struct processqueue_entry, list);
		process_dlm_messages_pending = false;
		spin_unlock(&processqueue_lock);

	list_del(&pentry->list);
	atomic_dec(&processqueue_count);
	spin_unlock(&processqueue_lock);
/* Data received from remote end */
static int receive_from_sock(struct connection *con, int buflen)
	struct processqueue_entry *pentry;
	int ret, buflen_real;

	pentry = new_processqueue_entry(con->nodeid, buflen);
		return DLM_IO_RESCHED;

	memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover);

	/* calculate the new buffer parameters, taking the last receive and
	 * possible leftover bytes into account
	 */
	iov.iov_base = pentry->buf + con->rx_leftover;
	iov.iov_len = buflen - con->rx_leftover;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	clear_bit(CF_RECV_INTR, &con->flags);

	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
	trace_dlm_recv(con->nodeid, ret);
	if (ret == -EAGAIN) {
		lock_sock(con->sock->sk);
		if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) {
			release_sock(con->sock->sk);

		clear_bit(CF_RECV_PENDING, &con->flags);
		release_sock(con->sock->sk);
		free_processqueue_entry(pentry);
	} else if (ret == 0) {
		/* close will clear CF_RECV_PENDING */
		free_processqueue_entry(pentry);
	} else if (ret < 0) {
		free_processqueue_entry(pentry);

	/* new buflen according to the bytes read plus the leftover from the
	 * last receive
	 */
	buflen_real = ret + con->rx_leftover;
	ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf,
		free_processqueue_entry(pentry);

	pentry->buflen = ret;

	/* calculate the leftover bytes after processing and move them to the
	 * beginning of the receive buffer, so that on the next receive the
	 * full message starts at the beginning of the buffer.
	 */
	con->rx_leftover = buflen_real - ret;
	memmove(con->rx_leftover_buf, pentry->buf + ret,

	spin_lock(&processqueue_lock);
	ret = atomic_inc_return(&processqueue_count);
	list_add_tail(&pentry->list, &processqueue);
	if (!process_dlm_messages_pending) {
		process_dlm_messages_pending = true;
		queue_work(process_workqueue, &process_work);

	spin_unlock(&processqueue_lock);

	if (ret > DLM_MAX_PROCESS_BUFFERS)

	return DLM_IO_SUCCESS;
/* Listening socket is busy, accept a connection */
static int accept_from_sock(void)
	struct sockaddr_storage peeraddr;
	int len, idx, result, nodeid;
	struct connection *newcon;
	struct socket *newsock;

	result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK);
	if (result == -EAGAIN)

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
		result = -ECONNABORTED;

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
		switch (peeraddr.ss_family) {
			struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr;

			log_print("connect from non cluster IPv4 node %pI4",
#if IS_ENABLED(CONFIG_IPV6)
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr;

			log_print("connect from non cluster IPv6 node %pI6c",
			log_print("invalid family from non cluster node");

		sock_release(newsock);

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	idx = srcu_read_lock(&connections_srcu);
	newcon = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!newcon)) {
		srcu_read_unlock(&connections_srcu, idx);

	sock_set_mark(newsock->sk, mark);

	down_write(&newcon->sock_lock);
		struct connection *othercon = newcon->othercon;

			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
				log_print("failed to allocate incoming socket");
				up_write(&newcon->sock_lock);
				srcu_read_unlock(&connections_srcu, idx);

			dlm_con_init(othercon, nodeid);
			lockdep_set_subclass(&othercon->sock_lock, 1);
			newcon->othercon = othercon;
			set_bit(CF_IS_OTHERCON, &othercon->flags);

			/* close other sock con if we have something new */
			close_connection(othercon, false);

		down_write(&othercon->sock_lock);
		add_sock(newsock, othercon);

		/* check if we received something while adding */
		lock_sock(othercon->sock->sk);
		lowcomms_queue_rwork(othercon);
		release_sock(othercon->sock->sk);
		up_write(&othercon->sock_lock);

		/* accept copies the sk after we've saved the callbacks, so we
		 * don't want to save them a second time or comm errors will
		 * result in calling sk_error_report recursively.
		 */
		add_sock(newsock, newcon);

		/* check if we received something while adding */
		lock_sock(newcon->sock->sk);
		lowcomms_queue_rwork(newcon);
		release_sock(newcon->sock->sk);

	up_write(&newcon->sock_lock);
	srcu_read_unlock(&connections_srcu, idx);

	return DLM_IO_SUCCESS;

	sock_release(newsock);
/**
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
	e->offset += completed;
	e->len -= completed;
	/* signal that page was half way transmitted */

	if (e->len == 0 && e->users == 0)
/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct socket *sock, uint16_t port)
	struct sockaddr_storage localaddr;
	struct sockaddr *addr = (struct sockaddr *)&localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

			result = kernel_bind(sock, addr, addr_len);
			result = sock_bind_add(sock->sk, addr, addr_len);

			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);

/* Get local addresses */
static void init_local(void)
	struct sockaddr_storage sas;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))

		memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
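
/*
 * Allocate a new writequeue entry and its backing page for @con; returns
 * NULL if either allocation fails.
 */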
static struct writequeue_entry *new_writequeue_entry(struct connection *con)
	struct writequeue_entry *entry;

	entry = dlm_allocate_writequeue();

	entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
		dlm_free_writequeue(entry);

	entry->dirty = false;

	kref_init(&entry->ref);

static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
					     char **ppc, void (*cb)(void *data),
	struct writequeue_entry *e;

	spin_lock_bh(&con->writequeue_lock);
	if (!list_empty(&con->writequeue)) {
		e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
		if (DLM_WQ_REMAIN_BYTES(e) >= len) {

			*ppc = page_address(e->page) + e->end;

	e = new_writequeue_entry(con);

	*ppc = page_address(e->page);

	list_add_tail(&e->list, &con->writequeue);

	spin_unlock_bh(&con->writequeue_lock);
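
/*
 * Allocate a dlm_msg and reserve @len bytes of writequeue space on @con;
 * *ppc is pointed at the reserved area so the caller can fill in the
 * message before committing it.
 */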
static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
						gfp_t allocation, char **ppc,
						void (*cb)(void *data),
	struct writequeue_entry *e;
	struct dlm_msg *msg;

	msg = dlm_allocate_msg(allocation);

	kref_init(&msg->ref);

	e = new_wq_entry(con, len, ppc, cb, data);

	msg->retransmit = false;
	msg->orig_msg = NULL;

/* avoid a false positive for nodes_srcu; the unlock happens in
 * dlm_lowcomms_commit_msg(), which must be called on success
 */
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
				     char **ppc, void (*cb)(void *data),
	struct connection *con;
	struct dlm_msg *msg;

	if (len > DLM_MAX_SOCKET_BUFSIZE ||
	    len < sizeof(struct dlm_header)) {
		BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
		log_print("failed to allocate a buffer of size %d", len);

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);

	msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
		srcu_read_unlock(&connections_srcu, idx);

	/* for dlm_lowcomms_commit_msg() */
	kref_get(&msg->ref);
	/* we assume that on success commit must be called */
static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
	struct writequeue_entry *e = msg->entry;
	struct connection *con = e->con;

	spin_lock_bh(&con->writequeue_lock);
	kref_get(&msg->ref);
	list_add(&msg->list, &e->msgs);

	e->len = DLM_WQ_LENGTH_BYTES(e);

	lowcomms_queue_swork(con);

	spin_unlock_bh(&con->writequeue_lock);

/* avoid a false positive for nodes_srcu; the matching lock was taken in
 * dlm_lowcomms_new_msg()
 */
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
	_dlm_lowcomms_commit_msg(msg);
	srcu_read_unlock(&connections_srcu, msg->idx);
	/* because dlm_lowcomms_new_msg() */
	kref_put(&msg->ref, dlm_msg_release);

void dlm_lowcomms_put_msg(struct dlm_msg *msg)
	kref_put(&msg->ref, dlm_msg_release);

/* does not hold connections_srcu; used only from lowcomms_error_report */
int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
	struct dlm_msg *msg_resend;

	if (msg->retransmit)

	msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
					      GFP_ATOMIC, &ppc, NULL, NULL);

	msg->retransmit = true;
	kref_get(&msg->ref);
	msg_resend->orig_msg = msg;

	memcpy(ppc, msg->ppc, msg->len);
	_dlm_lowcomms_commit_msg(msg_resend);
	dlm_lowcomms_put_msg(msg_resend);
/* Send a message */
static int send_to_sock(struct connection *con)
	struct writequeue_entry *e;
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
	int len, offset, ret;

	spin_lock_bh(&con->writequeue_lock);
	e = con_next_wq(con);
		clear_bit(CF_SEND_PENDING, &con->flags);
		spin_unlock_bh(&con->writequeue_lock);

	WARN_ON_ONCE(len == 0 && e->users == 0);
	spin_unlock_bh(&con->writequeue_lock);

	bvec_set_page(&bvec, e->page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
	ret = sock_sendmsg(con->sock, &msg);
	trace_dlm_send(con->nodeid, ret);
	if (ret == -EAGAIN || ret == 0) {
		lock_sock(con->sock->sk);
		spin_lock_bh(&con->writequeue_lock);
		if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
		    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
			/* Notify TCP that we're limited by the
			 * application window size.
			 */
			set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags);
			con->sock->sk->sk_write_pending++;

			clear_bit(CF_SEND_PENDING, &con->flags);
			spin_unlock_bh(&con->writequeue_lock);
			release_sock(con->sock->sk);

			/* wait for write_space() event */

		spin_unlock_bh(&con->writequeue_lock);
		release_sock(con->sock->sk);

		return DLM_IO_RESCHED;
	} else if (ret < 0) {

	spin_lock_bh(&con->writequeue_lock);
	writequeue_entry_complete(e, ret);
	spin_unlock_bh(&con->writequeue_lock);

	return DLM_IO_SUCCESS;
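
/*
 * Drop all pending writequeue entries of a connection; used when a
 * connection is closed for good (node removal or shutdown).
 */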
static void clean_one_writequeue(struct connection *con)
	struct writequeue_entry *e, *safe;

	spin_lock_bh(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {

	spin_unlock_bh(&con->writequeue_lock);

static void connection_release(struct rcu_head *rcu)
	struct connection *con = container_of(rcu, struct connection, rcu);

	WARN_ON_ONCE(!list_empty(&con->writequeue));
	WARN_ON_ONCE(con->sock);

/* Called from recovery when it knows that a node has
int dlm_lowcomms_close(int nodeid)
	struct connection *con;

	log_print("closing connection to node %d", nodeid);

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);

	stop_connection_io(con);
	log_print("io handling for node: %d stopped", nodeid);
	close_connection(con, true);

	spin_lock(&connections_lock);
	hlist_del_rcu(&con->list);
	spin_unlock(&connections_lock);

	clean_one_writequeue(con);
	call_srcu(&connections_srcu, &con->rcu, connection_release);
	if (con->othercon) {
		clean_one_writequeue(con->othercon);
		call_srcu(&connections_srcu, &con->othercon->rcu, connection_release);

	srcu_read_unlock(&connections_srcu, idx);
	/* for debugging we print when we are done, to compare with other
	 * messages in between. This function needs to be correctly synchronized
	log_print("closing connection to node %d done", nodeid);
/* Receive worker function */
static void process_recv_sockets(struct work_struct *work)
	struct connection *con = container_of(work, struct connection, rwork);

	down_read(&con->sock_lock);
		up_read(&con->sock_lock);

	buflen = READ_ONCE(dlm_config.ci_buffer_size);
		ret = receive_from_sock(con, buflen);
	} while (ret == DLM_IO_SUCCESS);
	up_read(&con->sock_lock);

		/* CF_RECV_PENDING cleared */
		close_connection(con, false);
		wake_up(&con->shutdown_wait);
		/* CF_RECV_PENDING cleared */

		flush_workqueue(process_workqueue);

	case DLM_IO_RESCHED:
		queue_work(io_workqueue, &con->rwork);
		/* CF_RECV_PENDING not cleared */

		if (test_bit(CF_IS_OTHERCON, &con->flags)) {
			close_connection(con, false);

			spin_lock_bh(&con->writequeue_lock);
			lowcomms_queue_swork(con);
			spin_unlock_bh(&con->writequeue_lock);
		/* CF_RECV_PENDING cleared for othercon; we trigger the send
		 * queue if not already done, and process_send_sockets will
		 * handle it
		 */

static void process_listen_recv_socket(struct work_struct *work)
	if (WARN_ON_ONCE(!listen_con.sock))

		ret = accept_from_sock();
	} while (ret == DLM_IO_SUCCESS);

		log_print("critical error accepting connection: %d", ret);
static int dlm_connect(struct connection *con)
	struct sockaddr_storage addr;
	int result, addr_len;
	struct socket *sock;

	memset(&addr, 0, sizeof(addr));
	result = nodeid_to_addr(con->nodeid, &addr, NULL,
				dlm_proto_ops->try_new_addr, &mark);
		log_print("no address for nodeid %d", con->nodeid);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
				  SOCK_STREAM, dlm_proto_ops->proto, &sock);

	sock_set_mark(sock->sk, mark);
	dlm_proto_ops->sockopts(sock);

	result = dlm_proto_ops->bind(sock);

	add_sock(sock, con);

	log_print_ratelimited("connecting to %d", con->nodeid);
	make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
	result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,

	dlm_close_sock(&con->sock);
/* Send worker function */
static void process_send_sockets(struct work_struct *work)
	struct connection *con = container_of(work, struct connection, swork);

	WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags));

	down_read(&con->sock_lock);
		up_read(&con->sock_lock);
		down_write(&con->sock_lock);

			ret = dlm_connect(con);

				/* avoid spamming resched on this connection;
				 * we might be able to switch to a state_change
				 * event based mechanism once established
				 */

				/* CF_SEND_PENDING not cleared */
				up_write(&con->sock_lock);
				log_print("connect to node %d try %d error %d",
					  con->nodeid, con->retries++, ret);

				/* For now we try forever to reconnect. In the
				 * future we should send an event to the cluster
				 * manager to fence itself after a certain amount
				 */
				queue_work(io_workqueue, &con->swork);

		downgrade_write(&con->sock_lock);

		ret = send_to_sock(con);
	} while (ret == DLM_IO_SUCCESS);
	up_read(&con->sock_lock);

		/* CF_SEND_PENDING cleared */

	case DLM_IO_RESCHED:
		/* CF_SEND_PENDING not cleared */
		queue_work(io_workqueue, &con->swork);

			close_connection(con, false);

			/* CF_SEND_PENDING cleared */
			spin_lock_bh(&con->writequeue_lock);
			lowcomms_queue_swork(con);
			spin_unlock_bh(&con->writequeue_lock);
static void work_stop(void)
		destroy_workqueue(io_workqueue);
		io_workqueue = NULL;

	if (process_workqueue) {
		destroy_workqueue(process_workqueue);
		process_workqueue = NULL;

static int work_start(void)
	io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM |
	if (!io_workqueue) {
		log_print("can't start dlm_io");

	/* ordered dlm message process queue,
	 * should be converted to a tasklet
	 */
	process_workqueue = alloc_ordered_workqueue("dlm_process",
						    WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!process_workqueue) {
		log_print("can't start dlm_process");
		destroy_workqueue(io_workqueue);
		io_workqueue = NULL;
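
/*
 * Stop accepting new connections, then shut down and drain every node
 * connection; the writequeues are cleaned so nothing is left half sent.
 */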
void dlm_lowcomms_shutdown(void)
	struct connection *con;

	/* stop lowcomms_listen_data_ready calls */
	lock_sock(listen_con.sock->sk);
	listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready;
	release_sock(listen_con.sock->sk);

	cancel_work_sync(&listen_con.rwork);
	dlm_close_sock(&listen_con.sock);

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			shutdown_connection(con, true);
			stop_connection_io(con);
			flush_workqueue(process_workqueue);
			close_connection(con, true);

			clean_one_writequeue(con);
				clean_one_writequeue(con->othercon);
			allow_connection_io(con);

	srcu_read_unlock(&connections_srcu, idx);

void dlm_lowcomms_stop(void)
	dlm_proto_ops = NULL;

static int dlm_listen_for_all(void)
	struct socket *sock;

	log_print("Using %s for communications",
		  dlm_proto_ops->name);

	result = dlm_proto_ops->listen_validate();

	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
		log_print("Can't create comms socket: %d", result);

	sock_set_mark(sock->sk, dlm_config.ci_mark);
	dlm_proto_ops->listen_sockopts(sock);

	result = dlm_proto_ops->listen_bind(sock);

	lock_sock(sock->sk);
	listen_sock.sk_data_ready = sock->sk->sk_data_ready;
	listen_sock.sk_write_space = sock->sk->sk_write_space;
	listen_sock.sk_error_report = sock->sk->sk_error_report;
	listen_sock.sk_state_change = sock->sk->sk_state_change;

	listen_con.sock = sock;

	sock->sk->sk_allocation = GFP_NOFS;
	sock->sk->sk_use_task_frag = false;
	sock->sk->sk_data_ready = lowcomms_listen_data_ready;
	release_sock(sock->sk);

	result = sock->ops->listen(sock, 128);
		dlm_close_sock(&listen_con.sock);
static int dlm_tcp_bind(struct socket *sock)
	struct sockaddr_storage src_addr;
	int result, addr_len;

	/* Bind to our cluster-known address connecting to avoid
	memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);

	result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
		/* This *may* not indicate a critical error */
		log_print("could not bind for connect: %d", result);

static int dlm_tcp_connect(struct connection *con, struct socket *sock,
			   struct sockaddr *addr, int addr_len)
	return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);

static int dlm_tcp_listen_validate(void)
	/* We don't support multi-homed hosts */
	if (dlm_local_count > 1) {
		log_print("TCP protocol can't handle multi-homed hosts, try SCTP");

static void dlm_tcp_sockopts(struct socket *sock)
	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

static void dlm_tcp_listen_sockopts(struct socket *sock)
	dlm_tcp_sockopts(sock);
	sock_set_reuseaddr(sock->sk);

static int dlm_tcp_listen_bind(struct socket *sock)
	/* Bind to our port */
	make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
	return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0],

static const struct dlm_proto_ops dlm_tcp_ops = {
	.proto = IPPROTO_TCP,
	.connect = dlm_tcp_connect,
	.sockopts = dlm_tcp_sockopts,
	.bind = dlm_tcp_bind,
	.listen_validate = dlm_tcp_listen_validate,
	.listen_sockopts = dlm_tcp_listen_sockopts,
	.listen_bind = dlm_tcp_listen_bind,
static int dlm_sctp_bind(struct socket *sock)
	return sctp_bind_addrs(sock, 0);

static int dlm_sctp_connect(struct connection *con, struct socket *sock,
			    struct sockaddr *addr, int addr_len)
	/*
	 * Make sock->ops->connect() return within the specified time,
	 * since the O_NONBLOCK argument to connect() does not work here;
	 * afterwards, restore the default value of this attribute.
	 */
	sock_set_sndtimeo(sock->sk, 5);
	ret = sock->ops->connect(sock, addr, addr_len, 0);
	sock_set_sndtimeo(sock->sk, 0);

static int dlm_sctp_listen_validate(void)
	if (!IS_ENABLED(CONFIG_IP_SCTP)) {
		log_print("SCTP is not enabled by this kernel");

	request_module("sctp");

static int dlm_sctp_bind_listen(struct socket *sock)
	return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);

static void dlm_sctp_sockopts(struct socket *sock)
	/* Turn off Nagle's algorithm */
	sctp_sock_set_nodelay(sock->sk);
	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
static const struct dlm_proto_ops dlm_sctp_ops = {
	.proto = IPPROTO_SCTP,
	.try_new_addr = true,
	.connect = dlm_sctp_connect,
	.sockopts = dlm_sctp_sockopts,
	.bind = dlm_sctp_bind,
	.listen_validate = dlm_sctp_listen_validate,
	.listen_sockopts = dlm_sctp_sockopts,
	.listen_bind = dlm_sctp_bind_listen,

int dlm_lowcomms_start(void)
	if (!dlm_local_count) {
		log_print("no local IP address has been set");

	error = work_start();

	/* Start listening */
	switch (dlm_config.ci_protocol) {
		dlm_proto_ops = &dlm_tcp_ops;
	case DLM_PROTO_SCTP:
		dlm_proto_ops = &dlm_sctp_ops;
		log_print("Invalid protocol identifier %d set",
			  dlm_config.ci_protocol);
		goto fail_proto_ops;

	error = dlm_listen_for_all();

	dlm_proto_ops = NULL;

void dlm_lowcomms_init(void)
	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);

void dlm_lowcomms_exit(void)
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			spin_lock(&connections_lock);
			hlist_del_rcu(&con->list);
			spin_unlock(&connections_lock);

				call_srcu(&connections_srcu, &con->othercon->rcu,
					  connection_release);
			call_srcu(&connections_srcu, &con->rcu, connection_release);

	srcu_read_unlock(&connections_srcu, idx);