2 * linux/net/sunrpc/xprtsock.c
4 * Client-side transport implementation for sockets.
6 * TCP callback races fixes (C) 1998 Red Hat
7 * TCP send fixes (C) 1998 Red Hat
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 * Rewrite of large parts of the code in order to stabilize TCP stuff.
12 * Fix behaviour when socket buffer is full.
13 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
18 * <gilles.quillard@bull.net>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/capability.h>
26 #include <linux/pagemap.h>
27 #include <linux/errno.h>
28 #include <linux/socket.h>
30 #include <linux/net.h>
33 #include <linux/udp.h>
34 #include <linux/tcp.h>
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/sched.h>
38 #include <linux/sunrpc/svcsock.h>
39 #include <linux/sunrpc/xprtsock.h>
40 #include <linux/file.h>
41 #ifdef CONFIG_SUNRPC_BACKCHANNEL
42 #include <linux/sunrpc/bc_xprt.h>
46 #include <net/checksum.h>
50 #include <trace/events/sunrpc.h>
54 static void xs_close(struct rpc_xprt *xprt);
59 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
60 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
61 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
63 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
64 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
66 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
68 #define XS_TCP_LINGER_TO (15U * HZ)
69 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
72 * We can register our own files under /proc/sys/sunrpc by
73 * calling register_sysctl_table() again. The files in that
74 * directory become the union of all files registered there.
76 * We simply need to make sure that we don't collide with
77 * someone else's file names!
80 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
81 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
82 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
83 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
84 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
86 static struct ctl_table_header *sunrpc_table_header;
89 * FIXME: changing the UDP slot table size should also resize the UDP
90 * socket buffers for existing UDP transports
92 static struct ctl_table xs_tunables_table[] = {
94 .procname = "udp_slot_table_entries",
95 .data = &xprt_udp_slot_table_entries,
96 .maxlen = sizeof(unsigned int),
98 .proc_handler = proc_dointvec_minmax,
99 .extra1 = &min_slot_table_size,
100 .extra2 = &max_slot_table_size
103 .procname = "tcp_slot_table_entries",
104 .data = &xprt_tcp_slot_table_entries,
105 .maxlen = sizeof(unsigned int),
107 .proc_handler = proc_dointvec_minmax,
108 .extra1 = &min_slot_table_size,
109 .extra2 = &max_slot_table_size
112 .procname = "tcp_max_slot_table_entries",
113 .data = &xprt_max_tcp_slot_table_entries,
114 .maxlen = sizeof(unsigned int),
116 .proc_handler = proc_dointvec_minmax,
117 .extra1 = &min_slot_table_size,
118 .extra2 = &max_tcp_slot_table_limit
121 .procname = "min_resvport",
122 .data = &xprt_min_resvport,
123 .maxlen = sizeof(unsigned int),
125 .proc_handler = proc_dointvec_minmax,
126 .extra1 = &xprt_min_resvport_limit,
127 .extra2 = &xprt_max_resvport_limit
130 .procname = "max_resvport",
131 .data = &xprt_max_resvport,
132 .maxlen = sizeof(unsigned int),
134 .proc_handler = proc_dointvec_minmax,
135 .extra1 = &xprt_min_resvport_limit,
136 .extra2 = &xprt_max_resvport_limit
139 .procname = "tcp_fin_timeout",
140 .data = &xs_tcp_fin_timeout,
141 .maxlen = sizeof(xs_tcp_fin_timeout),
143 .proc_handler = proc_dointvec_jiffies,
148 static struct ctl_table sunrpc_table[] = {
150 .procname = "sunrpc",
152 .child = xs_tunables_table
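/*
 * Once the table above is registered via register_sysctl_table(), the
 * tunables appear as ordinary procfs files under /proc/sys/sunrpc, e.g.
 * /proc/sys/sunrpc/udp_slot_table_entries or /proc/sys/sunrpc/tcp_fin_timeout.
 * For the slot table and resvport entries, proc_dointvec_minmax enforces the
 * extra1/extra2 bounds on writes, while tcp_fin_timeout is parsed as a
 * jiffies value by proc_dointvec_jiffies.
 */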
160 * Wait duration for a reply from the RPC portmapper.
162 #define XS_BIND_TO (60U * HZ)
165 * Delay if a UDP socket connect error occurs. This is most likely some
166 * kind of resource problem on the local host.
168 #define XS_UDP_REEST_TO (2U * HZ)
171 * The reestablish timeout allows clients to delay for a bit before attempting
172 * to reconnect to a server that just dropped our connection.
174 * We implement an exponential backoff when trying to reestablish a TCP
175 * transport connection with the server. Some servers like to drop a TCP
176 * connection when they are overworked, so we start with a short timeout and
177 * increase over time if the server is down or not responding.
179 #define XS_TCP_INIT_REEST_TO (3U * HZ)
180 #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
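/*
 * With the doubling in xs_connect() below, the reconnect delay therefore
 * runs 3s, 6s, 12s, 24s, ... and is clamped at XS_TCP_MAX_REEST_TO, so a
 * server that stays unreachable costs at most one connection attempt every
 * five minutes once the backoff has saturated.
 */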
183 * TCP idle timeout; client drops the transport socket if it is idle
184 * for this long. Note that we also timeout UDP sockets to prevent
185 * holding port numbers when there is no RPC traffic.
187 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
189 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
190 # undef RPC_DEBUG_DATA
191 # define RPCDBG_FACILITY RPCDBG_TRANS
194 #ifdef RPC_DEBUG_DATA
195 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
197 u8 *buf = (u8 *) packet;
200 dprintk("RPC: %s\n", msg);
201 for (j = 0; j < count && j < 128; j += 4) {
205 dprintk("0x%04x ", j);
207 dprintk("%02x%02x%02x%02x ",
208 buf[j], buf[j+1], buf[j+2], buf[j+3]);
213 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
219 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
221 return (struct rpc_xprt *) sk->sk_user_data;
224 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
226 return (struct sockaddr *) &xprt->addr;
229 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
231 return (struct sockaddr_un *) &xprt->addr;
234 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
236 return (struct sockaddr_in *) &xprt->addr;
239 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
241 return (struct sockaddr_in6 *) &xprt->addr;
244 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
246 struct sockaddr *sap = xs_addr(xprt);
247 struct sockaddr_in6 *sin6;
248 struct sockaddr_in *sin;
249 struct sockaddr_un *sun;
252 switch (sap->sa_family) {
254 sun = xs_addr_un(xprt);
255 strlcpy(buf, sun->sun_path, sizeof(buf));
256 xprt->address_strings[RPC_DISPLAY_ADDR] =
257 kstrdup(buf, GFP_KERNEL);
260 (void)rpc_ntop(sap, buf, sizeof(buf));
261 xprt->address_strings[RPC_DISPLAY_ADDR] =
262 kstrdup(buf, GFP_KERNEL);
263 sin = xs_addr_in(xprt);
264 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
267 (void)rpc_ntop(sap, buf, sizeof(buf));
268 xprt->address_strings[RPC_DISPLAY_ADDR] =
269 kstrdup(buf, GFP_KERNEL);
270 sin6 = xs_addr_in6(xprt);
271 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
277 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
280 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
282 struct sockaddr *sap = xs_addr(xprt);
285 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
286 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
288 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
289 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
292 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
293 const char *protocol,
296 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
297 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
298 xs_format_common_peer_addresses(xprt);
299 xs_format_common_peer_ports(xprt);
302 static void xs_update_peer_port(struct rpc_xprt *xprt)
304 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
305 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
307 xs_format_common_peer_ports(xprt);
310 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
314 for (i = 0; i < RPC_DISPLAY_MAX; i++)
316 case RPC_DISPLAY_PROTO:
317 case RPC_DISPLAY_NETID:
320 kfree(xprt->address_strings[i]);
324 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
326 static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
328 struct msghdr msg = {
330 .msg_namelen = addrlen,
331 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
334 .iov_base = vec->iov_base + base,
335 .iov_len = vec->iov_len - base,
338 if (iov.iov_len != 0)
339 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
340 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
343 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
345 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
346 int offset, size_t size, int flags);
348 unsigned int remainder;
351 remainder = xdr->page_len - base;
352 base += xdr->page_base;
353 ppage = xdr->pages + (base >> PAGE_SHIFT);
355 do_sendpage = sock->ops->sendpage;
357 do_sendpage = sock_no_sendpage;
359 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
360 int flags = XS_SENDMSG_FLAGS;
363 if (remainder != 0 || more)
365 err = do_sendpage(sock, *ppage, base, len, flags);
366 if (remainder == 0 || err != len)
380 * xs_sendpages - write pages directly to a socket
381 * @sock: socket to send on
382 * @addr: UDP only -- address of destination
383 * @addrlen: UDP only -- length of destination address
384 * @xdr: buffer containing this request
385 * @base: starting position in the buffer
386 * @zerocopy: true if it is safe to use sendpage()
387 * @sent_p: return the total number of bytes successfully queued for sending
390 static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
392 unsigned int remainder = xdr->len - base;
399 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
405 if (base < xdr->head[0].iov_len || addr != NULL) {
406 unsigned int len = xdr->head[0].iov_len - base;
408 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
409 if (remainder == 0 || err != len)
414 base -= xdr->head[0].iov_len;
416 if (base < xdr->page_len) {
417 unsigned int len = xdr->page_len - base;
419 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
421 if (remainder == 0 || sent != len)
425 base -= xdr->page_len;
427 if (base >= xdr->tail[0].iov_len)
429 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
438 static void xs_nospace_callback(struct rpc_task *task)
440 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
442 transport->inet->sk_write_pending--;
443 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
447 * xs_nospace - place task on wait queue if transmit was incomplete
448 * @task: task to put to sleep
451 static int xs_nospace(struct rpc_task *task)
453 struct rpc_rqst *req = task->tk_rqstp;
454 struct rpc_xprt *xprt = req->rq_xprt;
455 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
456 struct sock *sk = transport->inet;
459 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
460 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
463 /* Protect against races with write_space */
464 spin_lock_bh(&xprt->transport_lock);
466 /* Don't race with disconnect */
467 if (xprt_connected(xprt)) {
468 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
470 * Notify TCP that we're limited by the application window size
473 set_bit(SOCK_NOSPACE, &transport->sock->flags);
474 sk->sk_write_pending++;
475 /* ...and wait for more buffer space */
476 xprt_wait_for_buffer_space(task, xs_nospace_callback);
479 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
483 spin_unlock_bh(&xprt->transport_lock);
485 /* Race breaker in case memory is freed before above code is called */
486 sk->sk_write_space(sk);
491 * Construct a stream transport record marker in @buf.
493 static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
495 u32 reclen = buf->len - sizeof(rpc_fraghdr);
496 rpc_fraghdr *base = buf->head[0].iov_base;
497 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
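/*
 * The marker written above is the standard RPC-over-TCP record mark
 * (RFC 5531 record marking): a single big-endian 32-bit word whose top bit
 * flags the last fragment of the record and whose lower 31 bits carry the
 * fragment length. For example, a final fragment carrying 0x64 bytes of RPC
 * data is preceded by the marker 0x80000064.
 */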
501 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
502 * @task: RPC task that manages the state of an RPC request
505 * 0: The request has been sent
506 * EAGAIN: The socket was blocked, please call again later to
507 * complete the request
508 * ENOTCONN: Caller needs to invoke connect logic then call again
509 * other: Some other error occurred, the request was not sent
511 static int xs_local_send_request(struct rpc_task *task)
513 struct rpc_rqst *req = task->tk_rqstp;
514 struct rpc_xprt *xprt = req->rq_xprt;
515 struct sock_xprt *transport =
516 container_of(xprt, struct sock_xprt, xprt);
517 struct xdr_buf *xdr = &req->rq_snd_buf;
521 xs_encode_stream_record_marker(&req->rq_snd_buf);
523 xs_pktdump("packet data:",
524 req->rq_svec->iov_base, req->rq_svec->iov_len);
526 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status);
531 if (status == -EAGAIN && sock_writeable(transport->inet))
534 if (likely(sent > 0) || status == 0) {
535 req->rq_bytes_sent += sent;
536 req->rq_xmit_bytes_sent += sent;
537 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
538 req->rq_bytes_sent = 0;
548 status = xs_nospace(task);
551 dprintk("RPC: sendmsg returned unrecognized error %d\n",
562 * xs_udp_send_request - write an RPC request to a UDP socket
563 * @task: address of RPC task that manages the state of an RPC request
566 * 0: The request has been sent
567 * EAGAIN: The socket was blocked, please call again later to
568 * complete the request
569 * ENOTCONN: Caller needs to invoke connect logic then call again
570 * other: Some other error occurred, the request was not sent
572 static int xs_udp_send_request(struct rpc_task *task)
574 struct rpc_rqst *req = task->tk_rqstp;
575 struct rpc_xprt *xprt = req->rq_xprt;
576 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
577 struct xdr_buf *xdr = &req->rq_snd_buf;
581 xs_pktdump("packet data:",
582 req->rq_svec->iov_base,
583 req->rq_svec->iov_len);
585 if (!xprt_bound(xprt))
587 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
588 xdr, req->rq_bytes_sent, true, &sent);
590 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
591 xdr->len - req->rq_bytes_sent, status);
593 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
594 if (status == -EPERM)
597 if (status == -EAGAIN && sock_writeable(transport->inet))
600 if (sent > 0 || status == 0) {
601 req->rq_xmit_bytes_sent += sent;
602 if (sent >= req->rq_slen)
604 /* Still some bytes left; set up for a retry later. */
612 /* Should we call xs_close() here? */
615 status = xs_nospace(task);
618 dprintk("RPC: sendmsg returned unrecognized error %d\n",
625 /* When the server has died, an ICMP port unreachable message
626 * prompts ECONNREFUSED. */
627 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
634 * xs_tcp_send_request - write an RPC request to a TCP socket
635 * @task: address of RPC task that manages the state of an RPC request
638 * 0: The request has been sent
639 * EAGAIN: The socket was blocked, please call again later to
640 * complete the request
641 * ENOTCONN: Caller needs to invoke connect logic then call again
642 * other: Some other error occurred, the request was not sent
644 * XXX: In the case of soft timeouts, should we eventually give up
645 * if sendmsg is not able to make progress?
647 static int xs_tcp_send_request(struct rpc_task *task)
649 struct rpc_rqst *req = task->tk_rqstp;
650 struct rpc_xprt *xprt = req->rq_xprt;
651 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
652 struct xdr_buf *xdr = &req->rq_snd_buf;
653 bool zerocopy = true;
657 xs_encode_stream_record_marker(&req->rq_snd_buf);
659 xs_pktdump("packet data:",
660 req->rq_svec->iov_base,
661 req->rq_svec->iov_len);
662 /* Don't use zero copy if this is a resend. If the RPC call
663 * completes while the socket holds a reference to the pages,
664 * then we may end up resending corrupted data.
666 if (task->tk_flags & RPC_TASK_SENT)
669 /* Continue transmitting the packet/record. We must be careful
670 * to cope with writespace callbacks arriving _after_ we have
671 * called sendmsg(). */
674 status = xs_sendpages(transport->sock, NULL, 0, xdr,
675 req->rq_bytes_sent, zerocopy, &sent);
677 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
678 xdr->len - req->rq_bytes_sent, status);
680 /* If we've sent the entire packet, immediately
681 * reset the count of bytes sent. */
682 req->rq_bytes_sent += sent;
683 req->rq_xmit_bytes_sent += sent;
684 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
685 req->rq_bytes_sent = 0;
696 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
702 /* Should we call xs_close() here? */
705 status = xs_nospace(task);
708 dprintk("RPC: sendmsg returned unrecognized error %d\n",
716 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
723 * xs_tcp_release_xprt - clean up after a tcp transmission
727 * This cleans up if an error causes us to abort the transmission of a request.
728 * In this case, the socket may need to be reset in order to avoid confusing the server.
731 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
733 struct rpc_rqst *req;
735 if (task != xprt->snd_task)
739 req = task->tk_rqstp;
742 if (req->rq_bytes_sent == 0)
744 if (req->rq_bytes_sent == req->rq_snd_buf.len)
746 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
748 xprt_release_xprt(xprt, task);
751 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
753 transport->old_data_ready = sk->sk_data_ready;
754 transport->old_state_change = sk->sk_state_change;
755 transport->old_write_space = sk->sk_write_space;
756 transport->old_error_report = sk->sk_error_report;
759 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
761 sk->sk_data_ready = transport->old_data_ready;
762 sk->sk_state_change = transport->old_state_change;
763 sk->sk_write_space = transport->old_write_space;
764 sk->sk_error_report = transport->old_error_report;
767 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
769 smp_mb__before_atomic();
770 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
771 clear_bit(XPRT_CLOSING, &xprt->state);
772 smp_mb__after_atomic();
775 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
777 xs_sock_reset_connection_flags(xprt);
778 /* Mark transport as closed and wake up all pending tasks */
779 xprt_disconnect_done(xprt);
783 * xs_error_report - callback to handle TCP socket state errors
786 * Note: we don't call sock_error() since there may be an rpc_task
787 * using the socket, and so we don't want to clear sk->sk_err.
789 static void xs_error_report(struct sock *sk)
791 struct rpc_xprt *xprt;
794 read_lock_bh(&sk->sk_callback_lock);
795 if (!(xprt = xprt_from_sock(sk)))
801 /* Is this a reset event? */
802 if (sk->sk_state == TCP_CLOSE)
803 xs_sock_mark_closed(xprt);
804 dprintk("RPC: xs_error_report client %p, error=%d...\n",
806 trace_rpc_socket_error(xprt, sk->sk_socket, err);
807 xprt_wake_pending_tasks(xprt, err);
809 read_unlock_bh(&sk->sk_callback_lock);
812 static void xs_reset_transport(struct sock_xprt *transport)
814 struct socket *sock = transport->sock;
815 struct sock *sk = transport->inet;
816 struct rpc_xprt *xprt = &transport->xprt;
821 if (atomic_read(&transport->xprt.swapper))
822 sk_clear_memalloc(sk);
824 kernel_sock_shutdown(sock, SHUT_RDWR);
826 write_lock_bh(&sk->sk_callback_lock);
827 transport->inet = NULL;
828 transport->sock = NULL;
830 sk->sk_user_data = NULL;
832 xs_restore_old_callbacks(transport, sk);
833 xprt_clear_connected(xprt);
834 write_unlock_bh(&sk->sk_callback_lock);
835 xs_sock_reset_connection_flags(xprt);
837 trace_rpc_socket_close(xprt, sock);
842 * xs_close - close a socket
845 * This is used when all requests are complete; i.e., no DRC state remains
846 * on the server we want to save.
848 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
849 * xs_reset_transport() zeroing the socket from underneath a writer.
851 static void xs_close(struct rpc_xprt *xprt)
853 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
855 dprintk("RPC: xs_close xprt %p\n", xprt);
857 xs_reset_transport(transport);
858 xprt->reestablish_timeout = 0;
860 xprt_disconnect_done(xprt);
863 static void xs_inject_disconnect(struct rpc_xprt *xprt)
865 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
867 xprt_disconnect_done(xprt);
870 static void xs_xprt_free(struct rpc_xprt *xprt)
872 xs_free_peer_addresses(xprt);
877 * xs_destroy - prepare to shutdown a transport
878 * @xprt: doomed transport
881 static void xs_destroy(struct rpc_xprt *xprt)
883 struct sock_xprt *transport = container_of(xprt,
884 struct sock_xprt, xprt);
885 dprintk("RPC: xs_destroy xprt %p\n", xprt);
887 cancel_delayed_work_sync(&transport->connect_worker);
890 module_put(THIS_MODULE);
893 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
895 struct xdr_skb_reader desc = {
897 .offset = sizeof(rpc_fraghdr),
898 .count = skb->len - sizeof(rpc_fraghdr),
901 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
909 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
910 * @sk: socket with data to read
912 * Currently this assumes we can read the whole reply in a single gulp.
914 static void xs_local_data_ready(struct sock *sk)
916 struct rpc_task *task;
917 struct rpc_xprt *xprt;
918 struct rpc_rqst *rovr;
920 int err, repsize, copied;
924 read_lock_bh(&sk->sk_callback_lock);
925 dprintk("RPC: %s...\n", __func__);
926 xprt = xprt_from_sock(sk);
930 skb = skb_recv_datagram(sk, 0, 1, &err);
934 repsize = skb->len - sizeof(rpc_fraghdr);
936 dprintk("RPC: impossible RPC reply size %d\n", repsize);
940 /* Copy the XID from the skb... */
941 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
945 /* Look up and lock the request corresponding to the given XID */
946 spin_lock(&xprt->transport_lock);
947 rovr = xprt_lookup_rqst(xprt, *xp);
950 task = rovr->rq_task;
952 copied = rovr->rq_private_buf.buflen;
953 if (copied > repsize)
956 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
957 dprintk("RPC: sk_buff copy failed\n");
961 xprt_complete_rqst(task, copied);
964 spin_unlock(&xprt->transport_lock);
966 skb_free_datagram(sk, skb);
968 read_unlock_bh(&sk->sk_callback_lock);
972 * xs_udp_data_ready - "data ready" callback for UDP sockets
973 * @sk: socket with data to read
976 static void xs_udp_data_ready(struct sock *sk)
978 struct rpc_task *task;
979 struct rpc_xprt *xprt;
980 struct rpc_rqst *rovr;
982 int err, repsize, copied;
986 read_lock_bh(&sk->sk_callback_lock);
987 dprintk("RPC: xs_udp_data_ready...\n");
988 if (!(xprt = xprt_from_sock(sk)))
991 if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
994 repsize = skb->len - sizeof(struct udphdr);
996 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1000 /* Copy the XID from the skb... */
1001 xp = skb_header_pointer(skb, sizeof(struct udphdr),
1002 sizeof(_xid), &_xid);
1006 /* Look up and lock the request corresponding to the given XID */
1007 spin_lock(&xprt->transport_lock);
1008 rovr = xprt_lookup_rqst(xprt, *xp);
1011 task = rovr->rq_task;
1013 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1016 /* Suck it into the iovec, verify checksum if not done by hw. */
1017 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1018 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1022 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1024 xprt_adjust_cwnd(xprt, task, copied);
1025 xprt_complete_rqst(task, copied);
1028 spin_unlock(&xprt->transport_lock);
1030 skb_free_datagram(sk, skb);
1032 read_unlock_bh(&sk->sk_callback_lock);
1036 * Helper function to force a TCP close if the server is sending
1037 * junk and/or it has put us in CLOSE_WAIT
1039 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1041 xprt_force_disconnect(xprt);
1044 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1046 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1050 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1051 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1052 used = xdr_skb_read_bits(desc, p, len);
1053 transport->tcp_offset += used;
1057 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1058 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1059 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1061 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1062 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1064 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1065 transport->tcp_offset = 0;
1067 /* Sanity check of the record length */
1068 if (unlikely(transport->tcp_reclen < 8)) {
1069 dprintk("RPC: invalid TCP record fragment length\n");
1070 xs_tcp_force_close(xprt);
1073 dprintk("RPC: reading TCP record fragment of length %d\n",
1074 transport->tcp_reclen);
1077 static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1079 if (transport->tcp_offset == transport->tcp_reclen) {
1080 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1081 transport->tcp_offset = 0;
1082 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1083 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1084 transport->tcp_flags |= TCP_RCV_COPY_XID;
1085 transport->tcp_copied = 0;
1090 static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1095 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1096 dprintk("RPC: reading XID (%Zu bytes)\n", len);
1097 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1098 used = xdr_skb_read_bits(desc, p, len);
1099 transport->tcp_offset += used;
1102 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1103 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1104 transport->tcp_copied = 4;
1105 dprintk("RPC: reading %s XID %08x\n",
1106 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1108 ntohl(transport->tcp_xid));
1109 xs_tcp_check_fraghdr(transport);
1112 static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1113 struct xdr_skb_reader *desc)
1120 * We want transport->tcp_offset to be 8 at the end of this routine
1121 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1122 * When this function is called for the first time,
1123 * transport->tcp_offset is 4 (after having already read the xid).
1125 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1126 len = sizeof(transport->tcp_calldir) - offset;
1127 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
1128 p = ((char *) &transport->tcp_calldir) + offset;
1129 used = xdr_skb_read_bits(desc, p, len);
1130 transport->tcp_offset += used;
1133 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1135 * We don't yet have the XDR buffer, so we will write the calldir
1136 * out after we get the buffer from the 'struct rpc_rqst'
1138 switch (ntohl(transport->tcp_calldir)) {
1140 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1141 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1142 transport->tcp_flags |= TCP_RPC_REPLY;
1145 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1146 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1147 transport->tcp_flags &= ~TCP_RPC_REPLY;
1150 dprintk("RPC: invalid request message type\n");
1151 xs_tcp_force_close(&transport->xprt);
1153 xs_tcp_check_fraghdr(transport);
1156 static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1157 struct xdr_skb_reader *desc,
1158 struct rpc_rqst *req)
1160 struct sock_xprt *transport =
1161 container_of(xprt, struct sock_xprt, xprt);
1162 struct xdr_buf *rcvbuf;
1166 rcvbuf = &req->rq_private_buf;
1168 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1170 * Save the RPC direction in the XDR buffer
1172 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1173 &transport->tcp_calldir,
1174 sizeof(transport->tcp_calldir));
1175 transport->tcp_copied += sizeof(transport->tcp_calldir);
1176 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1180 if (len > transport->tcp_reclen - transport->tcp_offset) {
1181 struct xdr_skb_reader my_desc;
1183 len = transport->tcp_reclen - transport->tcp_offset;
1184 memcpy(&my_desc, desc, sizeof(my_desc));
1185 my_desc.count = len;
1186 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1187 &my_desc, xdr_skb_read_bits);
1191 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1192 desc, xdr_skb_read_bits);
1195 transport->tcp_copied += r;
1196 transport->tcp_offset += r;
1199 /* Error when copying to the receive buffer,
1200 * usually because we weren't able to allocate
1201 * additional buffer pages. All we can do now
1202 * is turn off TCP_RCV_COPY_DATA, so the request
1203 * will not receive any additional updates, and time out.
1205 * Any remaining data from this record will be discarded.
1208 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1209 dprintk("RPC: XID %08x truncated request\n",
1210 ntohl(transport->tcp_xid));
1211 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1212 "tcp_offset = %u, tcp_reclen = %u\n",
1213 xprt, transport->tcp_copied,
1214 transport->tcp_offset, transport->tcp_reclen);
1218 dprintk("RPC: XID %08x read %Zd bytes\n",
1219 ntohl(transport->tcp_xid), r);
1220 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1221 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1222 transport->tcp_offset, transport->tcp_reclen);
1224 if (transport->tcp_copied == req->rq_private_buf.buflen)
1225 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1226 else if (transport->tcp_offset == transport->tcp_reclen) {
1227 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1228 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1233 * Finds the request corresponding to the RPC xid and invokes the common
1234 * tcp read code to read the data.
1236 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1237 struct xdr_skb_reader *desc)
1239 struct sock_xprt *transport =
1240 container_of(xprt, struct sock_xprt, xprt);
1241 struct rpc_rqst *req;
1243 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1245 /* Find and lock the request corresponding to this xid */
1246 spin_lock(&xprt->transport_lock);
1247 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1249 dprintk("RPC: XID %08x request not found!\n",
1250 ntohl(transport->tcp_xid));
1251 spin_unlock(&xprt->transport_lock);
1255 xs_tcp_read_common(xprt, desc, req);
1257 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1258 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1260 spin_unlock(&xprt->transport_lock);
1264 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1266 * Obtains an rpc_rqst previously allocated and invokes the common
1267 * tcp read code to read the data. The result is placed in the callback queue.
1269 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1270 * connection and return -1.
1272 static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1273 struct xdr_skb_reader *desc)
1275 struct sock_xprt *transport =
1276 container_of(xprt, struct sock_xprt, xprt);
1277 struct rpc_rqst *req;
1279 /* Look up and lock the request corresponding to the given XID */
1280 spin_lock(&xprt->transport_lock);
1281 req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1283 spin_unlock(&xprt->transport_lock);
1284 printk(KERN_WARNING "Callback slot table overflowed\n");
1285 xprt_force_disconnect(xprt);
1289 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1290 xs_tcp_read_common(xprt, desc, req);
1292 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1293 xprt_complete_bc_request(req, transport->tcp_copied);
1294 spin_unlock(&xprt->transport_lock);
1299 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1300 struct xdr_skb_reader *desc)
1302 struct sock_xprt *transport =
1303 container_of(xprt, struct sock_xprt, xprt);
1305 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1306 xs_tcp_read_reply(xprt, desc) :
1307 xs_tcp_read_callback(xprt, desc);
1310 static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
1314 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
1315 SVC_SOCK_ANONYMOUS);
1321 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1322 struct xdr_skb_reader *desc)
1324 return xs_tcp_read_reply(xprt, desc);
1326 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1329 * Read data off the transport. This can be either an RPC_CALL or an
1330 * RPC_REPLY. Relay the processing to helper functions.
1332 static void xs_tcp_read_data(struct rpc_xprt *xprt,
1333 struct xdr_skb_reader *desc)
1335 struct sock_xprt *transport =
1336 container_of(xprt, struct sock_xprt, xprt);
1338 if (_xs_tcp_read_data(xprt, desc) == 0)
1339 xs_tcp_check_fraghdr(transport);
1342 * The transport_lock protects the request handling.
1343 * There's no need to hold it to update the tcp_flags.
1345 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1349 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1353 len = transport->tcp_reclen - transport->tcp_offset;
1354 if (len > desc->count)
1357 desc->offset += len;
1358 transport->tcp_offset += len;
1359 dprintk("RPC: discarded %Zu bytes\n", len);
1360 xs_tcp_check_fraghdr(transport);
1363 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1365 struct rpc_xprt *xprt = rd_desc->arg.data;
1366 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1367 struct xdr_skb_reader desc = {
1373 dprintk("RPC: xs_tcp_data_recv started\n");
1375 trace_xs_tcp_data_recv(transport);
1376 /* Read in a new fragment marker if necessary */
1377 /* Can we ever really expect to get completely empty fragments? */
1378 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1379 xs_tcp_read_fraghdr(xprt, &desc);
1382 /* Read in the xid if necessary */
1383 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1384 xs_tcp_read_xid(transport, &desc);
1387 /* Read in the call/reply flag */
1388 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1389 xs_tcp_read_calldir(transport, &desc);
1392 /* Read in the request data */
1393 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1394 xs_tcp_read_data(xprt, &desc);
1397 /* Skip over any trailing bytes on short reads */
1398 xs_tcp_read_discard(transport, &desc);
1399 } while (desc.count);
1400 trace_xs_tcp_data_recv(transport);
1401 dprintk("RPC: xs_tcp_data_recv done\n");
1402 return len - desc.count;
1406 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1407 * @sk: socket with data to read
1410 static void xs_tcp_data_ready(struct sock *sk)
1412 struct rpc_xprt *xprt;
1413 read_descriptor_t rd_desc;
1415 unsigned long total = 0;
1417 dprintk("RPC: xs_tcp_data_ready...\n");
1419 read_lock_bh(&sk->sk_callback_lock);
1420 if (!(xprt = xprt_from_sock(sk))) {
1424 /* Any data means we had a useful conversation, so
1425 * we don't need to delay the next reconnect
1427 if (xprt->reestablish_timeout)
1428 xprt->reestablish_timeout = 0;
1430 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1431 rd_desc.arg.data = xprt;
1433 rd_desc.count = 65536;
1434 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1439 trace_xs_tcp_data_ready(xprt, read, total);
1440 read_unlock_bh(&sk->sk_callback_lock);
1444 * xs_tcp_state_change - callback to handle TCP socket state changes
1445 * @sk: socket whose state has changed
1448 static void xs_tcp_state_change(struct sock *sk)
1450 struct rpc_xprt *xprt;
1451 struct sock_xprt *transport;
1453 read_lock_bh(&sk->sk_callback_lock);
1454 if (!(xprt = xprt_from_sock(sk)))
1456 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1457 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1458 sk->sk_state, xprt_connected(xprt),
1459 sock_flag(sk, SOCK_DEAD),
1460 sock_flag(sk, SOCK_ZAPPED),
1463 transport = container_of(xprt, struct sock_xprt, xprt);
1464 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1465 switch (sk->sk_state) {
1466 case TCP_ESTABLISHED:
1467 spin_lock(&xprt->transport_lock);
1468 if (!xprt_test_and_set_connected(xprt)) {
1470 /* Reset TCP record info */
1471 transport->tcp_offset = 0;
1472 transport->tcp_reclen = 0;
1473 transport->tcp_copied = 0;
1474 transport->tcp_flags =
1475 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1476 xprt->connect_cookie++;
1477 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1478 xprt_clear_connecting(xprt);
1480 xprt_wake_pending_tasks(xprt, -EAGAIN);
1482 spin_unlock(&xprt->transport_lock);
1485 /* The client initiated a shutdown of the socket */
1486 xprt->connect_cookie++;
1487 xprt->reestablish_timeout = 0;
1488 set_bit(XPRT_CLOSING, &xprt->state);
1489 smp_mb__before_atomic();
1490 clear_bit(XPRT_CONNECTED, &xprt->state);
1491 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1492 smp_mb__after_atomic();
1494 case TCP_CLOSE_WAIT:
1495 /* The server initiated a shutdown of the socket */
1496 xprt->connect_cookie++;
1497 clear_bit(XPRT_CONNECTED, &xprt->state);
1498 xs_tcp_force_close(xprt);
1501 * If the server closed down the connection, make sure that
1502 * we back off before reconnecting
1504 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1505 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1508 set_bit(XPRT_CLOSING, &xprt->state);
1509 smp_mb__before_atomic();
1510 clear_bit(XPRT_CONNECTED, &xprt->state);
1511 smp_mb__after_atomic();
1514 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1515 &transport->sock_state))
1516 xprt_clear_connecting(xprt);
1517 xs_sock_mark_closed(xprt);
1520 read_unlock_bh(&sk->sk_callback_lock);
1523 static void xs_write_space(struct sock *sk)
1525 struct socket *sock;
1526 struct rpc_xprt *xprt;
1528 if (unlikely(!(sock = sk->sk_socket)))
1530 clear_bit(SOCK_NOSPACE, &sock->flags);
1532 if (unlikely(!(xprt = xprt_from_sock(sk))))
1534 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1537 xprt_write_space(xprt);
1541 * xs_udp_write_space - callback invoked when socket buffer space becomes available
1543 * @sk: socket whose state has changed
1545 * Called when more output buffer space is available for this socket.
1546 * We try not to wake our writers until they can make "significant"
1547 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1548 * with a bunch of small requests.
1550 static void xs_udp_write_space(struct sock *sk)
1552 read_lock_bh(&sk->sk_callback_lock);
1554 /* from net/core/sock.c:sock_def_write_space */
1555 if (sock_writeable(sk))
1558 read_unlock_bh(&sk->sk_callback_lock);
1562 * xs_tcp_write_space - callback invoked when socket buffer space becomes available
1564 * @sk: socket whose state has changed
1566 * Called when more output buffer space is available for this socket.
1567 * We try not to wake our writers until they can make "significant"
1568 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1569 * with a bunch of small requests.
1571 static void xs_tcp_write_space(struct sock *sk)
1573 read_lock_bh(&sk->sk_callback_lock);
1575 /* from net/core/stream.c:sk_stream_write_space */
1576 if (sk_stream_is_writeable(sk))
1579 read_unlock_bh(&sk->sk_callback_lock);
1582 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1584 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1585 struct sock *sk = transport->inet;
1587 if (transport->rcvsize) {
1588 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1589 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1591 if (transport->sndsize) {
1592 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1593 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1594 sk->sk_write_space(sk);
1599 * xs_udp_set_buffer_size - set send and receive limits
1600 * @xprt: generic transport
1601 * @sndsize: requested size of send buffer, in bytes
1602 * @rcvsize: requested size of receive buffer, in bytes
1604 * Set socket send and receive buffer size limits.
1606 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1608 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1610 transport->sndsize = 0;
1612 transport->sndsize = sndsize + 1024;
1613 transport->rcvsize = 0;
1615 transport->rcvsize = rcvsize + 1024;
1617 xs_udp_do_set_buffer_size(xprt);
1621 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1622 * @task: task that timed out
1624 * Adjust the congestion window after a retransmit timeout has occurred.
1626 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1628 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1631 static unsigned short xs_get_random_port(void)
1633 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1634 unsigned short rand = (unsigned short) prandom_u32() % range;
1635 return rand + xprt_min_resvport;
1639 * xs_sock_set_reuseport - set the socket's port and address reuse options
1642 * Note that this function has to be called on all sockets that share the
1643 * same port, and it must be called before binding.
1645 static void xs_sock_set_reuseport(struct socket *sock)
1649 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1650 (char *)&opt, sizeof(opt));
1653 static unsigned short xs_sock_getport(struct socket *sock)
1655 struct sockaddr_storage buf;
1657 unsigned short port = 0;
1659 if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
1661 switch (buf.ss_family) {
1663 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1666 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1673 * xs_set_port - reset the port number in the remote endpoint address
1674 * @xprt: generic transport
1675 * @port: new port number
1678 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1680 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1682 rpc_set_port(xs_addr(xprt), port);
1683 xs_update_peer_port(xprt);
1686 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1688 if (transport->srcport == 0)
1689 transport->srcport = xs_sock_getport(sock);
1692 static unsigned short xs_get_srcport(struct sock_xprt *transport)
1694 unsigned short port = transport->srcport;
1696 if (port == 0 && transport->xprt.resvport)
1697 port = xs_get_random_port();
1701 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1703 if (transport->srcport != 0)
1704 transport->srcport = 0;
1705 if (!transport->xprt.resvport)
1707 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1708 return xprt_max_resvport;
1711 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1713 struct sockaddr_storage myaddr;
1715 unsigned short port = xs_get_srcport(transport);
1716 unsigned short last;
1719 * If we are asking for any ephemeral port (i.e. port == 0 &&
1720 * transport->xprt.resvport == 0), don't bind. Let the local
1721 * port selection happen implicitly when the socket is used
1722 * (for example at connect time).
1724 * This ensures that we can continue to establish TCP
1725 * connections even when all local ephemeral ports are already
1726 * a part of some TCP connection. This makes no difference
1727 * for UDP sockets, but also doesn't harm them.
1729 * If we're asking for any reserved port (i.e. port == 0 &&
1730 * transport->xprt.resvport == 1) xs_get_srcport above will
1731 * ensure that port is non-zero and we will bind as needed.
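/*
 * Concretely: a transport with resvport == 0 and no explicit source port
 * skips kernel_bind() altogether and lets the stack pick a port at connect
 * time, whereas a resvport == 1 transport gets a random reserved port
 * (between xprt_min_resvport and xprt_max_resvport) from xs_get_srcport()
 * and, on -EADDRINUSE, retries with the port returned by xs_next_srcport().
 */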
1736 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1738 rpc_set_port((struct sockaddr *)&myaddr, port);
1739 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1740 transport->xprt.addrlen);
1742 transport->srcport = port;
1746 port = xs_next_srcport(transport, port);
1749 } while (err == -EADDRINUSE && nloop != 2);
1751 if (myaddr.ss_family == AF_INET)
1752 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1753 &((struct sockaddr_in *)&myaddr)->sin_addr,
1754 port, err ? "failed" : "ok", err);
1756 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1757 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1758 port, err ? "failed" : "ok", err);
1763 * We don't support autobind on AF_LOCAL sockets
1765 static void xs_local_rpcbind(struct rpc_task *task)
1768 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
1772 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1776 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1777 static struct lock_class_key xs_key[2];
1778 static struct lock_class_key xs_slock_key[2];
1780 static inline void xs_reclassify_socketu(struct socket *sock)
1782 struct sock *sk = sock->sk;
1784 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1785 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1788 static inline void xs_reclassify_socket4(struct socket *sock)
1790 struct sock *sk = sock->sk;
1792 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1793 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1796 static inline void xs_reclassify_socket6(struct socket *sock)
1798 struct sock *sk = sock->sk;
1800 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1801 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1804 static inline void xs_reclassify_socket(int family, struct socket *sock)
1806 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1807 if (sock_owned_by_user(sock->sk))
1812 xs_reclassify_socketu(sock);
1815 xs_reclassify_socket4(sock);
1818 xs_reclassify_socket6(sock);
1823 static inline void xs_reclassify_socketu(struct socket *sock)
1827 static inline void xs_reclassify_socket4(struct socket *sock)
1831 static inline void xs_reclassify_socket6(struct socket *sock)
1835 static inline void xs_reclassify_socket(int family, struct socket *sock)
1840 static void xs_dummy_setup_socket(struct work_struct *work)
1844 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1845 struct sock_xprt *transport, int family, int type,
1846 int protocol, bool reuseport)
1848 struct socket *sock;
1851 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1853 dprintk("RPC: can't create %d transport socket (%d).\n",
1857 xs_reclassify_socket(family, sock);
1860 xs_sock_set_reuseport(sock);
1862 err = xs_bind(transport, sock);
1870 return ERR_PTR(err);
1873 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1874 struct socket *sock)
1876 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1879 if (!transport->inet) {
1880 struct sock *sk = sock->sk;
1882 write_lock_bh(&sk->sk_callback_lock);
1884 xs_save_old_callbacks(transport, sk);
1886 sk->sk_user_data = xprt;
1887 sk->sk_data_ready = xs_local_data_ready;
1888 sk->sk_write_space = xs_udp_write_space;
1889 sk->sk_error_report = xs_error_report;
1890 sk->sk_allocation = GFP_NOIO;
1892 xprt_clear_connected(xprt);
1894 /* Reset to new socket */
1895 transport->sock = sock;
1896 transport->inet = sk;
1898 write_unlock_bh(&sk->sk_callback_lock);
1901 /* Tell the socket layer to start connecting... */
1902 xprt->stat.connect_count++;
1903 xprt->stat.connect_start = jiffies;
1904 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1908 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1909 * @transport: socket transport to connect
1911 static int xs_local_setup_socket(struct sock_xprt *transport)
1913 struct rpc_xprt *xprt = &transport->xprt;
1914 struct socket *sock;
1917 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1918 SOCK_STREAM, 0, &sock, 1);
1920 dprintk("RPC: can't create AF_LOCAL "
1921 "transport socket (%d).\n", -status);
1924 xs_reclassify_socketu(sock);
1926 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1927 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1929 status = xs_local_finish_connecting(xprt, sock);
1930 trace_rpc_socket_connect(xprt, sock, status);
1933 dprintk("RPC: xprt %p connected to %s\n",
1934 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1935 xprt_set_connected(xprt);
1939 dprintk("RPC: xprt %p: socket %s does not exist\n",
1940 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1943 dprintk("RPC: xprt %p: connection refused for %s\n",
1944 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1947 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1949 xprt->address_strings[RPC_DISPLAY_ADDR]);
1953 xprt_clear_connecting(xprt);
1954 xprt_wake_pending_tasks(xprt, status);
1958 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1960 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1963 if (RPC_IS_ASYNC(task)) {
1965 * We want the AF_LOCAL connect to be resolved in the
1966 * filesystem namespace of the process making the rpc
1967 * call. Thus we connect synchronously.
1969 * If we want to support asynchronous AF_LOCAL calls,
1970 * we'll need to figure out how to pass a namespace to connect.
1973 rpc_exit(task, -ENOTCONN);
1976 ret = xs_local_setup_socket(transport);
1977 if (ret && !RPC_IS_SOFTCONN(task))
1978 msleep_interruptible(15000);
1981 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1983 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
1984 * know that we have exclusive access to the socket), to guard against
1985 * races with xs_reset_transport.
1987 static void xs_set_memalloc(struct rpc_xprt *xprt)
1989 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1993 * If there's no sock, then we have nothing to set. The
1994 * reconnecting process will get it for us.
1996 if (!transport->inet)
1998 if (atomic_read(&xprt->swapper))
1999 sk_set_memalloc(transport->inet);
2003 * xs_enable_swap - Tag this transport as being used for swap.
2004 * @xprt: transport to tag
2006 * Take a reference to this transport on behalf of the rpc_clnt, and
2007 * optionally mark it for swapping if it wasn't already.
2010 xs_enable_swap(struct rpc_xprt *xprt)
2012 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2014 if (atomic_inc_return(&xprt->swapper) != 1)
2016 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2017 return -ERESTARTSYS;
2019 sk_set_memalloc(xs->inet);
2020 xprt_release_xprt(xprt, NULL);
2025 * xs_disable_swap - Untag this transport as being used for swap.
2026 * @xprt: transport to untag
2028 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2029 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2032 xs_disable_swap(struct rpc_xprt *xprt)
2034 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2036 if (!atomic_dec_and_test(&xprt->swapper))
2038 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2041 sk_clear_memalloc(xs->inet);
2042 xprt_release_xprt(xprt, NULL);
2045 static void xs_set_memalloc(struct rpc_xprt *xprt)
2050 xs_enable_swap(struct rpc_xprt *xprt)
2056 xs_disable_swap(struct rpc_xprt *xprt)
2061 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2063 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2065 if (!transport->inet) {
2066 struct sock *sk = sock->sk;
2068 write_lock_bh(&sk->sk_callback_lock);
2070 xs_save_old_callbacks(transport, sk);
2072 sk->sk_user_data = xprt;
2073 sk->sk_data_ready = xs_udp_data_ready;
2074 sk->sk_write_space = xs_udp_write_space;
2075 sk->sk_allocation = GFP_NOIO;
2077 xprt_set_connected(xprt);
2079 /* Reset to new socket */
2080 transport->sock = sock;
2081 transport->inet = sk;
2083 xs_set_memalloc(xprt);
2085 write_unlock_bh(&sk->sk_callback_lock);
2087 xs_udp_do_set_buffer_size(xprt);
2090 static void xs_udp_setup_socket(struct work_struct *work)
2092 struct sock_xprt *transport =
2093 container_of(work, struct sock_xprt, connect_worker.work);
2094 struct rpc_xprt *xprt = &transport->xprt;
2095 struct socket *sock = transport->sock;
2098 sock = xs_create_sock(xprt, transport,
2099 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2100 IPPROTO_UDP, false);
2104 dprintk("RPC: worker connecting xprt %p via %s to "
2105 "%s (port %s)\n", xprt,
2106 xprt->address_strings[RPC_DISPLAY_PROTO],
2107 xprt->address_strings[RPC_DISPLAY_ADDR],
2108 xprt->address_strings[RPC_DISPLAY_PORT]);
2110 xs_udp_finish_connecting(xprt, sock);
2111 trace_rpc_socket_connect(xprt, sock, 0);
2114 xprt_unlock_connect(xprt, transport);
2115 xprt_clear_connecting(xprt);
2116 xprt_wake_pending_tasks(xprt, status);
2120 * xs_tcp_shutdown - gracefully shut down a TCP socket
2123 * Initiates a graceful shutdown of the TCP socket by calling the
2124 * equivalent of shutdown(SHUT_RDWR);
2126 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2128 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2129 struct socket *sock = transport->sock;
2133 if (xprt_connected(xprt)) {
2134 kernel_sock_shutdown(sock, SHUT_RDWR);
2135 trace_rpc_socket_shutdown(xprt, sock);
2137 xs_reset_transport(transport);
2140 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2142 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2143 int ret = -ENOTCONN;
2145 if (!transport->inet) {
2146 struct sock *sk = sock->sk;
2147 unsigned int keepidle = xprt->timeout->to_initval / HZ;
2148 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2149 unsigned int opt_on = 1;
2152 /* TCP Keepalive options */
2153 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2154 (char *)&opt_on, sizeof(opt_on));
2155 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2156 (char *)&keepidle, sizeof(keepidle));
2157 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2158 (char *)&keepidle, sizeof(keepidle));
2159 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2160 (char *)&keepcnt, sizeof(keepcnt));
2162 /* TCP user timeout (see RFC5482) */
2163 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2164 (xprt->timeout->to_retries + 1);
2165 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2166 (char *)&timeo, sizeof(timeo));
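/*
 * Illustration (values assumed for this example, not taken from this file):
 * with to_initval of 60 seconds and to_retries of 2, timeo becomes
 * 60000 * 3 = 180000 ms, i.e. the kernel aborts the connection if
 * transmitted data stays unacknowledged for about three minutes
 * (RFC 5482 semantics).
 */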
2168 write_lock_bh(&sk->sk_callback_lock);
2170 xs_save_old_callbacks(transport, sk);
2172 sk->sk_user_data = xprt;
2173 sk->sk_data_ready = xs_tcp_data_ready;
2174 sk->sk_state_change = xs_tcp_state_change;
2175 sk->sk_write_space = xs_tcp_write_space;
2176 sk->sk_error_report = xs_error_report;
2177 sk->sk_allocation = GFP_NOIO;
2179 /* socket options */
2180 sock_reset_flag(sk, SOCK_LINGER);
2181 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2183 xprt_clear_connected(xprt);
2185 /* Reset to new socket */
2186 transport->sock = sock;
2187 transport->inet = sk;
2189 write_unlock_bh(&sk->sk_callback_lock);
2192 if (!xprt_bound(xprt))
2195 xs_set_memalloc(xprt);
2197 /* Tell the socket layer to start connecting... */
2198 xprt->stat.connect_count++;
2199 xprt->stat.connect_start = jiffies;
2200 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2201 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2204 xs_set_srcport(transport, sock);
2207 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2208 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2215 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2217 * Invoked by a work queue tasklet.
2219 static void xs_tcp_setup_socket(struct work_struct *work)
2221 struct sock_xprt *transport =
2222 container_of(work, struct sock_xprt, connect_worker.work);
2223 struct socket *sock = transport->sock;
2224 struct rpc_xprt *xprt = &transport->xprt;
2228 sock = xs_create_sock(xprt, transport,
2229 xs_addr(xprt)->sa_family, SOCK_STREAM,
2232 status = PTR_ERR(sock);
2237 dprintk("RPC: worker connecting xprt %p via %s to "
2238 "%s (port %s)\n", xprt,
2239 xprt->address_strings[RPC_DISPLAY_PROTO],
2240 xprt->address_strings[RPC_DISPLAY_ADDR],
2241 xprt->address_strings[RPC_DISPLAY_PORT]);
2243 status = xs_tcp_finish_connecting(xprt, sock);
2244 trace_rpc_socket_connect(xprt, sock, status);
2245 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2246 xprt, -status, xprt_connected(xprt),
2247 sock->sk->sk_state);
2250 printk("%s: connect returned unhandled error %d\n",
2252 case -EADDRNOTAVAIL:
2253 /* We're probably in TIME_WAIT. Get rid of existing socket, and retry.
2256 xs_tcp_force_close(xprt);
2261 xprt_unlock_connect(xprt, transport);
2264 /* Happens, for instance, if the user specified a link
2265 * local IPv6 address without a scope-id.
2272 /* retry with existing socket, after a delay */
2273 xs_tcp_force_close(xprt);
2278 xprt_unlock_connect(xprt, transport);
2279 xprt_clear_connecting(xprt);
2280 xprt_wake_pending_tasks(xprt, status);
2284 * xs_connect - connect a socket to a remote endpoint
2285 * @xprt: pointer to transport structure
2286 * @task: address of RPC task that manages state of connect request
2288 * TCP: If the remote end dropped the connection, delay reconnecting.
2290 * UDP socket connects are synchronous, but we use a work queue anyway
2291 * to guarantee that even unprivileged user processes can set up a
2292 * socket on a privileged port.
2294 * If a UDP socket connect fails, the delay behavior here prevents
2295 * retry floods (hard mounts).
2297 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2299 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2301 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2303 if (transport->sock != NULL) {
2304 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2306 xprt, xprt->reestablish_timeout / HZ);
2308 /* Start by resetting any existing state */
2309 xs_reset_transport(transport);
2311 queue_delayed_work(rpciod_workqueue,
2312 &transport->connect_worker,
2313 xprt->reestablish_timeout);
2314 xprt->reestablish_timeout <<= 1;
2315 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2316 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2317 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2318 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
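/* Each pass through this branch doubles the reconnect delay, clamped
 * between XS_TCP_INIT_REEST_TO and XS_TCP_MAX_REEST_TO (defined earlier
 * in this file; historically 3 seconds and 5 minutes), giving an
 * exponential backoff against a peer that keeps dropping the connection.
 */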
2320 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2321 queue_delayed_work(rpciod_workqueue,
2322 &transport->connect_worker, 0);
2327 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2328 * @xprt: rpc_xprt struct containing statistics
2332 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2336 if (xprt_connected(xprt))
2337 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2339 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2340 "%llu %llu %lu %llu %llu\n",
2341 xprt->stat.bind_count,
2342 xprt->stat.connect_count,
2343 xprt->stat.connect_time,
2347 xprt->stat.bad_xids,
2350 xprt->stat.max_slots,
2351 xprt->stat.sending_u,
2352 xprt->stat.pending_u);
2356 * xs_udp_print_stats - display UDP socket-specific stats
2357 * @xprt: rpc_xprt struct containing statistics
2361 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2363 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2365 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2368 xprt->stat.bind_count,
2371 xprt->stat.bad_xids,
2374 xprt->stat.max_slots,
2375 xprt->stat.sending_u,
2376 xprt->stat.pending_u);
2380 * xs_tcp_print_stats - display TCP socket-specific stats
2381 * @xprt: rpc_xprt struct containing statistics
2385 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2387 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2390 if (xprt_connected(xprt))
2391 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2393 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2394 "%llu %llu %lu %llu %llu\n",
2396 xprt->stat.bind_count,
2397 xprt->stat.connect_count,
2398 xprt->stat.connect_time,
2402 xprt->stat.bad_xids,
2405 xprt->stat.max_slots,
2406 xprt->stat.sending_u,
2407 xprt->stat.pending_u);
2411 * Allocate a bunch of pages for a scratch buffer for the rpc code. We
2412 * allocate pages instead of doing a kmalloc like rpc_malloc does because we
2413 * want to be able to use the server-side send routines.
2415 static void *bc_malloc(struct rpc_task *task, size_t size)
2418 struct rpc_buffer *buf;
2420 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2421 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2424 page = alloc_page(GFP_KERNEL);
2428 buf = page_address(page);
2429 buf->len = PAGE_SIZE;
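/* Callers are handed buf->data, which sits just past the rpc_buffer
 * header; bc_free() below relies on that layout, using container_of()
 * to recover the rpc_buffer and free the whole page.
 */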
2435 * Free the space allocated in the bc_malloc routine
2437 static void bc_free(void *buffer)
2439 struct rpc_buffer *buf;
2444 buf = container_of(buffer, struct rpc_buffer, data);
2445 free_page((unsigned long)buf);
2449 * Use the svc_sock to send the callback. Must be called with the xpt_mutex
2450 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2452 static int bc_sendto(struct rpc_rqst *req)
2455 struct xdr_buf *xbufp = &req->rq_snd_buf;
2456 struct rpc_xprt *xprt = req->rq_xprt;
2457 struct sock_xprt *transport =
2458 container_of(xprt, struct sock_xprt, xprt);
2459 struct socket *sock = transport->sock;
2460 unsigned long headoff;
2461 unsigned long tailoff;
2463 xs_encode_stream_record_marker(xbufp);
2465 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2466 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2467 len = svc_send_common(sock, xbufp,
2468 virt_to_page(xbufp->head[0].iov_base), headoff,
2469 xbufp->tail[0].iov_base, tailoff);
2471 if (len != xbufp->len) {
2472 printk(KERN_NOTICE "Error sending entire callback!\n");
2480 * The send routine. Borrows from svc_send
2482 static int bc_send_request(struct rpc_task *task)
2484 struct rpc_rqst *req = task->tk_rqstp;
2485 struct svc_xprt *xprt;
2488 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2490 * Get the server socket associated with this callback xprt
2492 xprt = req->rq_xprt->bc_xprt;
2495 * Grab the mutex to serialize data as the connection is shared
2496 * with the fore channel
2498 if (!mutex_trylock(&xprt->xpt_mutex)) {
2499 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2500 if (!mutex_trylock(&xprt->xpt_mutex))
2502 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2504 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2507 len = bc_sendto(req);
2508 mutex_unlock(&xprt->xpt_mutex);
2517 * The close routine. Since this is client initiated, we do nothing
2520 static void bc_close(struct rpc_xprt *xprt)
2525 * The xprt destroy routine. Again, because this connection is client
2526 * initiated, we do nothing
2529 static void bc_destroy(struct rpc_xprt *xprt)
2531 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2534 module_put(THIS_MODULE);
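/* Method tables: one set of rpc_xprt_ops per transport flavour
 * (AF_LOCAL, UDP, TCP and the NFSv4.1 TCP backchannel). The RPC core
 * dispatches through these pointers rather than calling into xprtsock
 * directly.
 */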
2537 static struct rpc_xprt_ops xs_local_ops = {
2538 .reserve_xprt = xprt_reserve_xprt,
2539 .release_xprt = xs_tcp_release_xprt,
2540 .alloc_slot = xprt_alloc_slot,
2541 .rpcbind = xs_local_rpcbind,
2542 .set_port = xs_local_set_port,
2543 .connect = xs_local_connect,
2544 .buf_alloc = rpc_malloc,
2545 .buf_free = rpc_free,
2546 .send_request = xs_local_send_request,
2547 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2549 .destroy = xs_destroy,
2550 .print_stats = xs_local_print_stats,
2551 .enable_swap = xs_enable_swap,
2552 .disable_swap = xs_disable_swap,
2555 static struct rpc_xprt_ops xs_udp_ops = {
2556 .set_buffer_size = xs_udp_set_buffer_size,
2557 .reserve_xprt = xprt_reserve_xprt_cong,
2558 .release_xprt = xprt_release_xprt_cong,
2559 .alloc_slot = xprt_alloc_slot,
2560 .rpcbind = rpcb_getport_async,
2561 .set_port = xs_set_port,
2562 .connect = xs_connect,
2563 .buf_alloc = rpc_malloc,
2564 .buf_free = rpc_free,
2565 .send_request = xs_udp_send_request,
2566 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2567 .timer = xs_udp_timer,
2568 .release_request = xprt_release_rqst_cong,
2570 .destroy = xs_destroy,
2571 .print_stats = xs_udp_print_stats,
2572 .enable_swap = xs_enable_swap,
2573 .disable_swap = xs_disable_swap,
2574 .inject_disconnect = xs_inject_disconnect,
2577 static struct rpc_xprt_ops xs_tcp_ops = {
2578 .reserve_xprt = xprt_reserve_xprt,
2579 .release_xprt = xs_tcp_release_xprt,
2580 .alloc_slot = xprt_lock_and_alloc_slot,
2581 .rpcbind = rpcb_getport_async,
2582 .set_port = xs_set_port,
2583 .connect = xs_connect,
2584 .buf_alloc = rpc_malloc,
2585 .buf_free = rpc_free,
2586 .send_request = xs_tcp_send_request,
2587 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2588 .close = xs_tcp_shutdown,
2589 .destroy = xs_destroy,
2590 .print_stats = xs_tcp_print_stats,
2591 .enable_swap = xs_enable_swap,
2592 .disable_swap = xs_disable_swap,
2593 .inject_disconnect = xs_inject_disconnect,
2594 #ifdef CONFIG_SUNRPC_BACKCHANNEL
2595 .bc_setup = xprt_setup_bc,
2596 .bc_up = xs_tcp_bc_up,
2597 .bc_free_rqst = xprt_free_bc_rqst,
2598 .bc_destroy = xprt_destroy_bc,
2603 * The rpc_xprt_ops for the server backchannel
2606 static struct rpc_xprt_ops bc_tcp_ops = {
2607 .reserve_xprt = xprt_reserve_xprt,
2608 .release_xprt = xprt_release_xprt,
2609 .alloc_slot = xprt_alloc_slot,
2610 .buf_alloc = bc_malloc,
2611 .buf_free = bc_free,
2612 .send_request = bc_send_request,
2613 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2615 .destroy = bc_destroy,
2616 .print_stats = xs_tcp_print_stats,
2617 .enable_swap = xs_enable_swap,
2618 .disable_swap = xs_disable_swap,
2619 .inject_disconnect = xs_inject_disconnect,
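/* xs_init_anyaddr() fills @sap with the wildcard address for @family
 * (INADDR_ANY or the IPv6 any-address) when no source address was
 * supplied by the caller; unsupported families get -EAFNOSUPPORT.
 */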
2622 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2624 static const struct sockaddr_in sin = {
2625 .sin_family = AF_INET,
2626 .sin_addr.s_addr = htonl(INADDR_ANY),
2628 static const struct sockaddr_in6 sin6 = {
2629 .sin6_family = AF_INET6,
2630 .sin6_addr = IN6ADDR_ANY_INIT,
2637 memcpy(sap, &sin, sizeof(sin));
2640 memcpy(sap, &sin6, sizeof(sin6));
2643 dprintk("RPC: %s: Bad address family\n", __func__);
2644 return -EAFNOSUPPORT;
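/* xs_setup_xprt() is the common constructor behind every xs_setup_*
 * routine below: it validates the destination address length, allocates
 * the combined rpc_xprt/sock_xprt, and copies in the destination and
 * optional source addresses.
 */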
2649 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2650 unsigned int slot_table_size,
2651 unsigned int max_slot_table_size)
2653 struct rpc_xprt *xprt;
2654 struct sock_xprt *new;
2656 if (args->addrlen > sizeof(xprt->addr)) {
2657 dprintk("RPC: xs_setup_xprt: address too large\n");
2658 return ERR_PTR(-EBADF);
2661 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2662 max_slot_table_size);
2664 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2666 return ERR_PTR(-ENOMEM);
2669 new = container_of(xprt, struct sock_xprt, xprt);
2670 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2671 xprt->addrlen = args->addrlen;
2673 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2676 err = xs_init_anyaddr(args->dstaddr->sa_family,
2677 (struct sockaddr *)&new->srcaddr);
2680 return ERR_PTR(err);
2687 static const struct rpc_timeout xs_local_default_timeout = {
2688 .to_initval = 10 * HZ,
2689 .to_maxval = 10 * HZ,
2694 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2695 * @args: rpc transport creation arguments
2697 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2699 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2701 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2702 struct sock_xprt *transport;
2703 struct rpc_xprt *xprt;
2704 struct rpc_xprt *ret;
2706 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2707 xprt_max_tcp_slot_table_entries);
2710 transport = container_of(xprt, struct sock_xprt, xprt);
2713 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2714 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2716 xprt->bind_timeout = XS_BIND_TO;
2717 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2718 xprt->idle_timeout = XS_IDLE_DISC_TO;
2720 xprt->ops = &xs_local_ops;
2721 xprt->timeout = &xs_local_default_timeout;
2723 INIT_DELAYED_WORK(&transport->connect_worker,
2724 xs_dummy_setup_socket);
2726 switch (sun->sun_family) {
2728 if (sun->sun_path[0] != '/') {
2729 dprintk("RPC: bad AF_LOCAL address: %s\n",
2731 ret = ERR_PTR(-EINVAL);
2734 xprt_set_bound(xprt);
2735 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2736 ret = ERR_PTR(xs_local_setup_socket(transport));
2741 ret = ERR_PTR(-EAFNOSUPPORT);
2745 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2746 xprt->address_strings[RPC_DISPLAY_ADDR]);
2748 if (try_module_get(THIS_MODULE))
2750 ret = ERR_PTR(-EINVAL);
2756 static const struct rpc_timeout xs_udp_default_timeout = {
2757 .to_initval = 5 * HZ,
2758 .to_maxval = 30 * HZ,
2759 .to_increment = 5 * HZ,
2764 * xs_setup_udp - Set up transport to use a UDP socket
2765 * @args: rpc transport creation arguments
2768 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2770 struct sockaddr *addr = args->dstaddr;
2771 struct rpc_xprt *xprt;
2772 struct sock_xprt *transport;
2773 struct rpc_xprt *ret;
2775 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2776 xprt_udp_slot_table_entries);
2779 transport = container_of(xprt, struct sock_xprt, xprt);
2781 xprt->prot = IPPROTO_UDP;
2783 /* XXX: header size can vary due to auth type, IPv6, etc. */
2784 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
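/* Rough arithmetic: a UDP datagram tops out at 64KB (1U << 16), and
 * subtracting MAX_HEADER << 3 (eight times the worst-case header size)
 * leaves conservative slack for RPC, UDP/IP and link-layer headers; as
 * the comment above notes, this is a bound, not an exact calculation.
 */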
2786 xprt->bind_timeout = XS_BIND_TO;
2787 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2788 xprt->idle_timeout = XS_IDLE_DISC_TO;
2790 xprt->ops = &xs_udp_ops;
2792 xprt->timeout = &xs_udp_default_timeout;
2794 switch (addr->sa_family) {
2796 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2797 xprt_set_bound(xprt);
2799 INIT_DELAYED_WORK(&transport->connect_worker,
2800 xs_udp_setup_socket);
2801 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2804 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2805 xprt_set_bound(xprt);
2807 INIT_DELAYED_WORK(&transport->connect_worker,
2808 xs_udp_setup_socket);
2809 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2812 ret = ERR_PTR(-EAFNOSUPPORT);
2816 if (xprt_bound(xprt))
2817 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2818 xprt->address_strings[RPC_DISPLAY_ADDR],
2819 xprt->address_strings[RPC_DISPLAY_PORT],
2820 xprt->address_strings[RPC_DISPLAY_PROTO]);
2822 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2823 xprt->address_strings[RPC_DISPLAY_ADDR],
2824 xprt->address_strings[RPC_DISPLAY_PROTO]);
2826 if (try_module_get(THIS_MODULE))
2828 ret = ERR_PTR(-EINVAL);
2834 static const struct rpc_timeout xs_tcp_default_timeout = {
2835 .to_initval = 60 * HZ,
2836 .to_maxval = 60 * HZ,
2841 * xs_setup_tcp - Set up transport to use a TCP socket
2842 * @args: rpc transport creation arguments
2845 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2847 struct sockaddr *addr = args->dstaddr;
2848 struct rpc_xprt *xprt;
2849 struct sock_xprt *transport;
2850 struct rpc_xprt *ret;
2851 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2853 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2854 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2856 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2857 max_slot_table_size);
2860 transport = container_of(xprt, struct sock_xprt, xprt);
2862 xprt->prot = IPPROTO_TCP;
2863 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2864 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2866 xprt->bind_timeout = XS_BIND_TO;
2867 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2868 xprt->idle_timeout = XS_IDLE_DISC_TO;
2870 xprt->ops = &xs_tcp_ops;
2871 xprt->timeout = &xs_tcp_default_timeout;
2873 switch (addr->sa_family) {
2875 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2876 xprt_set_bound(xprt);
2878 INIT_DELAYED_WORK(&transport->connect_worker,
2879 xs_tcp_setup_socket);
2880 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2883 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2884 xprt_set_bound(xprt);
2886 INIT_DELAYED_WORK(&transport->connect_worker,
2887 xs_tcp_setup_socket);
2888 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2891 ret = ERR_PTR(-EAFNOSUPPORT);
2895 if (xprt_bound(xprt))
2896 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2897 xprt->address_strings[RPC_DISPLAY_ADDR],
2898 xprt->address_strings[RPC_DISPLAY_PORT],
2899 xprt->address_strings[RPC_DISPLAY_PROTO]);
2901 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2902 xprt->address_strings[RPC_DISPLAY_ADDR],
2903 xprt->address_strings[RPC_DISPLAY_PROTO]);
2905 if (try_module_get(THIS_MODULE))
2907 ret = ERR_PTR(-EINVAL);
2914 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2915 * @args: rpc transport creation arguments
2918 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2920 struct sockaddr *addr = args->dstaddr;
2921 struct rpc_xprt *xprt;
2922 struct sock_xprt *transport;
2923 struct svc_sock *bc_sock;
2924 struct rpc_xprt *ret;
2926 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2927 xprt_tcp_slot_table_entries);
2930 transport = container_of(xprt, struct sock_xprt, xprt);
2932 xprt->prot = IPPROTO_TCP;
2933 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2934 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2935 xprt->timeout = &xs_tcp_default_timeout;
2938 xprt_set_bound(xprt);
2939 xprt->bind_timeout = 0;
2940 xprt->reestablish_timeout = 0;
2941 xprt->idle_timeout = 0;
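/* The backchannel rides on the forechannel's established connection, so
 * there is nothing to bind, reconnect or idle out: those timeouts stay
 * at zero and the transport was marked bound above.
 */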
2943 xprt->ops = &bc_tcp_ops;
2945 switch (addr->sa_family) {
2947 xs_format_peer_addresses(xprt, "tcp",
2951 xs_format_peer_addresses(xprt, "tcp",
2952 RPCBIND_NETID_TCP6);
2955 ret = ERR_PTR(-EAFNOSUPPORT);
2959 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2960 xprt->address_strings[RPC_DISPLAY_ADDR],
2961 xprt->address_strings[RPC_DISPLAY_PORT],
2962 xprt->address_strings[RPC_DISPLAY_PROTO]);
2965 * Once we've associated a backchannel xprt with a connection,
2966 * we want to keep it around as long as the connection lasts,
2967 * in case we need to start using it for a backchannel again;
2968 * this reference won't be dropped until bc_xprt is destroyed.
2971 args->bc_xprt->xpt_bc_xprt = xprt;
2972 xprt->bc_xprt = args->bc_xprt;
2973 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
2974 transport->sock = bc_sock->sk_sock;
2975 transport->inet = bc_sock->sk_sk;
2978 * Since we don't want connections for the backchannel, we set
2979 * the xprt status to connected
2981 xprt_set_connected(xprt);
2983 if (try_module_get(THIS_MODULE))
2986 args->bc_xprt->xpt_bc_xprt = NULL;
2988 ret = ERR_PTR(-EINVAL);
2994 static struct xprt_class xs_local_transport = {
2995 .list = LIST_HEAD_INIT(xs_local_transport.list),
2996 .name = "named UNIX socket",
2997 .owner = THIS_MODULE,
2998 .ident = XPRT_TRANSPORT_LOCAL,
2999 .setup = xs_setup_local,
3002 static struct xprt_class xs_udp_transport = {
3003 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3005 .owner = THIS_MODULE,
3006 .ident = XPRT_TRANSPORT_UDP,
3007 .setup = xs_setup_udp,
3010 static struct xprt_class xs_tcp_transport = {
3011 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3013 .owner = THIS_MODULE,
3014 .ident = XPRT_TRANSPORT_TCP,
3015 .setup = xs_setup_tcp,
3018 static struct xprt_class xs_bc_tcp_transport = {
3019 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3020 .name = "tcp NFSv4.1 backchannel",
3021 .owner = THIS_MODULE,
3022 .ident = XPRT_TRANSPORT_BC_TCP,
3023 .setup = xs_setup_bc_tcp,
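/* Each xprt_class maps an XPRT_TRANSPORT_* identifier to its setup
 * routine; init_socket_xprt() below registers all four with the RPC
 * core and cleanup_socket_xprt() removes them again on module unload.
 */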
3027 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3030 int init_socket_xprt(void)
3032 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3033 if (!sunrpc_table_header)
3034 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3037 xprt_register_transport(&xs_local_transport);
3038 xprt_register_transport(&xs_udp_transport);
3039 xprt_register_transport(&xs_tcp_transport);
3040 xprt_register_transport(&xs_bc_tcp_transport);
3046 * cleanup_socket_xprt - remove xprtsock's sysctls and unregister its transports
3049 void cleanup_socket_xprt(void)
3051 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3052 if (sunrpc_table_header) {
3053 unregister_sysctl_table(sunrpc_table_header);
3054 sunrpc_table_header = NULL;
3058 xprt_unregister_transport(&xs_local_transport);
3059 xprt_unregister_transport(&xs_udp_transport);
3060 xprt_unregister_transport(&xs_tcp_transport);
3061 xprt_unregister_transport(&xs_bc_tcp_transport);
3064 static int param_set_uint_minmax(const char *val,
3065 const struct kernel_param *kp,
3066 unsigned int min, unsigned int max)
3073 ret = kstrtouint(val, 0, &num);
3074 if (ret == -EINVAL || num < min || num > max)
3076 *((unsigned int *)kp->arg) = num;
3080 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3082 return param_set_uint_minmax(val, kp,
3087 static const struct kernel_param_ops param_ops_portnr = {
3088 .set = param_set_portnr,
3089 .get = param_get_uint,
3092 #define param_check_portnr(name, p) \
3093 __param_check(name, p, unsigned int);
3095 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3096 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
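/* A minimal usage sketch; the sysfs path is an assumption based on the
 * usual layout for the sunrpc module, not something taken from this
 * file:
 *
 *   echo 665 > /sys/module/sunrpc/parameters/min_resvport
 *
 * Values outside RPC_MIN_RESVPORT..RPC_MAX_RESVPORT are rejected by
 * param_set_portnr() above via param_set_uint_minmax().
 */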
3098 static int param_set_slot_table_size(const char *val,
3099 const struct kernel_param *kp)
3101 return param_set_uint_minmax(val, kp,
3103 RPC_MAX_SLOT_TABLE);
3106 static const struct kernel_param_ops param_ops_slot_table_size = {
3107 .set = param_set_slot_table_size,
3108 .get = param_get_uint,
3111 #define param_check_slot_table_size(name, p) \
3112 __param_check(name, p, unsigned int);
3114 static int param_set_max_slot_table_size(const char *val,
3115 const struct kernel_param *kp)
3117 return param_set_uint_minmax(val, kp,
3119 RPC_MAX_SLOT_TABLE_LIMIT);
3122 static const struct kernel_param_ops param_ops_max_slot_table_size = {
3123 .set = param_set_max_slot_table_size,
3124 .get = param_get_uint,
3127 #define param_check_max_slot_table_size(name, p) \
3128 __param_check(name, p, unsigned int);
3130 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3131 slot_table_size, 0644);
3132 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3133 max_slot_table_size, 0644);
3134 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3135 slot_table_size, 0644);