// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);

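/*
 * svc_rpc_per_connection_limit caps how many rqst slots a single
 * connection may hold at once (see svc_xprt_slots_in_range() below);
 * zero means no per-connection cap. Because it is a module parameter
 * with mode 0644 it can be tuned at runtime; an illustrative example:
 *
 *	echo 16 > /sys/module/sunrpc/parameters/svc_rpc_per_connection_limit
 */
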
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://nfsv4bat.org/Documents/ConnectAThon/1996/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiply. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
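/*
 * To illustrate the XPT_DATA rules above: a transport provider's
 * data-ready callback typically amounts to the following sketch
 * (a hypothetical provider, not code from this file):
 *
 *	static void foo_data_ready(struct svc_xprt *xprt)
 *	{
 *		set_bit(XPT_DATA, &xprt->xpt_flags);
 *		svc_xprt_enqueue(xprt);
 *	}
 *
 * The bit is cleared again only by a receive attempt that finds no
 * (or insufficient) data; see svc_xprt_received() below.
 */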
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
 out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/**
 * svc_print_xprts - Format the transport list for printing
 * @buf: target buffer for formatted address
 * @maxlen: length of target buffer
 *
 * Fills in @buf with a string containing a list of transport names, each name
 * terminated with '\n'. If the buffer is too small, some entries may be
 * missing, but it is guaranteed that all lines in the output buffer are
 * complete.
 *
 * Returns positive length of the filled-in string.
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
				xcl->xcl_name, xcl->xcl_max_payload);
		if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

/**
 * svc_xprt_deferred_close - Close a transport
 * @xprt: transport instance
 *
 * Used in contexts that need to defer the work of shutting down
 * the transport to an nfsd thread.
 */
void svc_xprt_deferred_close(struct svc_xprt *xprt)
{
	if (!test_and_set_bit(XPT_CLOSE, &xprt->xpt_flags))
		svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_deferred_close);

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_cred(xprt->xpt_cred);
	put_net_track(xprt->xpt_net, &xprt->ns_tracker);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	trace_svc_xprt_free(xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

1d8206b9 TT |
182 | /* |
183 | * Called by transport drivers to initialize the transport independent | |
184 | * portion of the transport instance. | |
185 | */ | |
bd4620dd SK |
186 | void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, |
187 | struct svc_xprt *xprt, struct svc_serv *serv) | |
1d8206b9 TT |
188 | { |
189 | memset(xprt, 0, sizeof(*xprt)); | |
190 | xprt->xpt_class = xcl; | |
191 | xprt->xpt_ops = xcl->xcl_ops; | |
e1b3157f | 192 | kref_init(&xprt->xpt_ref); |
bb5cf160 | 193 | xprt->xpt_server = serv; |
7a182083 TT |
194 | INIT_LIST_HEAD(&xprt->xpt_list); |
195 | INIT_LIST_HEAD(&xprt->xpt_ready); | |
8c7b0172 | 196 | INIT_LIST_HEAD(&xprt->xpt_deferred); |
edc7a894 | 197 | INIT_LIST_HEAD(&xprt->xpt_users); |
a50fea26 | 198 | mutex_init(&xprt->xpt_mutex); |
def13d74 | 199 | spin_lock_init(&xprt->xpt_lock); |
4e5caaa5 | 200 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
6cdef8a6 | 201 | xprt->xpt_net = get_net_track(net, &xprt->ns_tracker, GFP_ATOMIC); |
ece200dd | 202 | strcpy(xprt->xpt_remotebuf, "uninitialized"); |
1d8206b9 TT |
203 | } |
204 | EXPORT_SYMBOL_GPL(svc_xprt_init); | |
b700cbb1 | 205 | |
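/*
 * A transport class's xpo_create method is expected to allocate its
 * private structure (with an embedded struct svc_xprt) and hand the
 * embedded xprt to svc_xprt_init() before doing anything else with it.
 * A minimal sketch, using a hypothetical "foo" transport (the names
 * below are illustrative, not from this file):
 *
 *	static struct svc_xprt *foo_create(struct svc_serv *serv,
 *					   struct net *net,
 *					   struct sockaddr *sa, int salen,
 *					   int flags)
 *	{
 *		struct foo_xprt *fx = kzalloc(sizeof(*fx), GFP_KERNEL);
 *
 *		if (!fx)
 *			return ERR_PTR(-ENOMEM);
 *		svc_xprt_init(net, &foo_xprt_class, &fx->fx_xprt, serv);
 *		return &fx->fx_xprt;
 *	}
 *
 * Note that svc_xprt_init() leaves XPT_BUSY set: the new transport is
 * not eligible for processing until svc_xprt_received() clears it.
 */
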
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct svc_xprt *xprt;
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
	if (IS_ERR(xprt))
		trace_svc_xprt_create_err(serv->sv_program->pg_name,
					  xcl->xcl_name, sap, len, xprt);
	return xprt;
}

/**
 * svc_xprt_received - start next receiver thread
 * @xprt: controlling transport
 *
 * The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_xprt_enqueue with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
			    struct net *net, const int family,
			    const unsigned short port, int flags,
			    const struct cred *cred)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		newxprt->xpt_cred = get_cred(cred);
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

/**
 * svc_xprt_create - Add a new listener to @serv
 * @serv: target RPC service
 * @xprt_name: transport class name
 * @net: network namespace
 * @family: network address family
 * @port: listener port
 * @flags: SVC_SOCK flags
 * @cred: credential to bind to this transport
 *
 * Return values:
 *   a positive value: the bound port number of the new listener
 *   %-EPROTONOSUPPORT: Requested transport type not supported
 *   other negative errno values: returned by the class's xpo_create method
 */
int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags,
		    const struct cred *cred)
{
	int err;

	err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
	}
	return err;
}
EXPORT_SYMBOL_GPL(svc_xprt_create);

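/*
 * For example, a service that wants a TCP listener on the standard NFS
 * port would do, roughly (a sketch; error handling and the origins of
 * @serv, @net and @cred are omitted):
 *
 *	err = svc_xprt_create(serv, "tcp", net, PF_INET, 2049,
 *			      SVC_SOCK_DEFAULTS, cred);
 *	if (err < 0)
 *		goto out_err;
 *
 * If the named class is not yet registered, svc_xprt_create() requests
 * the "svctcp" module by name and retries once before giving up.
 */
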
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
		svc_xprt_enqueue(xprt);
	}
}

static bool svc_xprt_ready(struct svc_xprt *xprt)
{
	unsigned long xpt_flags;

	/*
	 * If another cpu has recently updated xpt_flags,
	 * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to
	 * know about it; otherwise it's possible that both that cpu and
	 * this one could call svc_xprt_enqueue() without either
	 * svc_xprt_enqueue() recognizing that the conditions below
	 * are satisfied, and we could stall indefinitely:
	 */
	smp_rmb();
	xpt_flags = READ_ONCE(xprt->xpt_flags);

	if (xpt_flags & BIT(XPT_BUSY))
		return false;
	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
		return true;
	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

/**
 * svc_xprt_enqueue - Queue a transport on an idle nfsd thread
 * @xprt: transport with data pending
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp = NULL;

	if (!svc_xprt_ready(xprt))
		return;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;

	pool = svc_pool_for_cpu(xprt->xpt_server);

	atomic_long_inc(&pool->sp_stats.packets);

	spin_lock_bh(&pool->sp_lock);
	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
	pool->sp_stats.sockets_queued++;
	spin_unlock_bh(&pool->sp_lock);

	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		atomic_long_inc(&pool->sp_stats.threads_woken);
		rqstp->rq_qtime = ktime_get();
		wake_up_process(rqstp->rq_task);
		goto out_unlock;
	}
	set_bit(SP_CONGESTED, &pool->sp_flags);
	rqstp = NULL;
out_unlock:
	rcu_read_unlock();
	trace_svc_xprt_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	space += rqstp->rq_res.head[0].iov_len;

	if (xprt && space < rqstp->rq_reserved) {
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;
		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

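/*
 * For instance, once a service has parsed a request and knows the reply
 * will be small, it can return most of its reservation so that the
 * transport's xpo_has_wspace method stops counting it (a sketch; the
 * 512-byte figure is just an example):
 *
 *	svc_reserve(rqstp, 512);
 *
 * Shrinking the reservation may make the transport writable again,
 * which is why svc_reserve() ends with svc_xprt_enqueue().
 */
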
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	pagevec_release(&rqstp->rq_pvec);
	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

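/*
 * Worked example of the default limit above: a server running 8 nfsd
 * threads and leaving sv_maxconn at 0 tolerates (8 + 3) * 20 = 220
 * temporary (connected) transports before the oldest one is closed.
 */
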
static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg = &rqstp->rq_arg;
	unsigned long pages, filled, ret;

	pagevec_init(&rqstp->rq_pvec);

	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
	if (pages > RPCSVC_MAXPAGES) {
		pr_warn_once("svc: warning: pages=%lu > RPCSVC_MAXPAGES=%lu\n",
			     pages, RPCSVC_MAXPAGES);
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES;
	}

	for (filled = 0; filled < pages; filled = ret) {
		ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
					     rqstp->rq_pages);
		if (ret > filled)
			/* Made progress, don't sleep yet */
			continue;

		set_current_state(TASK_INTERRUPTIBLE);
		if (signalled() || kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			return -EINTR;
		}
		trace_svc_alloc_arg_err(pages, ret);
		memalloc_retry_wait(GFP_KERNEL);
	}
	rqstp->rq_page_end = &rqstp->rq_pages[pages];
	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

712 | static bool |
713 | rqst_should_sleep(struct svc_rqst *rqstp) | |
714 | { | |
715 | struct svc_pool *pool = rqstp->rq_pool; | |
716 | ||
717 | /* did someone call svc_wake_up? */ | |
718 | if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) | |
719 | return false; | |
720 | ||
721 | /* was a socket queued? */ | |
722 | if (!list_empty(&pool->sp_sockets)) | |
723 | return false; | |
724 | ||
725 | /* are we shutting down? */ | |
726 | if (signalled() || kthread_should_stop()) | |
727 | return false; | |
728 | ||
729 | /* are we freezing? */ | |
730 | if (freezing(current)) | |
731 | return false; | |
732 | ||
733 | return true; | |
734 | } | |
735 | ||
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	smp_mb__before_atomic();
	clear_bit(SP_CONGESTED, &pool->sp_flags);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	set_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();
	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
out_found:
	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
		rqstp->rq_chandle.thread_wait = 5*HZ;
	else
		rqstp->rq_chandle.thread_wait = 1*HZ;
	trace_svc_xprt_dequeue(rqstp);
	return rqstp->rq_xprt;
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		serv->sv_temptimer.function = svc_age_temp_xprts;
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			newxpt->xpt_cred = get_cred(xprt->xpt_cred);
			svc_add_new_temp_xprt(serv, newxpt);
			trace_svc_xprt_accept(newxpt, serv->sv_name);
		} else {
			module_put(xprt->xpt_class->xcl_owner);
		}
		svc_xprt_received(xprt);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		rqstp->rq_stime = ktime_get();
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	} else
		svc_xprt_received(xprt);

out:
	return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;
	trace_svc_xdr_recvfrom(&rqstp->rq_arg);

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	xprt->xpt_ops->xpo_secure_port(rqstp);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

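/*
 * svc_recv() is the heart of a server thread's main loop. A minimal
 * sketch of such a loop, modelled on how nfsd-style threads drive this
 * interface (setup, accounting and signal details are elided):
 *
 *	for (;;) {
 *		err = svc_recv(rqstp, 60 * 60 * HZ);
 *		if (err == -EINTR)
 *			break;			// shutting down
 *		if (err == -EAGAIN)
 *			continue;		// nothing usable arrived
 *		svc_process(rqstp);		// dispatch; reply goes out
 *						// via svc_send()
 *	}
 */
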
/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len = -EFAULT;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;
	trace_svc_xdr_sendto(rqstp->rq_xid, xb);
	trace_svc_stats_latency(rqstp);

	len = xprt->xpt_ops->xpo_sendto(rqstp);

	trace_svc_send(rqstp, len);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(struct timer_list *t)
{
	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del_init(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	trace_svc_xprt_detach(xprt);
	xprt->xpt_ops->xpo_detach(xprt);
	if (xprt->xpt_bc_xprt)
		xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

/**
 * svc_xprt_close - Close a client connection
 * @xprt: transport to disconnect
 *
 */
void svc_xprt_close(struct svc_xprt *xprt)
{
	trace_svc_xprt_close(xprt);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_xprt_close() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourselves:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_close);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/**
 * svc_xprt_destroy_all - Destroy transports associated with @serv
 * @serv: RPC service to be shut down
 * @net: target network namespace
 *
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  In the case there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_destroy_all);

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		trace_svc_defer_drop(dr);
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	trace_svc_defer_queue(dr);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
		rqstp->rq_xprt_ctxt = NULL;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	trace_svc_defer(rqstp);
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	trace_svc_defer_recv(dr);

	/* Point rq_arg.head at the saved argument data */
	rqstp->rq_arg.head[0].iov_base = dr->args;
	/* The saved data carries no transport header, so the head
	 * length and the total length are simply the saved length:
	 */
	rqstp->rq_arg.head[0].iov_len = dr->argslen << 2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen << 2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Restore the destination address for the reply */
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_xprt_ctxt = dr->xprt_ctxt;
	svc_xprt_received(rqstp->rq_xprt);
	return dr->argslen << 2;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		   pool->sp_id,
		   (unsigned long)atomic_long_read(&pool->sp_stats.packets),
		   pool->sp_stats.sockets_queued,
		   (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		   (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

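/*
 * Readers of the seq file set up above (nfsd exposes it as
 * /proc/fs/nfsd/pool_stats) see one header line followed by one line
 * per pool, e.g. (illustrative values):
 *
 *	# pool packets-arrived sockets-enqueued threads-woken threads-timedout
 *	0 1827234 1728994 1727072 11
 */
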
/*----------------------------------------------------------------------------*/