/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

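/*
 * Final-put path for a transport: release the auth cache entry, the
 * network namespace reference, and any backchannel state, then hand
 * the memory back to the transport class via xpo_free() before
 * dropping the owning module reference.
 */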
static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

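/*
 * Add a newly-created transport to the service's list of permanent
 * sockets, then clear XPT_BUSY (set in svc_xprt_init) via
 * svc_xprt_received() so it can be enqueued for processing.
 */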
void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

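/*
 * Look up the named transport class and create a listener endpoint on
 * the given port. Returns the bound port number on success, or
 * -EPROTONOSUPPORT when no matching class is registered.
 */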
int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		     struct net *net, const int family,
		     const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	int err;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	}
	if (err)
		dprintk("svc: transport %s not found, err %d\n",
			xprt_name, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

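/*
 * Per-connection request throttling: when the svc_rpc_per_connection_limit
 * module parameter is non-zero, at most that many requests may be in
 * flight on any one transport. A svc_rqst that has received data holds
 * one slot (tracked by RQ_DATA and xpt_nr_rqsts) until the request is
 * released.
 */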
static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;
	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		svc_xprt_enqueue(xprt);
	}
}

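/*
 * Decide whether a transport needs servicing: a pending connection or
 * close always qualifies; pending data or a deferred request only
 * qualifies if there is write space for the reply and a request slot
 * is available.
 */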
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

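/*
 * Queue a transport for servicing: mark it XPT_BUSY, then look for an
 * idle thread in the pool for the current CPU and hand the transport
 * to it directly. If every thread is busy, put the transport on the
 * pool's sp_sockets list and search once more in case a thread went
 * idle in the meantime.
 */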
void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp = NULL;
	int cpu;
	bool queued = false;

	if (!svc_xprt_has_something_to_do(xprt))
		goto out;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

redo_search:
	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Do a lockless check first */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		/*
		 * Once the xprt has been queued, it can only be dequeued by
		 * the task that intends to service it. All we can do at that
		 * point is to try to wake this thread back up so that it can
		 * do so.
		 */
		if (!queued) {
			spin_lock_bh(&rqstp->rq_lock);
			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
				/* already busy, move on... */
				spin_unlock_bh(&rqstp->rq_lock);
				continue;
			}

			/* this one will do */
			rqstp->rq_xprt = xprt;
			svc_xprt_get(xprt);
			spin_unlock_bh(&rqstp->rq_lock);
		}
		rcu_read_unlock();

		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);
		put_cpu();
		goto out;
	}
	rcu_read_unlock();

	/*
	 * We didn't find an idle thread to use, so we need to queue the xprt.
	 * Do so and then search again. If we find one, we can't hook this one
	 * up to it directly but we can wake the thread up in the hopes that it
	 * will pick it up once it searches for a xprt to service.
	 */
	if (!queued) {
		queued = true;
		dprintk("svc: transport %p put into queue\n", xprt);
		spin_lock_bh(&pool->sp_lock);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		spin_unlock_bh(&pool->sp_lock);
		goto redo_search;
	}
	rqstp = NULL;
	put_cpu();
out:
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);

		dprintk("svc: transport %p dequeued, inuse=%d\n",
			xprt, kref_read(&xprt->xpt_ref));
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	trace_svc_xprt_dequeue(xprt);
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		dprintk("svc: daemon %p woken up.\n", rqstp);
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

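/*
 * Allocate pages for the request's receive buffer (rq_pages) and set
 * up rq_arg so that head[0] is the first page and the remaining pages,
 * less one reserved for the reply, form the page list. Sleeps and
 * retries on allocation failure unless the thread is being shut down.
 */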
static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
	if (pages > RPCSVC_MAXPAGES) {
		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
			     pages, RPCSVC_MAXPAGES);
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES;
	}
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

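/*
 * A thread may only go to sleep waiting for work when no wakeup is
 * pending, no transport is queued on the pool, and the thread is not
 * being shut down or frozen.
 */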
static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool		*pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

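/*
 * Wait for the next transport that needs servicing. Either a queued
 * transport is dequeued immediately, or the thread sleeps (for at most
 * the given timeout) until svc_xprt_do_enqueue() hands it one. Returns
 * ERR_PTR(-EINTR) on shutdown and ERR_PTR(-EAGAIN) on timeout.
 */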
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool		*pool = rqstp->rq_pool;
	long			time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
		return xprt;
	}

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	spin_lock_bh(&rqstp->rq_lock);
	set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_unlock_bh(&rqstp->rq_lock);

	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
}

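/*
 * Add an accepted connection to the service's list of temporary
 * sockets and arm the aging timer (svc_age_temp_xprts) if it is not
 * already running.
 */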
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

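/*
 * Service a ready transport: delete it if it is closing, accept a new
 * connection if it is a listener, or receive pending data (replaying a
 * deferred request first if one exists). Returns the number of bytes
 * received; zero or less means no complete request was read.
 */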
static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	int			len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			 rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	if (xprt->xpt_ops->xpo_secure_port(rqstp))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	trace_svc_recv(rqstp, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len = -EFAULT;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	trace_svc_send(rqstp, len);
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

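/*
 * Run and remove every callback registered on the transport's
 * xpt_users list; invoked when the transport is deleted.
 */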
static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

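/*
 * Mark every transport on the given list that belongs to the given
 * network namespace for closing and enqueue it so a server thread will
 * complete the close. Returns the number of transports so marked.
 */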
static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

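/*
 * Remove and return the first queued transport, on any pool, that
 * belongs to the given network namespace, or NULL if none remain.
 */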
static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  In the case there are no such other threads, threads
 * running in svc_clean_up_xprts() do a simple version of a server's
 * main event loop, and in the case where there are other threads, we
 * may need to wait a little while and then check again to see if
 * they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		trace_svc_drop_deferred(dr);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 *	<xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	trace_svc_defer(rqstp);
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len     = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		trace_svc_revisit_deferred(dr);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

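/*
 * seq_file iterator for per-pool statistics: position 0 yields the
 * header token, positions 1..sv_nrpools yield the corresponding
 * svc_pool, and svc_pool_stats_show() prints one line of counters
 * per pool.
 */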
static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/