/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued more than once. During normal transport processing this
 *	bit is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */

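/*
 * Illustrative sketch (not part of the original file): the usual
 * provider-side sequence when data arrives, following the XPT_DATA
 * rules above.  The svsk pointer is hypothetical; compare the socket
 * transport in svcsock.c:
 *
 *	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 *	svc_xprt_enqueue(&svsk->sk_xprt);
 *
 * A server thread then reads from the transport and re-enables
 * enqueueing by calling svc_xprt_received(), which clears XPT_BUSY.
 */
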
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

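/*
 * Registration sketch for a transport provider module.  The "example"
 * names and the example_xprt_ops table are hypothetical, not part of
 * this file:
 *
 *	static struct svc_xprt_class example_xprt_class = {
 *		.xcl_name	 = "example",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &example_xprt_ops,
 *		.xcl_max_payload = RPCSVC_MAXPAYLOAD,
 *	};
 *
 * The provider calls svc_reg_xprt_class(&example_xprt_class) from its
 * module init function and svc_unreg_xprt_class(&example_xprt_class)
 * on unload.
 */
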
static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
	    && xprt->xpt_auth_cache != NULL)
		svcauth_unix_info_release(xprt->xpt_auth_cache);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
		   struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

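/*
 * Sketch of a provider's xpo_create method using svc_xprt_init()
 * (hypothetical example_xprt type; compare svc_setup_socket() in
 * svcsock.c):
 *
 *	struct example_xprt *ex = kzalloc(sizeof(*ex), GFP_KERNEL);
 *	if (!ex)
 *		return ERR_PTR(-ENOMEM);
 *	svc_xprt_init(&example_xprt_class, &ex->xprt, serv);
 *	return &ex->xprt;
 *
 * Note that svc_xprt_init() leaves XPT_BUSY set, so the new transport
 * cannot be enqueued until the creator clears the bit (as
 * svc_create_xprt() below does) or calls svc_xprt_received().
 */
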
int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
		    int flags)
{
	struct svc_xprt_class *xcl;
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};
	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = xcl->xcl_ops->
			xpo_create(serv, (struct sockaddr *)&sin, sizeof(sin),
				   flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return svc_xprt_local_port(newxprt);
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

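/*
 * Typical call (sketch): a service creates a permanent listener by
 * class name; the port and flags below are illustrative.  On success
 * the return value is the bound local port:
 *
 *	err = svc_create_xprt(serv, "tcp", 2049, SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		return err;
 */
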
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct sockaddr *sin;

	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	sin = (struct sockaddr *)&xprt->xpt_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread. Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread. Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_xprt_enqueue: "
		       "threads and transports both waiting??\n");

	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		/* Don't enqueue dead transports */
		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
		goto out_unlock;
	}

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
			       "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
			       rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport. Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp: The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}

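/*
 * Usage sketch (hypothetical size): once a service has decoded a call
 * and knows an upper bound on its reply, it can shrink the default
 * reservation so xpo_has_wspace() stops holding other requests back:
 *
 *	svc_reserve(rqstp, 512);
 *
 * keeps only what rq_res already holds plus 512 bytes.
 */
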
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we
 * have, something must be dropped.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open "
				       "connections, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
			}
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

/*
 * Receive the next request on any transport. This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;
	int len, i;
	int pages;
	struct xdr_buf *arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
		       "svc_recv: service %p, transport not NULL!\n",
		       rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
		       "svc_recv: service %p, wait queue active!\n",
		       rqstp);

	/* now allocate needed pages. If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				/* On failure, sleep briefly; storing the
				 * NULL is harmless since the while loop
				 * re-tests rq_pages[i] and retries. */
				int j = msecs_to_jiffies(500);
				schedule_timeout_uninterruptible(j);
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(xprt->xpt_server);
			spin_lock_bh(&serv->sv_lock);
			set_bit(XPT_TEMP, &newxpt->xpt_flags);
			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
			serv->sv_tmpcnt++;
			if (serv->sv_temptimer.function == NULL) {
				/* setup timer to age temp transports */
				setup_timer(&serv->sv_temptimer,
					    svc_age_temp_xprts,
					    (unsigned long)serv);
				mod_timer(&serv->sv_temptimer,
					  jiffies + svc_conn_age_period * HZ);
			}
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(newxpt);
		}
		svc_xprt_received(xprt);
	} else {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred) {
			svc_xprt_received(xprt);
			len = svc_deferred_recv(rqstp);
		} else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_xprt_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}

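/*
 * Sketch of a typical server-thread loop around svc_recv() (compare
 * the nfsd() and lockd() main loops; the timeout is illustrative).
 * -EAGAIN means nothing complete arrived, -EINTR means a signal asked
 * us to shut down, and svc_process() dispatches the decoded request:
 *
 *	for (;;) {
 *		int err = svc_recv(rqstp, 30*HZ);
 *		if (err == -EAGAIN)
 *			continue;
 *		if (err == -EINTR)
 *			break;
 *		svc_process(rqstp);
 *	}
 */
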
/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Remove a dead transport
 */
void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its sk_xprt.xpt_ready node was on, but we don't actually
	 * need to. This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
			serv->sv_tmpcnt--;
		svc_xprt_put(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(xprt);
	svc_delete_xprt(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_put(xprt);
}

void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * so just remove it from the waiting list
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	if (too_many) {
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	spin_lock(&xprt->xpt_lock);
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip,
		       dr->argslen<<2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

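/*
 * Usage sketch: code that cannot answer a request yet (typically a
 * cache lookup) defers it through the handle that svc_recv() installed
 * in rq_chandle; svc_revisit() later re-queues it on the transport:
 *
 *	struct cache_deferred_req *dreq;
 *
 *	dreq = rqstp->rq_chandle.defer(&rqstp->rq_chandle);
 *	if (dreq == NULL)
 *		drop the request and let the client retry;
 */
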
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	}
	spin_unlock(&xprt->xpt_lock);
	return dr;
}