/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
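
/*
 * Worked example of the 0.1% rule implemented above: for a relative
 * timeout of 2.5 seconds on a normal-priority task, divfactor is 1000,
 * so the slack computed is
 *
 *	2 * (NSEC_PER_SEC / 1000) + 500000000 / 1000 = 2500000 ns (2.5 ms),
 *
 * i.e. 0.1% of the timeout.  A 200 second timeout would yield 200 ms and
 * is therefore clamped to MAX_SLACK (100 ms) by the tv_sec check above.
 */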

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
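
/*
 * For illustration, a minimal sketch of the driver side of this contract:
 * a ->poll method calls poll_wait() on each wait queue it may later wake,
 * then reports the events that are ready right now.  The names mydev,
 * mydev_poll and mydev_data_ready are placeholders for a hypothetical
 * driver, not anything defined in this file:
 *
 *	static unsigned int mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (mydev_data_ready(dev))
 *			return POLLIN | POLLRDNORM;
 *		return 0;
 *	}
 */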
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
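
/*
 * Example: a caller that wants a 1.5 second relative timeout would do
 *
 *	struct timespec64 end_time;
 *
 *	if (poll_select_set_timeout(&end_time, 1, 500 * NSEC_PER_MSEC))
 *		return -EINVAL;
 *
 * after which end_time holds the absolute monotonic expiry consumed by
 * do_select()/do_poll() below (this mirrors what sys_poll() does with
 * its millisecond argument).
 */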

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last in-complete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
		if (!bits && alloc_size > PAGE_SIZE)
			bits = vmalloc(alloc_size);

		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
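
/*
 * Userspace view of the timeout update done by poll_select_copy_remaining()
 * above: on Linux (without STICKY_TIMEOUTS) the timeval is rewritten with
 * the time that was left, so it must be reinitialized before each call.
 * A minimal sketch, where fd stands for some readable descriptor:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	fd_set rfds;
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0) {
 *		... fd is readable; tv now holds the remaining time ...
 *	}
 */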

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
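
/*
 * A sketch of the userspace side of that convention, as a libc pselect()
 * wrapper would typically pack the sixth argument (the struct below is
 * illustrative; the kernel only defines its layout, not a name for it):
 *
 *	struct {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	} psig = { &mask, sizeof(sigset_t) };
 *
 *	syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &psig);
 */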

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

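/*
 * Rough sizing of the chunking above, assuming a typical 64-bit build with
 * 4 KiB pages, POLL_STACK_ALLOC of 256 and an 8-byte struct pollfd (these
 * values are configuration dependent): the on-stack chunk holds about
 * N_STACK_PPS = (256 - 16) / 8 = 30 entries, and each additional chunk
 * holds about POLLFD_PER_PAGE = (4096 - 16) / 8 = 510 entries, so polling
 * 1000 descriptors needs the stack chunk plus two kmalloc'd chunks.
 */
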
static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}