Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_POLL_H |
3 | #define _LINUX_POLL_H | |
4 | ||
1da177e4 LT |
5 | |
6 | #include <linux/compiler.h> | |
a99bbaf5 | 7 | #include <linux/ktime.h> |
1da177e4 LT |
8 | #include <linux/wait.h> |
9 | #include <linux/string.h> | |
f23f6e08 | 10 | #include <linux/fs.h> |
9ff99339 | 11 | #include <linux/sysctl.h> |
7c0f6ba6 | 12 | #include <linux/uaccess.h> |
607ca46e | 13 | #include <uapi/linux/poll.h> |
e78cd95b | 14 | #include <uapi/linux/eventpoll.h> |
1da177e4 | 15 | |
9ff99339 | 16 | extern struct ctl_table epoll_table[]; /* for sysctl */ |
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
/* Remainder of the stack budget goes to inline poll_table_entry slots. */
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

/* Mask returned by vfs_poll() for files whose f_op has no ->poll method. */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)

1da177e4 LT |
28 | struct poll_table_struct; |
29 | ||
30 | /* | |
31 | * structures and helpers for f_op->poll implementations | |
32 | */ | |
33 | typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); | |
34 | ||
626cf236 HV |
35 | /* |
36 | * Do not touch the structure directly, use the access functions | |
37 | * poll_does_not_wait() and poll_requested_events() instead. | |
38 | */ | |
1da177e4 | 39 | typedef struct poll_table_struct { |
626cf236 | 40 | poll_queue_proc _qproc; |
01699437 | 41 | __poll_t _key; |
1da177e4 LT |
42 | } poll_table; |
43 | ||
44 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) | |
45 | { | |
626cf236 HV |
46 | if (p && p->_qproc && wait_address) |
47 | p->_qproc(filp, wait_address, p); | |
48 | } | |
49 | ||
50 | /* | |
51 | * Return true if it is guaranteed that poll will not wait. This is the case | |
52 | * if the poll() of another file descriptor in the set got an event, so there | |
53 | * is no need for waiting. | |
54 | */ | |
55 | static inline bool poll_does_not_wait(const poll_table *p) | |
56 | { | |
57 | return p == NULL || p->_qproc == NULL; | |
58 | } | |
59 | ||
60 | /* | |
61 | * Return the set of events that the application wants to poll for. | |
62 | * This is useful for drivers that need to know whether a DMA transfer has | |
63 | * to be started implicitly on poll(). You typically only want to do that | |
64 | * if the application is actually polling for POLLIN and/or POLLOUT. | |
65 | */ | |
01699437 | 66 | static inline __poll_t poll_requested_events(const poll_table *p) |
626cf236 | 67 | { |
01699437 | 68 | return p ? p->_key : ~(__poll_t)0; |
1da177e4 LT |
69 | } |
70 | ||
71 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | |
72 | { | |
626cf236 | 73 | pt->_qproc = qproc; |
01699437 | 74 | pt->_key = ~(__poll_t)0; /* all events enabled */ |
1da177e4 LT |
75 | } |
76 | ||
a11e1d43 | 77 | static inline bool file_can_poll(struct file *file) |
9965ed17 | 78 | { |
a11e1d43 | 79 | return file->f_op->poll; |
9965ed17 CH |
80 | } |
81 | ||
a11e1d43 | 82 | static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) |
9965ed17 | 83 | { |
a11e1d43 LT |
84 | if (unlikely(!file->f_op->poll)) |
85 | return DEFAULT_POLLMASK; | |
86 | return file->f_op->poll(file, pt); | |
9965ed17 CH |
87 | } |
88 | ||
70674f95 | 89 | struct poll_table_entry { |
5f820f64 | 90 | struct file *filp; |
ddc0505f | 91 | __poll_t key; |
ac6424b9 | 92 | wait_queue_entry_t wait; |
5f820f64 | 93 | wait_queue_head_t *wait_address; |
70674f95 AK |
94 | }; |
95 | ||
1da177e4 | 96 | /* |
dac36dd8 | 97 | * Structures and helpers for select/poll syscall |
1da177e4 LT |
98 | */ |
99 | struct poll_wqueues { | |
100 | poll_table pt; | |
5f820f64 TH |
101 | struct poll_table_page *table; |
102 | struct task_struct *polling_task; | |
103 | int triggered; | |
1da177e4 | 104 | int error; |
70674f95 AK |
105 | int inline_index; |
106 | struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; | |
1da177e4 LT |
107 | }; |
108 | ||
109 | extern void poll_initwait(struct poll_wqueues *pwq); | |
110 | extern void poll_freewait(struct poll_wqueues *pwq); | |
766b9f92 | 111 | extern u64 select_estimate_accuracy(struct timespec64 *tv); |
95aac7b1 | 112 | |
9f72949f DW |
113 | #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) |
114 | ||
a2dcb44c | 115 | extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, |
766b9f92 | 116 | fd_set __user *exp, struct timespec64 *end_time); |
1da177e4 | 117 | |
766b9f92 DD |
118 | extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, |
119 | long nsec); | |
b773ad40 | 120 | |
7a163b21 AV |
121 | #define __MAP(v, from, to) \ |
122 | (from < to ? (v & from) * (to/from) : (v & from) / (from/to)) | |
123 | ||
124 | static inline __u16 mangle_poll(__poll_t val) | |
125 | { | |
126 | __u16 v = (__force __u16)val; | |
127 | #define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X) | |
128 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
129 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
130 | M(HUP) | M(RDHUP) | M(MSG); | |
131 | #undef M | |
132 | } | |
133 | ||
134 | static inline __poll_t demangle_poll(u16 val) | |
135 | { | |
136 | #define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X) | |
137 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
138 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
139 | M(HUP) | M(RDHUP) | M(MSG); | |
140 | #undef M | |
141 | } | |
142 | #undef __MAP | |
143 | ||
144 | ||
1da177e4 | 145 | #endif /* _LINUX_POLL_H */ |