Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_POLL_H |
3 | #define _LINUX_POLL_H | |
4 | ||
1da177e4 LT |
5 | |
6 | #include <linux/compiler.h> | |
a99bbaf5 | 7 | #include <linux/ktime.h> |
1da177e4 LT |
8 | #include <linux/wait.h> |
9 | #include <linux/string.h> | |
f23f6e08 | 10 | #include <linux/fs.h> |
7c0f6ba6 | 11 | #include <linux/uaccess.h> |
607ca46e | 12 | #include <uapi/linux/poll.h> |
e78cd95b | 13 | #include <uapi/linux/eventpoll.h> |
1da177e4 | 14 | |
/*
 * ~832 bytes of stack space used max in sys_select/sys_poll before
 * allocating additional memory.  clang inlines more aggressively here,
 * so it gets a smaller budget to stay within the same stack bound.
 */
#ifdef __clang__
#define MAX_STACK_ALLOC			768
#else
#define MAX_STACK_ALLOC			832
#endif
#define FRONTEND_STACK_ALLOC		256
#define SELECT_STACK_ALLOC		FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC		FRONTEND_STACK_ALLOC
/* Remainder of the budget is used for on-stack poll wait-queue entries. */
#define WQUEUES_STACK_ALLOC		(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES		(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

/* Mask reported for files whose f_op has no ->poll method. */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
dd23aae4 | 29 | |
1da177e4 LT |
30 | struct poll_table_struct; |
31 | ||
32 | /* | |
33 | * structures and helpers for f_op->poll implementations | |
34 | */ | |
35 | typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); | |
36 | ||
626cf236 HV |
37 | /* |
38 | * Do not touch the structure directly, use the access functions | |
39 | * poll_does_not_wait() and poll_requested_events() instead. | |
40 | */ | |
1da177e4 | 41 | typedef struct poll_table_struct { |
626cf236 | 42 | poll_queue_proc _qproc; |
01699437 | 43 | __poll_t _key; |
1da177e4 LT |
44 | } poll_table; |
45 | ||
46 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) | |
47 | { | |
626cf236 HV |
48 | if (p && p->_qproc && wait_address) |
49 | p->_qproc(filp, wait_address, p); | |
50 | } | |
51 | ||
52 | /* | |
53 | * Return true if it is guaranteed that poll will not wait. This is the case | |
54 | * if the poll() of another file descriptor in the set got an event, so there | |
55 | * is no need for waiting. | |
56 | */ | |
57 | static inline bool poll_does_not_wait(const poll_table *p) | |
58 | { | |
59 | return p == NULL || p->_qproc == NULL; | |
60 | } | |
61 | ||
62 | /* | |
63 | * Return the set of events that the application wants to poll for. | |
64 | * This is useful for drivers that need to know whether a DMA transfer has | |
65 | * to be started implicitly on poll(). You typically only want to do that | |
66 | * if the application is actually polling for POLLIN and/or POLLOUT. | |
67 | */ | |
01699437 | 68 | static inline __poll_t poll_requested_events(const poll_table *p) |
626cf236 | 69 | { |
01699437 | 70 | return p ? p->_key : ~(__poll_t)0; |
1da177e4 LT |
71 | } |
72 | ||
73 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | |
74 | { | |
626cf236 | 75 | pt->_qproc = qproc; |
01699437 | 76 | pt->_key = ~(__poll_t)0; /* all events enabled */ |
1da177e4 LT |
77 | } |
78 | ||
a11e1d43 | 79 | static inline bool file_can_poll(struct file *file) |
9965ed17 | 80 | { |
a11e1d43 | 81 | return file->f_op->poll; |
9965ed17 CH |
82 | } |
83 | ||
a11e1d43 | 84 | static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) |
9965ed17 | 85 | { |
a11e1d43 LT |
86 | if (unlikely(!file->f_op->poll)) |
87 | return DEFAULT_POLLMASK; | |
88 | return file->f_op->poll(file, pt); | |
9965ed17 CH |
89 | } |
90 | ||
70674f95 | 91 | struct poll_table_entry { |
5f820f64 | 92 | struct file *filp; |
ddc0505f | 93 | __poll_t key; |
ac6424b9 | 94 | wait_queue_entry_t wait; |
5f820f64 | 95 | wait_queue_head_t *wait_address; |
70674f95 AK |
96 | }; |
97 | ||
1da177e4 | 98 | /* |
dac36dd8 | 99 | * Structures and helpers for select/poll syscall |
1da177e4 LT |
100 | */ |
101 | struct poll_wqueues { | |
102 | poll_table pt; | |
5f820f64 TH |
103 | struct poll_table_page *table; |
104 | struct task_struct *polling_task; | |
105 | int triggered; | |
1da177e4 | 106 | int error; |
70674f95 AK |
107 | int inline_index; |
108 | struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; | |
1da177e4 LT |
109 | }; |
110 | ||
111 | extern void poll_initwait(struct poll_wqueues *pwq); | |
112 | extern void poll_freewait(struct poll_wqueues *pwq); | |
766b9f92 | 113 | extern u64 select_estimate_accuracy(struct timespec64 *tv); |
95aac7b1 | 114 | |
9f72949f DW |
115 | #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) |
116 | ||
a2dcb44c | 117 | extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, |
766b9f92 | 118 | fd_set __user *exp, struct timespec64 *end_time); |
1da177e4 | 119 | |
766b9f92 DD |
120 | extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, |
121 | long nsec); | |
b773ad40 | 122 | |
7a163b21 AV |
123 | #define __MAP(v, from, to) \ |
124 | (from < to ? (v & from) * (to/from) : (v & from) / (from/to)) | |
125 | ||
126 | static inline __u16 mangle_poll(__poll_t val) | |
127 | { | |
128 | __u16 v = (__force __u16)val; | |
129 | #define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X) | |
130 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
131 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
132 | M(HUP) | M(RDHUP) | M(MSG); | |
133 | #undef M | |
134 | } | |
135 | ||
136 | static inline __poll_t demangle_poll(u16 val) | |
137 | { | |
138 | #define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X) | |
139 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
140 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
141 | M(HUP) | M(RDHUP) | M(MSG); | |
142 | #undef M | |
143 | } | |
144 | #undef __MAP | |
145 | ||
146 | ||
1da177e4 | 147 | #endif /* _LINUX_POLL_H */ |