Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_POLL_H |
3 | #define _LINUX_POLL_H | |
4 | ||
1da177e4 LT |
5 | |
6 | #include <linux/compiler.h> | |
a99bbaf5 | 7 | #include <linux/ktime.h> |
1da177e4 LT |
8 | #include <linux/wait.h> |
9 | #include <linux/string.h> | |
f23f6e08 | 10 | #include <linux/fs.h> |
9ff99339 | 11 | #include <linux/sysctl.h> |
7c0f6ba6 | 12 | #include <linux/uaccess.h> |
607ca46e | 13 | #include <uapi/linux/poll.h> |
e78cd95b | 14 | #include <uapi/linux/eventpoll.h> |
1da177e4 | 15 | |
9ff99339 | 16 | extern struct ctl_table epoll_table[]; /* for sysctl */ |
70674f95 AK |
17 | /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating |
18 | additional memory. */ | |
ad312f95 AB |
19 | #ifdef __clang__ |
20 | #define MAX_STACK_ALLOC 768 | |
21 | #else | |
70674f95 | 22 | #define MAX_STACK_ALLOC 832 |
ad312f95 | 23 | #endif |
70674f95 AK |
24 | #define FRONTEND_STACK_ALLOC 256 |
25 | #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC | |
26 | #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC | |
27 | #define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC) | |
28 | #define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry)) | |
29 | ||
e78cd95b | 30 | #define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM) |
dd23aae4 | 31 | |
1da177e4 LT |
32 | struct poll_table_struct; |
33 | ||
/*
 * structures and helpers for f_op->poll implementations
 */
/* Callback used by poll_wait() to queue the caller on a wait queue. */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
38 | ||
626cf236 HV |
39 | /* |
40 | * Do not touch the structure directly, use the access functions | |
41 | * poll_does_not_wait() and poll_requested_events() instead. | |
42 | */ | |
1da177e4 | 43 | typedef struct poll_table_struct { |
626cf236 | 44 | poll_queue_proc _qproc; |
01699437 | 45 | __poll_t _key; |
1da177e4 LT |
46 | } poll_table; |
47 | ||
48 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) | |
49 | { | |
626cf236 HV |
50 | if (p && p->_qproc && wait_address) |
51 | p->_qproc(filp, wait_address, p); | |
52 | } | |
53 | ||
54 | /* | |
55 | * Return true if it is guaranteed that poll will not wait. This is the case | |
56 | * if the poll() of another file descriptor in the set got an event, so there | |
57 | * is no need for waiting. | |
58 | */ | |
59 | static inline bool poll_does_not_wait(const poll_table *p) | |
60 | { | |
61 | return p == NULL || p->_qproc == NULL; | |
62 | } | |
63 | ||
64 | /* | |
65 | * Return the set of events that the application wants to poll for. | |
66 | * This is useful for drivers that need to know whether a DMA transfer has | |
67 | * to be started implicitly on poll(). You typically only want to do that | |
68 | * if the application is actually polling for POLLIN and/or POLLOUT. | |
69 | */ | |
01699437 | 70 | static inline __poll_t poll_requested_events(const poll_table *p) |
626cf236 | 71 | { |
01699437 | 72 | return p ? p->_key : ~(__poll_t)0; |
1da177e4 LT |
73 | } |
74 | ||
75 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | |
76 | { | |
626cf236 | 77 | pt->_qproc = qproc; |
01699437 | 78 | pt->_key = ~(__poll_t)0; /* all events enabled */ |
1da177e4 LT |
79 | } |
80 | ||
a11e1d43 | 81 | static inline bool file_can_poll(struct file *file) |
9965ed17 | 82 | { |
a11e1d43 | 83 | return file->f_op->poll; |
9965ed17 CH |
84 | } |
85 | ||
a11e1d43 | 86 | static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) |
9965ed17 | 87 | { |
a11e1d43 LT |
88 | if (unlikely(!file->f_op->poll)) |
89 | return DEFAULT_POLLMASK; | |
90 | return file->f_op->poll(file, pt); | |
9965ed17 CH |
91 | } |
92 | ||
/* One registered waiter: links a file's wait queue into a poll. */
struct poll_table_entry {
	/* File this entry was queued for. */
	struct file *filp;
	/* Event mask of interest for this entry. */
	__poll_t key;
	/* Wait queue entry placed on @wait_address. */
	wait_queue_entry_t wait;
	/* Wait queue head that @wait is queued on. */
	wait_queue_head_t *wait_address;
};
99 | ||
/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	/* Poll table handed to each ->poll implementation. */
	poll_table pt;
	/* Overflow storage once inline_entries[] is exhausted
	 * (NOTE(review): inferred from inline_index below — confirm). */
	struct poll_table_page *table;
	/* Task performing the poll/select. */
	struct task_struct *polling_task;
	int triggered;
	int error;
	/* Next free slot in inline_entries[]. */
	int inline_index;
	/* On-stack entries so small fd sets need no extra allocation
	 * (see the MAX_STACK_ALLOC comment above). */
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};
112 | ||
113 | extern void poll_initwait(struct poll_wqueues *pwq); | |
114 | extern void poll_freewait(struct poll_wqueues *pwq); | |
766b9f92 | 115 | extern u64 select_estimate_accuracy(struct timespec64 *tv); |
95aac7b1 | 116 | |
9f72949f DW |
117 | #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) |
118 | ||
a2dcb44c | 119 | extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, |
766b9f92 | 120 | fd_set __user *exp, struct timespec64 *end_time); |
1da177e4 | 121 | |
766b9f92 DD |
122 | extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, |
123 | long nsec); | |
b773ad40 | 124 | |
7a163b21 AV |
125 | #define __MAP(v, from, to) \ |
126 | (from < to ? (v & from) * (to/from) : (v & from) / (from/to)) | |
127 | ||
128 | static inline __u16 mangle_poll(__poll_t val) | |
129 | { | |
130 | __u16 v = (__force __u16)val; | |
131 | #define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X) | |
132 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
133 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
134 | M(HUP) | M(RDHUP) | M(MSG); | |
135 | #undef M | |
136 | } | |
137 | ||
138 | static inline __poll_t demangle_poll(u16 val) | |
139 | { | |
140 | #define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X) | |
141 | return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | | |
142 | M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | | |
143 | M(HUP) | M(RDHUP) | M(MSG); | |
144 | #undef M | |
145 | } | |
146 | #undef __MAP | |
147 | ||
148 | ||
1da177e4 | 149 | #endif /* _LINUX_POLL_H */ |