1 // SPDX-License-Identifier: GPL-2.0-or-later
3 #include <linux/syscalls.h>
4 #include <linux/time_namespace.h>
9 * Support for robust futexes: the kernel cleans up held futexes at
12 * Implementation: user-space maintains a per-thread list of locks it
13 * is holding. Upon do_exit(), the kernel carefully walks this list,
14 * and marks all locks that are owned by this thread with the
15 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
16 * always manipulated with the lock held, so the list is private and
17 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
18 * field, to allow the kernel to clean up if the thread dies after
19 * acquiring the lock, but just before it could have added itself to
20 * the list. There can only be one such pending lock.
24 * sys_set_robust_list() - Set the robust-futex list head of a task
25 * @head: pointer to the list-head
26 * @len: length of the list-head, as userspace expects
/*
 * Publish this thread's robust-futex list head so the kernel can walk it
 * at exit time (see the "robust futexes" comment at the top of this file).
 * NOTE(review): the listing is elided here; the -EINVAL return for a bad
 * length and the final "return 0;" are not visible in this chunk.
 */
28 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
32	 * The kernel knows only one size for now:
	/* Reject any userspace header size other than the one ABI size. */
34	if (unlikely(len != sizeof(*head)))
	/*
	 * The pointer is stored unvalidated; it is only dereferenced (with
	 * careful user-access handling) when the task exits.
	 */
37	current->robust_list = head;
43 * sys_get_robust_list() - Get the robust-futex list head of a task
44 * @pid: pid of the process [zero for current task]
45 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
46 * @len_ptr: pointer to a length field, the kernel fills in the header size
/*
 * Fetch the robust-futex list head of a task. Per the kerneldoc above,
 * pid == 0 means the current task. Reading another task's head pointer is
 * gated by a ptrace access check so it cannot be used to probe arbitrary
 * processes. NOTE(review): listing elided — the pid==0/current shortcut,
 * RCU locking and the -ESRCH/-EPERM error paths are not visible here.
 */
48 SYSCALL_DEFINE3(get_robust_list, int, pid,
49 struct robust_list_head __user * __user *, head_ptr,
50 size_t __user *, len_ptr)
52 struct robust_list_head __user *head;
54 struct task_struct *p;
	/* Look the task up by its pid as seen in the caller's pid namespace. */
62 p = find_task_by_vpid(pid);
	/* Same credentials check ptrace uses for reading a task's memory. */
68 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
71 head = p->robust_list;
	/* Report the header size first, then the head pointer itself. */
74 if (put_user(sizeof(*head), len_ptr))
76 return put_user(head, head_ptr);
/*
 * Central dispatcher for the multiplexed futex(2) syscall: decodes the
 * flags and command from @op and forwards to the per-command
 * implementation. NOTE(review): listing elided — the opening of the
 * switch, several case labels (e.g. the ones preceding lines 99/104/109),
 * the -ENOSYS paths and the closing default are not visible in this chunk.
 */
84 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
85 u32 __user *uaddr2, u32 val2, u32 val3)
87 unsigned int flags = futex_to_flags(op);
88 int cmd = op & FUTEX_CMD_MASK;
	/*
	 * FUTEX_CLOCK_REALTIME is only meaningful for the commands that take
	 * an absolute timeout; reject it for everything else.
	 */
90 if (flags & FLAGS_CLOCKRT) {
91 if (cmd != FUTEX_WAIT_BITSET &&
92 cmd != FUTEX_WAIT_REQUEUE_PI &&
93 cmd != FUTEX_LOCK_PI2)
	/*
	 * Plain FUTEX_WAIT is implemented as WAIT_BITSET with an all-ones
	 * bitset: set the match-any mask and fall through to the shared call.
	 */
99 val3 = FUTEX_BITSET_MATCH_ANY;
101 case FUTEX_WAIT_BITSET:
102 return futex_wait(uaddr, flags, val, timeout, val3);
	/* Same trick for plain FUTEX_WAKE: match-any bitset, shared wake path. */
104 val3 = FUTEX_BITSET_MATCH_ANY;
106 case FUTEX_WAKE_BITSET:
107 return futex_wake(uaddr, flags, val, val3);
	/* Plain REQUEUE: no expected-value check (NULL cmpval), no PI. */
109 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
110 case FUTEX_CMP_REQUEUE:
	/* CMP_REQUEUE additionally verifies *uaddr == val3 before requeueing. */
111 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
113 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	/*
	 * Presumably the FUTEX_LOCK_PI case: classic LOCK_PI always uses
	 * CLOCK_REALTIME timeouts, hence the unconditional flag — the case
	 * label itself is elided; confirm against the full source.
	 */
115 flags |= FLAGS_CLOCKRT;
118 return futex_lock_pi(uaddr, flags, timeout, 0);
119 case FUTEX_UNLOCK_PI:
120 return futex_unlock_pi(uaddr, flags);
121 case FUTEX_TRYLOCK_PI:
	/* trylock == lock_pi with no timeout and the trylock flag set. */
122 return futex_lock_pi(uaddr, flags, NULL, 1);
123 case FUTEX_WAIT_REQUEUE_PI:
124 val3 = FUTEX_BITSET_MATCH_ANY;
125 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
127 case FUTEX_CMP_REQUEUE_PI:
	/* Final argument 1 selects the PI-aware requeue path. */
128 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
/*
 * Return true when @cmd's utime argument is an actual timeout that must be
 * copied in and converted. NOTE(review): the switch header and remaining
 * case labels (and the default/return) are elided from this listing; only
 * WAIT_BITSET and WAIT_REQUEUE_PI are visible — confirm the full set
 * against the unelided source.
 */
133 static __always_inline bool futex_cmd_has_timeout(u32 cmd)
139 case FUTEX_WAIT_BITSET:
140 case FUTEX_WAIT_REQUEUE_PI:
/*
 * Validate the userspace timespec and convert it into the absolute ktime_t
 * the wait paths expect. NOTE(review): the -EINVAL and final 0 returns are
 * elided from this listing.
 */
146 static __always_inline int
147 futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)
149 if (!timespec64_valid(ts))
152 *t = timespec64_to_ktime(*ts);
	/*
	 * Plain FUTEX_WAIT takes a *relative* timeout: anchor it to "now" to
	 * produce the absolute expiry the core code works with.
	 */
153 if (cmd == FUTEX_WAIT)
154 *t = ktime_add_safe(ktime_get(), *t);
	/*
	 * Absolute CLOCK_MONOTONIC timeouts are expressed in the caller's
	 * time namespace; translate to the host clock. LOCK_PI and
	 * CLOCK_REALTIME timeouts need no such translation.
	 */
155 else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
156 *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);
/*
 * The native futex(2) entry point: copy in and convert the timeout when the
 * command uses one, then hand everything to do_futex(). NOTE(review): the
 * fault-injection/-EFAULT returns and the "tp = &t;" assignment are elided
 * from this listing.
 */
160 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
161 const struct __kernel_timespec __user *, utime,
162 u32 __user *, uaddr2, u32, val3)
164 int ret, cmd = op & FUTEX_CMD_MASK;
165 ktime_t t, *tp = NULL;
166 struct timespec64 ts;
	/* Only touch utime for commands that actually interpret it as a timeout. */
168 if (utime && futex_cmd_has_timeout(cmd)) {
	/* Fault-injection hook for testing the EFAULT path. */
169 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
171 if (get_timespec64(&ts, utime))
173 ret = futex_init_timeout(cmd, op, &ts, &t);
	/*
	 * For the requeue commands utime is not a timespec but carries val2
	 * (number of waiters to requeue), hence the integer cast.
	 */
179 return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
182 #define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_PRIVATE)
185 * futex_parse_waitv - Parse a waitv array from userspace
186 * @futexv: Kernel side list of waiters to be filled
187 * @uwaitv: Userspace list to be parsed
188 * @nr_futexes: Length of futexv
190 * Return: Error code on failure, 0 on success
/*
 * Copy and validate a userspace futex_waitv array into the kernel-side
 * futex_vector list (see the kerneldoc above). NOTE(review): the -EFAULT /
 * -EINVAL returns inside the loop and the final "return 0;" are elided
 * from this listing.
 */
192 static int futex_parse_waitv(struct futex_vector *futexv,
193 struct futex_waitv __user *uwaitv,
194 unsigned int nr_futexes)
196 struct futex_waitv aux;
199 for (i = 0; i < nr_futexes; i++) {
202 if (copy_from_user(&aux, &uwaitv[i], sizeof(aux)))
	/* Unknown flag bits and the reserved field must be zero (future ABI). */
205 if ((aux.flags & ~FUTEX2_VALID_MASK) || aux.__reserved)
	/* Translate the futex2 uapi flags into the internal FLAGS_* encoding. */
208 flags = futex2_to_flags(aux.flags);
209 if (!futex_flags_valid(flags))
	/* Checks e.g. that @val fits the futex size selected by @flags. */
212 if (!futex_validate_input(flags, aux.val))
215 futexv[i].w.flags = flags;
216 futexv[i].w.val = aux.val;
217 futexv[i].w.uaddr = aux.uaddr;
218 futexv[i].q = futex_q_init;
225 * sys_futex_waitv - Wait on a list of futexes
226 * @waiters: List of futexes to wait on
227 * @nr_futexes: Length of futexv
228 * @flags: Flag for timeout (monotonic/realtime)
229 * @timeout: Optional absolute timeout.
230 * @clockid: Clock to be used for the timeout, realtime or monotonic.
232 * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
233 * if a futex_wake() is performed at any uaddr. The syscall returns immediately
234 * if any waiter has *uaddr != val. *timeout is an optional timeout value for
235 * the operation. Each waiter has individual flags. The `flags` argument for
236 * the syscall should be used solely for specifying the timeout as realtime, if
237 * needed. Flags for private futexes, sizes, etc. should be used on the
238 * individual flags of each waiter.
240 * Returns the array index of one of the woken futexes. No further information
241 * is provided: any number of other futexes may also have been woken by the
242 same event, and if more than one futex was woken, the returned index may
243 refer to any one of them. (It is not necessarily the futex with the
244 * smallest index, nor the one most recently woken, nor...)
/*
 * futex_waitv(2): wait on an array of futexes, waking when any one of them
 * is woken (full contract in the kerneldoc above). NOTE(review): listing
 * elided — the syscall-level flags check, the timeout==NULL branch
 * structure, the kcalloc failure path, the kfree of futexv and the final
 * return are not visible in this chunk.
 */
247 SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
248 unsigned int, nr_futexes, unsigned int, flags,
249 struct __kernel_timespec __user *, timeout, clockid_t, clockid)
251 struct hrtimer_sleeper to;
252 struct futex_vector *futexv;
253 struct timespec64 ts;
257 /* This syscall supports no flags for now */
	/* Bound the array: at least one entry, at most FUTEX_WAITV_MAX. */
261 if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
265 int flag_clkid = 0, flag_init = 0;
267 if (clockid == CLOCK_REALTIME) {
268 flag_clkid = FLAGS_CLOCKRT;
269 flag_init = FUTEX_CLOCK_REALTIME;
	/* Only the two wall/monotonic clocks are supported for the timeout. */
272 if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC)
275 if (get_timespec64(&ts, timeout))
279 * Since there's no opcode for futex_waitv, use
280 * FUTEX_WAIT_BITSET that uses absolute timeout as well
282 ret = futex_init_timeout(FUTEX_WAIT_BITSET, flag_init, &ts, &time);
	/* On-stack hrtimer armed with the absolute expiry computed above. */
286 futex_setup_timer(&time, &to, flag_clkid, 0);
289 futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
295 ret = futex_parse_waitv(futexv, waiters, nr_futexes);
	/* Timer sleeper is only passed along when the caller gave a timeout. */
297 ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
	/* Tear down the on-stack timer before leaving. */
303 hrtimer_cancel(&to.timer);
304 destroy_hrtimer_on_stack(&to.timer);
310 * sys_futex_wake - Wake a number of futexes
311 * @uaddr: Address of the futex(es) to wake
313 * @nr: Number of the futexes to wake
314 * @flags: FUTEX2 flags
316 * Identical to the traditional FUTEX_WAKE_BITSET op, except it is part of the
317 * futex2 family of calls.
/*
 * futex_wake(2), the futex2-family wake call (see kerneldoc above).
 * NOTE(review): listing elided — the @nr and @flags parameter lines of the
 * SYSCALL_DEFINE4, the mask setup and the -EINVAL returns are not visible
 * in this chunk.
 */
320 SYSCALL_DEFINE4(futex_wake,
321 void __user *, uaddr,
	/* Reject any flag bits outside the futex2 uapi set. */
326 if (flags & ~FUTEX2_VALID_MASK)
	/* Convert uapi flags to the internal FLAGS_* encoding, then validate. */
329 flags = futex2_to_flags(flags);
330 if (!futex_flags_valid(flags))
333 if (!futex_validate_input(flags, mask))
	/* Delegate to the same core wake path as FUTEX_WAKE_BITSET. */
336 return futex_wake(uaddr, flags, nr, mask);
/*
 * 32-bit compat variant of set_robust_list(): identical logic to the native
 * call but stores the head in the separate compat_robust_list slot, since
 * the compat list layout uses 32-bit pointers. NOTE(review): the -EINVAL
 * return and "return 0;" are elided from this listing.
 */
340 COMPAT_SYSCALL_DEFINE2(set_robust_list,
341 struct compat_robust_list_head __user *, head,
	/* Only the one known compat header size is accepted. */
344 if (unlikely(len != sizeof(*head)))
347 current->compat_robust_list = head;
/*
 * 32-bit compat variant of get_robust_list(): mirrors the native call but
 * reads compat_robust_list and returns the head as a compat (32-bit)
 * pointer. NOTE(review): listing elided — the pid==0 shortcut, RCU locking
 * and error returns are not visible in this chunk.
 */
352 COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
353 compat_uptr_t __user *, head_ptr,
354 compat_size_t __user *, len_ptr)
356 struct compat_robust_list_head __user *head;
358 struct task_struct *p;
366 p = find_task_by_vpid(pid);
	/* Same ptrace-style credential gate as the native syscall. */
372 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
375 head = p->compat_robust_list;
378 if (put_user(sizeof(*head), len_ptr))
	/* Shrink the 64-bit kernel pointer to the 32-bit compat representation. */
380 return put_user(ptr_to_compat(head), head_ptr);
387 #endif /* CONFIG_COMPAT */
389 #ifdef CONFIG_COMPAT_32BIT_TIME
/*
 * futex(2) entry point for 32-bit time ABIs: same shape as the native
 * sys_futex above, except the timeout is an old_timespec32. NOTE(review):
 * listing elided — the val3 parameter line, the -EFAULT returns and the
 * "tp = &t;" assignment are not visible in this chunk.
 */
390 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
391 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
394 int ret, cmd = op & FUTEX_CMD_MASK;
395 ktime_t t, *tp = NULL;
396 struct timespec64 ts;
398 if (utime && futex_cmd_has_timeout(cmd)) {
	/* 32-bit timespec copy-in; the rest matches the native path. */
399 if (get_old_timespec32(&ts, utime))
401 ret = futex_init_timeout(cmd, op, &ts, &t);
	/* As in sys_futex: for requeue commands utime carries val2, not a time. */
407 return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
409 #endif /* CONFIG_COMPAT_32BIT_TIME */