#include <linux/nospec.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
+#include <linux/prctl.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
+#define TIMER_ANY_ID INT_MIN
+
/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
return false;
}
-static int posix_timer_add(struct k_itimer *timer)
+static bool posix_timer_add_at(struct k_itimer *timer, struct signal_struct *sig, unsigned int id)
+{
+ struct timer_hash_bucket *bucket = hash_bucket(sig, id);
+
+ scoped_guard (spinlock, &bucket->lock) {
+ /*
+ * Validate under the lock as this could have raced against
+ * another thread ending up with the same ID, which is
+ * highly unlikely, but possible.
+ */
+ if (!posix_timer_hashed(bucket, sig, id)) {
+ /*
+ * Set the timer ID and the signal pointer to make
+ * it identifiable in the hash table. The signal
+ * pointer has bit 0 set to indicate that it is not
+ * yet fully initialized. posix_timer_hashed()
+ * masks this bit out, but the syscall lookup fails
+ * to match due to it being set. This guarantees
+ * that there can't be duplicate timer IDs handed
+ * out.
+ */
+ timer->it_id = (timer_t)id;
+ timer->it_signal = (struct signal_struct *)((unsigned long)sig | 1UL);
+ hlist_add_head_rcu(&timer->t_hash, &bucket->head);
+ return true;
+ }
+ }
+ return false;
+}
+
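For illustration only (not part of the patch): a minimal userspace sketch of the bit-0 pointer tagging described in the comment inside posix_timer_add_at() above. The low bit of a sufficiently aligned pointer is always clear, so it can serve as a "not yet fully initialized" marker that the hash lookup masks out while an exact-match lookup still fails.

#include <assert.h>
#include <stdio.h>

struct signal_like { long pad; };	/* stand-in for struct signal_struct */

int main(void)
{
	struct signal_like sig;

	/* Insertion: publish the pointer with bit 0 set as an "in setup" marker. */
	unsigned long tagged = (unsigned long)&sig | 1UL;

	/* Hash lookup: mask the marker out to compare against the real pointer. */
	struct signal_like *hashed = (struct signal_like *)(tagged & ~1UL);
	assert(hashed == &sig);

	/* Exact comparison (as done by the syscall lookup) fails while tagged. */
	printf("exact match while tagged: %d\n", (void *)tagged == (void *)&sig);
	return 0;
}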
+static int posix_timer_add(struct k_itimer *timer, int req_id)
{
struct signal_struct *sig = current->signal;
+ if (unlikely(req_id != TIMER_ANY_ID)) {
+ if (!posix_timer_add_at(timer, sig, req_id))
+ return -EBUSY;
+
+ /*
+ * Move the ID counter past the requested ID, so that after
+ * switching back to normal mode the IDs are outside of the
+ * exact allocated region. That avoids ID collisions on the
+ * next regular timer_create() invocations.
+ */
+ atomic_set(&sig->next_posix_timer_id, req_id + 1);
+ return req_id;
+ }
+
for (unsigned int cnt = 0; cnt <= INT_MAX; cnt++) {
/* Get the next timer ID and clamp it to positive space */
unsigned int id = atomic_fetch_inc(&sig->next_posix_timer_id) & INT_MAX;
- struct timer_hash_bucket *bucket = hash_bucket(sig, id);
- scoped_guard (spinlock, &bucket->lock) {
- /*
- * Validate under the lock as this could have raced
- * against another thread ending up with the same
- * ID, which is highly unlikely, but possible.
- */
- if (!posix_timer_hashed(bucket, sig, id)) {
- /*
- * Set the timer ID and the signal pointer to make
- * it identifiable in the hash table. The signal
- * pointer has bit 0 set to indicate that it is not
- * yet fully initialized. posix_timer_hashed()
- * masks this bit out, but the syscall lookup fails
- * to match due to it being set. This guarantees
- * that there can't be duplicate timer IDs handed
- * out.
- */
- timer->it_id = (timer_t)id;
- timer->it_signal = (struct signal_struct *)((unsigned long)sig | 1UL);
- hlist_add_head_rcu(&timer->t_hash, &bucket->head);
- return id;
- }
- }
+ if (posix_timer_add_at(timer, sig, id))
+ return id;
cond_resched();
}
/* POSIX return code when no timer ID could be allocated */
return -EAGAIN;
}
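As a side note, the "& INT_MAX" clamp in the allocation loop above keeps candidate IDs in the 0..INT_MAX range even once the shared counter wraps around. A quick standalone sketch (illustrative only):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Pretend the per-process counter is about to wrap around. */
	unsigned int counter = UINT_MAX - 1;

	for (int i = 0; i < 4; i++) {
		unsigned int id = counter++ & INT_MAX;
		printf("candidate id: %u\n", id);
	}
	/* Prints 2147483646, 2147483647, 0, 1 -- always within 0..INT_MAX. */
	return 0;
}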
+long posixtimer_create_prctl(unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_TIMER_CREATE_RESTORE_IDS_OFF:
+ current->signal->timer_create_restore_ids = 0;
+ return 0;
+ case PR_TIMER_CREATE_RESTORE_IDS_ON:
+ current->signal->timer_create_restore_ids = 1;
+ return 0;
+ case PR_TIMER_CREATE_RESTORE_IDS_GET:
+ return current->signal->timer_create_restore_ids;
+ }
+ return -EINVAL;
+}
+
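For reference, a hedged userspace sketch of driving posixtimer_create_prctl() via prctl(). It assumes PR_TIMER_CREATE_RESTORE_IDS and its OFF/ON/GET arguments are exported through the uapi prctl.h that accompanies this change; the numeric fallback value below is an assumption for older headers.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TIMER_CREATE_RESTORE_IDS
#define PR_TIMER_CREATE_RESTORE_IDS		77	/* assumed value */
#define PR_TIMER_CREATE_RESTORE_IDS_OFF		0
#define PR_TIMER_CREATE_RESTORE_IDS_ON		1
#define PR_TIMER_CREATE_RESTORE_IDS_GET		2
#endif

int main(void)
{
	/* Enable restore mode, query it, then switch it back off. */
	if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_ON, 0, 0, 0))
		return 1;
	printf("restore mode: %d\n",
	       (int)prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_GET, 0, 0, 0));
	return prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_OFF, 0, 0, 0);
}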
static struct pid *good_sigevent(sigevent_t * event)
{
struct pid *pid = task_tgid(current);
timer_t __user *created_timer_id)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
+ timer_t req_id = TIMER_ANY_ID;
struct k_itimer *new_timer;
int error, new_timer_id;
spin_lock_init(&new_timer->it_lock);
+ /* Special case for CRIU to restore timers with a given timer ID. */
+ if (unlikely(current->signal->timer_create_restore_ids)) {
+ if (copy_from_user(&req_id, created_timer_id, sizeof(req_id)))
+ return -EFAULT;
+ /* Valid IDs are 0..INT_MAX */
+ if ((unsigned int)req_id > INT_MAX)
+ return -EINVAL;
+ }
+
/*
* Add the timer to the hash table. The timer is not yet valid
* after insertion, but has a unique ID allocated.
*/
- new_timer_id = posix_timer_add(new_timer);
+ new_timer_id = posix_timer_add(new_timer, req_id);
if (new_timer_id < 0) {
posixtimer_free_timer(new_timer);
return new_timer_id;
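To tie the pieces together, here is a minimal CRIU-style restore sketch, again illustrative only. It uses the raw timer_create() syscall because, as the hunk above shows, the kernel reads the requested ID from the user-supplied created_timer_id buffer, whereas a libc wrapper may pass an internal buffer of its own. The prctl constants are assumed as in the previous sketch.

#include <signal.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef PR_TIMER_CREATE_RESTORE_IDS
#define PR_TIMER_CREATE_RESTORE_IDS	77	/* assumed value */
#define PR_TIMER_CREATE_RESTORE_IDS_OFF	0
#define PR_TIMER_CREATE_RESTORE_IDS_ON	1
#endif

int main(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL, .sigev_signo = SIGALRM };
	int restored = 1000;	/* kernel timer ID recorded at checkpoint time */
	int regular = 0;

	/* 1. Switch timer_create() into restore mode. */
	if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_ON, 0, 0, 0))
		return 1;

	/* 2. In restore mode the kernel reads the requested ID from *created_timer_id. */
	if (syscall(SYS_timer_create, CLOCK_MONOTONIC, &sev, &restored))
		return 1;

	/* 3. Back to normal mode; allocation continues past the restored ID. */
	prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_OFF, 0, 0, 0);
	if (syscall(SYS_timer_create, CLOCK_MONOTONIC, &sev, &regular))
		return 1;

	/* Expect 1000, then an ID past it (e.g. 1001), per the counter bump above. */
	printf("restored=%d regular=%d\n", restored, regular);
	return 0;
}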
struct hlist_node *next;
struct k_itimer *timer;
+ /* Clear restore mode for exec() */
+ tsk->signal->timer_create_restore_ids = 0;
+
if (hlist_empty(&tsk->signal->posix_timers))
return;