return ret;
}
-static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
+struct userfault_wait {
+ unsigned int task_state;
+ bool timeout;
+};
+
+static struct userfault_wait userfaultfd_get_blocking_state(unsigned int flags)
{
+ /*
+ * If the fault has already been tried AND there's a signal pending
+ * for this task, use TASK_UNINTERRUPTIBLE with a small timeout.
+ * This prevents busy looping: with TASK_INTERRUPTIBLE and a pending
+ * signal, schedule() returns without blocking, so the fault would
+ * just be retried in a tight loop.
+ */
+ if ((flags & FAULT_FLAG_TRIED) && signal_pending(current))
+ return (struct userfault_wait) { TASK_UNINTERRUPTIBLE, true };
+
if (flags & FAULT_FLAG_INTERRUPTIBLE)
- return TASK_INTERRUPTIBLE;
+ return (struct userfault_wait) { TASK_INTERRUPTIBLE, false };
if (flags & FAULT_FLAG_KILLABLE)
- return TASK_KILLABLE;
+ return (struct userfault_wait) { TASK_KILLABLE, false };
- return TASK_UNINTERRUPTIBLE;
+ return (struct userfault_wait) { TASK_UNINTERRUPTIBLE, false };
}
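
For reference, this is how the helper reads with the hunk above applied; a sketch assembled from the diff, assuming the usual kernel definitions of the FAULT_FLAG_* bits, the TASK_* states, and signal_pending():

struct userfault_wait {
	unsigned int task_state;	/* TASK_* state to sleep in */
	bool timeout;			/* true: bound the sleep with schedule_timeout() */
};

static struct userfault_wait userfaultfd_get_blocking_state(unsigned int flags)
{
	/*
	 * A retried fault with a signal pending must not sleep
	 * interruptibly: schedule() would return without blocking and
	 * the fault would be retried in a tight loop. Sleep
	 * uninterruptibly instead, but only for a bounded time.
	 */
	if ((flags & FAULT_FLAG_TRIED) && signal_pending(current))
		return (struct userfault_wait) { TASK_UNINTERRUPTIBLE, true };

	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return (struct userfault_wait) { TASK_INTERRUPTIBLE, false };
	if (flags & FAULT_FLAG_KILLABLE)
		return (struct userfault_wait) { TASK_KILLABLE, false };
	return (struct userfault_wait) { TASK_UNINTERRUPTIBLE, false };
}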
/*
struct userfaultfd_wait_queue uwq;
vm_fault_t ret = VM_FAULT_SIGBUS;
bool must_wait;
- unsigned int blocking_state;
+ struct userfault_wait wait_mode;
/*
* We don't do userfault handling for the final child pid update
uwq.ctx = ctx;
uwq.waken = false;
- blocking_state = userfaultfd_get_blocking_state(vmf->flags);
+ wait_mode = userfaultfd_get_blocking_state(vmf->flags);
/*
* Take the vma lock now, in order to safely call
* following the spin_unlock to happen before the list_add in
* __add_wait_queue.
*/
- set_current_state(blocking_state);
+ set_current_state(wait_mode.task_state);
spin_unlock_irq(&ctx->fault_pending_wqh.lock);
if (!is_vm_hugetlb_page(vma))
if (likely(must_wait && !READ_ONCE(ctx->released))) {
wake_up_poll(&ctx->fd_wqh, EPOLLIN);
- schedule();
+ /* See comment in userfaultfd_get_blocking_state() */
+ if (!wait_mode.timeout)
+ schedule();
+ else
+ schedule_timeout(HZ / 10);
}
__set_current_state(TASK_RUNNING);
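
Assembled from the remaining hunks, the wait path in handle_userfault() then looks roughly like the sketch below; the uwq setup, vma handling, and must_wait computation that surround it are elided, and the HZ / 10 bound is taken straight from the diff:

	wait_mode = userfaultfd_get_blocking_state(vmf->flags);
	...
	/* Publish the sleep state before dropping the waitqueue lock. */
	set_current_state(wait_mode.task_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	...
	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		/* See comment in userfaultfd_get_blocking_state() */
		if (!wait_mode.timeout)
			schedule();
		else
			schedule_timeout(HZ / 10);
	}

	__set_current_state(TASK_RUNNING);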