/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>
/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};
/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
};
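/*
 * Layout note: the signal page is a flat array of 64-bit slots indexed by
 * event ID. The kernel reads and resets slots through kernel_address (see
 * page_slots() below), while user space writes them through the mapping of
 * the same backing pages established in kfd_event_mmap().
 */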
static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}
static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);
	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}
static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Compatibility with old user mode: Only use signal slots
	 * user mode has mapped, may be less than
	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
	 * of the event limit without breaking user mode.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}
/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}
/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:    Pointer to struct kfd_process
 * @id:   ID to look up
 * @bits: Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}
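/*
 * Illustration of the partial-ID walk above: with bits == 8 and id == 0x12,
 * the loop probes slots 0x12, 0x112, 0x212, ... up to KFD_SIGNAL_EVENT_LIMIT,
 * i.e. every event ID whose low 8 bits match the value carried in the
 * interrupt payload, and returns the first one whose slot is signaled.
 */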
static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}
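/*
 * Note on the event ID space: signal and debug events get IDs below
 * p->signal_mapped_size / 8 so the ID can double as an index into the signal
 * page, while all other event types draw IDs from the
 * [KFD_FIRST_NONSIGNAL_EVENT_ID, KFD_LAST_NONSIGNAL_EVENT_ID] range, so the
 * two classes never collide in the shared event_idr.
 */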
void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}
static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}
/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}
void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}
static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}
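/*
 * The event_page_offset returned above (KFD_MMAP_EVENTS_MASK shifted by
 * PAGE_SHIFT) is presumably meant to be passed back by user mode as the
 * mmap() offset on the KFD device file, which routes the mapping request to
 * kfd_event_mmap() below and makes the signal slot visible in user space.
 */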
/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}
static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}
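/*
 * Auto-reset semantics of set_event() above: if any waiter is on the wait
 * queue, an auto-reset event wakes the waiters but leaves ev->signaled false,
 * so the signal is consumed by that wake-up; a manual-reset event stays
 * signaled until kfd_reset_event() is called.
 */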
/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}
static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}
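/*
 * Note that kfd_lookup_process_by_pasid() returns a locked process (see the
 * comment in kfd_signal_event_interrupt() above), which is why that function
 * drops p->mutex in addition to p->event_mutex before returning.
 */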
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}
static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}
static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}
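/*
 * Waiter setup is two-phase: init_event_waiter_get_status() snapshots the
 * signaled state while p->event_mutex is held, and only waiters that are not
 * already activated are put on the event's wait queue here, so waiting on an
 * already-signaled event never blocks.
 */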
/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
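/*
 * Example: with CONFIG_HZ=250, a user_timeout_ms of 1000 becomes
 * msecs_to_jiffies(1000) + 1 == 251 jiffies; the extra jiffy ensures the wait
 * is never shorter than requested. KFD_EVENT_TIMEOUT_IMMEDIATE and
 * KFD_EVENT_TIMEOUT_INFINITE bypass the conversion entirely.
 */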
static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}
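/*
 * Once this mapping is in place, user space and the interrupt path see the
 * same physical page: the signaler writes into user_address[event_id] before
 * raising an interrupt (see the comment above struct kfd_signal_page), and
 * kfd_signal_event_interrupt() reads and resets the slot through the kernel
 * mapping.
 */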
/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		mutex_unlock(&p->mutex);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}
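/*
 * The vma checks above classify the fault for user space: no VMA covering the
 * address (or an address below vma->vm_start) is reported as NotPresent, a
 * write to a non-writable VMA as ReadOnly, and an instruction fetch from a
 * non-executable VMA as NoExecute.
 */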
void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}