// SPDX-License-Identifier: GPL-2.0
/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

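/*
 * Armed pages are tracked in kmmio_page_table, a small hash table of
 * KMMIO_PAGE_TABLE_SIZE (16) buckets keyed by the page-aligned address
 * (see kmmio_page_list() below).
 */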
struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr;	/* the requested address */
	pteval_t old_presence;	/* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};
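
/*
 * Fault pages whose count dropped to zero are chained on release_next and
 * freed in two RCU stages: remove_kmmio_fault_pages() unlinks them from
 * kmmio_page_table, rcu_free_kmmio_fault_pages() finally kfree()s them.
 */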

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

/*
 * The kmmio_lock is taken in int3 context, which is treated as NMI context.
 * This causes lockdep to complain about it being in both NMI and normal
 * context. Hide it from lockdep, as it should not have any other locks
 * taken under it, and this is only enabled for debugging mmio anyway.
 */
static arch_spinlock_t kmmio_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

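/*
 * The per-CPU context carries state from kmmio_handler() (the page fault
 * side) to post_kmmio_handler() (the debug trap side) across the single
 * step; only one kmmio hit can be handled per CPU at a time.
 */
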
/*
 * This is basically a dynamic stabbing problem. We could use the existing
 * prio tree code, or one of these possibly better implementations:
 * - The Interval Skip List: A Data Structure for Finding All Intervals
 *   That Overlap a Point (might be simple)
 * - Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;

	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

113
0fd0e3da 114/* You must be holding RCU read lock. */
cfa52c0c 115static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
8b7d89d0 116{
0fd0e3da 117 struct list_head *head;
0492e1bb 118 struct kmmio_fault_page *f;
cfa52c0c
KH
119 unsigned int l;
120 pte_t *pte = lookup_address(addr, &l);
8b7d89d0 121
cfa52c0c
KH
122 if (!pte)
123 return NULL;
124 addr &= page_level_mask(l);
125 head = kmmio_page_list(addr);
0492e1bb 126 list_for_each_entry_rcu(f, head, list) {
cfa52c0c 127 if (f->addr == addr)
0492e1bb 128 return f;
8b7d89d0 129 }
8b7d89d0
PP
130 return NULL;
131}
132
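/*
 * Arming hides a page from the CPU's page walker: for a 4K page the whole
 * PTE is cleared (the old value is saved first), for a 2M page the PMD is
 * marked invalid. old_presence lets disarming restore the mapping.
 */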
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmd_t new_pmd;
	pmdval_t v = pmd_val(*pmd);

	if (clear) {
		*old = v;
		new_pmd = pmd_mkinvalid(*pmd);
	} else {
		/* Presume this has been called with clear==true previously */
		new_pmd = __pmd(*old);
	}
	set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);

	if (clear) {
		*old = v;
		/* Nothing should care about address */
		pte_clear(&init_mm, 0, pte);
	} else {
		/* Presume this has been called with clear==true previously */
		set_pte_atomic(pte, __pte(*old));
	}
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	flush_tlb_one_kernel(f->addr);
	return 0;
}

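/*
 * Note that flush_tlb_one_kernel() in clear_page_presence() only flushes
 * the local CPU's TLB. Another CPU may still hold a stale, present entry
 * for an armed page, which is why accesses from other CPUs during single
 * stepping can go through unnoticed (see kmmio_handler()).
 */
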
/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;

	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warn("double-arm: addr 0x%08lx, ref %d, old %d\n",
			f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);

	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
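/*
 * The arm/step/re-arm cycle, in short: the armed (non-present) page makes
 * the access fault into this handler; we disarm the page, set TF and call
 * the pre_handler; the faulting instruction then re-executes and the
 * single step lands in post_kmmio_handler(), which calls the post_handler
 * and re-arms the page.
 */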
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);

	/*
	 * Hold the RCU read lock over single stepping to avoid looking
	 * up the probe and kmmio_fault_page again. The rcu_read_lock_sched()
	 * also disables preemption and prevents process switch during
	 * the single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run.
	 */
	rcu_read_lock_sched_notrace();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = this_cpu_ptr(&kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	return 1; /* fault handled */

no_kmmio:
	rcu_read_unlock_sched_notrace();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
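/*
 * Reached via kmmio_die_notifier() below on every DIE_DEBUG with DR_STEP
 * set, i.e. after the single step that kmmio_handler() requested.
 */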
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);

	if (!ctx->active) {
		/*
		 * debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour
		 */
		pr_warn("unexpected debug trap on CPU %d.\n", smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	arch_spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	arch_spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock_sched_notrace();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	local_irq_save(flags);
	arch_spin_lock(&kmmio_lock);
	if (get_kmmio_probe(addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	arch_spin_unlock(&kmmio_lock);
	local_irq_restore(flags);

	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

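/*
 * Typical usage, as a rough sketch (the handler names are hypothetical;
 * see mmio-mod.c for the real mmiotrace client):
 *
 *	static struct kmmio_probe probe = {
 *		.addr = (unsigned long)ioremapped_addr,
 *		.len = mapping_size,
 *		.pre_handler = my_pre_handler,
 *		.post_handler = my_post_handler,
 *	};
 *
 *	register_kmmio_probe(&probe);
 *	... MMIO accesses to the region now trap into the handlers ...
 *	unregister_kmmio_probe(&probe);
 *	synchronize_rcu();	(then it is safe to free the probe)
 */
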
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;

	while (f) {
		struct kmmio_fault_page *next = f->release_next;

		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&kmmio_lock);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	arch_spin_unlock(&kmmio_lock);
	local_irq_restore(flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another RCU
 *    grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(addr, &l);
	if (!pte)
		return;

	local_irq_save(flags);
	arch_spin_lock(&kmmio_lock);
	while (size < size_lim) {
		release_kmmio_fault_page(addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	arch_spin_unlock(&kmmio_lock);
	local_irq_restore(flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed to by arg->err)
			 * to denote completion of processing
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}