 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table
 * update happens first, and then the update is mirrored over to the device
 * page table. This does not cause any issue, because the CPU page table
 * cannot start pointing to a new page until the device page table is
 * invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
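 *
 * As a rough, illustrative sketch only (not taken from any real driver), a
 * driver built on the ops-based migrate_vma() API could drive such a copy
 * roughly like this; all my_* names below are hypothetical:
 *
 *      static const struct migrate_vma_ops my_migrate_ops = {
 *          // Allocate destination pages and copy them with the device DMA
 *          // engine.
 *          .alloc_and_copy   = my_alloc_and_copy,
 *          // Fix up driver state once the new mappings are in place.
 *          .finalize_and_map = my_finalize_and_map,
 *      };
 *
 *      // src and dst hold one entry per page in [start, end).
 *      ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst,
 *                        my_private);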
#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for user waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
	wait_queue_head_t	wq;
 * hmm_pfn_flag_e - HMM flag enums
 *
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array that maps each HMM flag to the bit the
 * device page table uses for it. For example, if the driver's valid bit for
 * an entry is bit 3, i.e. (entry & (1 << 3)) is true when the entry is valid,
 * then the driver must provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in a vma, except that it is
 * per device driver rather than per architecture.
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};
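/*
 * Illustrative sketch only: a hypothetical driver whose device page table
 * format uses bit 3 for valid, bit 4 for write and bit 5 for device-private
 * memory could describe that format to HMM as below (the name and bit
 * positions are made up, not taken from any real driver):
 */
static const uint64_t my_hmm_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]		 = 1UL << 3,
	[HMM_PFN_WRITE]		 = 1UL << 4,
	[HMM_PFN_DEVICE_PRIVATE] = 1UL << 5,
};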
 * hmm_pfn_value_e - HMM pfn special values
 *
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides the entry values for the none, error and special cases.
 * The driver may alias some of them (e.g. use the same value for both error
 * and special), but it must not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_range_snapshot() or hmm_range_fault()
 * will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};
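/*
 * Illustrative sketch only: the same hypothetical driver can pick any three
 * distinct values that can never be mistaken for a valid entry (no flag bit
 * set); the exact encoding below is made up:
 */
static const uint64_t my_hmm_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_ERROR]	  = 0x1,
	[HMM_PFN_NONE]	  = 0x0,
	[HMM_PFN_SPECIAL] = 0x2,
};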
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all active ranges are kept on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it has been filled by an HMM function
	struct vm_area_struct	*vma;
	struct list_head	list;
	const uint64_t		*flags;
	const uint64_t		*values;
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for the wait, in ms (i.e. abort the wait after that
 * period of time)
 * Returns: true if the range is valid, false otherwise.
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	/* Check if mm is dead */
	if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
		range->valid = false;
		return false;
	}
	if (range->valid)
		return true;
	wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
			   msecs_to_jiffies(timeout));
	/* Return current valid status just in case we get lucky */
	return range->valid;
}
 * hmm_range_valid() - test if a range is valid or not
 * Returns: true if the range is valid, false otherwise.
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}
 * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
 * @range: range used to decode HMM pfn value
 * @pfn: HMM pfn value to get corresponding struct page from
 * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
 *
 * If the HMM pfn is valid (i.e. the valid flag is set) then return the struct
 * page matching the pfn value stored in the HMM pfn. Otherwise return NULL.
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return NULL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return NULL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(pfn >> range->pfn_shift);
}
 * hmm_pfn_to_pfn() - return pfn value stored in an HMM pfn
 * @range: range used to decode HMM pfn value
 * @pfn: HMM pfn value to extract pfn from
 * Returns: pfn value if HMM pfn is valid, -1UL otherwise
static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}
 * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
 * @range: range used to encode HMM pfn value
 * @page: struct page pointer for which to create the HMM pfn
 * Returns: valid HMM pfn for the page
static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
	       range->flags[HMM_PFN_VALID];
}

 * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
 * @range: range used to encode HMM pfn value
 * @pfn: pfn value for which to create the HMM pfn
 * Returns: valid HMM pfn for the pfn
static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
	       range->flags[HMM_PFN_VALID];
}
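/*
 * Illustrative sketch only: after a successful snapshot or fault, a driver
 * typically walks the pfns array of the range and decodes every entry with
 * the helpers above. my_device_map() is a hypothetical driver function.
 */
static void my_program_device_ptes(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *page = hmm_pfn_to_page(range, range->pfns[i]);

		if (!page)
			continue;	/* none, error, special or not valid */
		/* Map the page in the device, honoring the write permission. */
		my_device_map(range->start + (i << PAGE_SHIFT), page,
			      !!(range->pfns[i] & range->flags[HMM_PFN_WRITE]));
	}
}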
#if IS_ENABLED(CONFIG_HMM_MIRROR)
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *          if (ret)
 *              return ret; // Cleanup on error
 *
 *          // Other device driver specific initialization
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space:
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          hmm_mirror_unregister(&das->mirror);
 *          // Other device driver specific cleanup, and now das can be freed
 *      }
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @update: callback to update range on a device
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * should make sure no references to the mirror occur after the
	 * callback returns.
	 */
	void (*release)(struct hmm_mirror *mirror);
	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Returns: -EAGAIN if update.blockable is false and the callback needs
	 *          to block; 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform on the virtual address range.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);
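/*
 * Illustrative sketch only: a minimal mirror implementation for a hypothetical
 * driver. struct my_device_as and my_device_invalidate_range() are made up;
 * the key requirement is that device page tables and device TLBs are fully
 * invalidated before returning from sync_cpu_device_pagetables().
 */
struct my_device_as {
	struct hmm_mirror	mirror;
	/* ... device page table state, locks, ... */
};

static void my_mirror_release(struct hmm_mirror *mirror)
{
	/* The mm is going away: stop using this mirror from now on. */
}

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_device_as *as;

	as = container_of(mirror, struct my_device_as, mirror);
	if (!update->blockable)
		return -EAGAIN;	/* assume we must sleep to invalidate */

	/* Tear down device mappings and flush device TLBs for the range. */
	my_device_invalidate_range(as, update->start, update->end);
	return 0;
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.release			= &my_mirror_release,
	.sync_cpu_device_pagetables	= &my_sync_cpu_device_pagetables,
};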
 * Please see Documentation/vm/hmm.rst for how to use the range API.
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
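/*
 * Condensed, illustrative sketch of the snapshot pattern described in
 * Documentation/vm/hmm.rst. my_driver_lock is a hypothetical driver lock that
 * is also taken by the driver's sync_cpu_device_pagetables() callback, and
 * my_program_device_ptes() is the hypothetical helper sketched earlier.
 */
static DEFINE_MUTEX(my_driver_lock);

static long my_mirror_range(struct mm_struct *mm, struct hmm_range *range)
{
	long ret;

	ret = hmm_range_register(range, mm, range->start, range->end);
	if (ret)
		return ret;

again:
	if (!hmm_range_wait_until_valid(range, 1000 /* ms */)) {
		ret = -EBUSY;
		goto out;
	}

	down_read(&mm->mmap_sem);
	ret = hmm_range_snapshot(range);
	up_read(&mm->mmap_sem);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;
		goto out;
	}

	/* Only commit the snapshot to the device if it is still valid. */
	mutex_lock(&my_driver_lock);
	if (!hmm_range_valid(range)) {
		mutex_unlock(&my_driver_lock);
		goto again;
	}
	my_program_device_ptes(range);
	mutex_unlock(&my_driver_lock);
	ret = 0;
out:
	hmm_range_unregister(range);
	return ret;
}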
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e. 1s) sounds like a long enough
 * time for any pending invalidation to complete.
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = hmm_range_valid(range);

	hmm_range_unregister(range);
	return ret;
}
/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
	long ret;

	ret = hmm_range_register(range, range->vma->vm_mm,
				 range->start, range->end);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The driver took the mmap_sem; drop it here and return
		 * -EAGAIN, which in the old API meant that the mmap_sem
		 * has been dropped.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}
/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)

struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is no
 * longer in use
 * @fault: called when there is a CPU page fault on unaddressable device memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those callbacks.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and unique
 * API to device drivers; device drivers should not register their own
 * page_free() or page_fault() but should rely on the hmm_devmem_ops callbacks
 * instead.
struct hmm_devmem_ops {
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page refcount reaches 1, which
	 * means that no one is holding any reference on the page anymore
	 * (ZONE_DEVICE pages have an elevated refcount of 1 by default, so
	 * that they are not released to the general page allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 *          otherwise
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page it must always prioritize the faulting
	 * address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration might
	 * have already freed that page and the virtual address might no longer
	 * be backed by it. So the struct page should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held at least in read mode when this
	 * callback occurs, hence the vma is valid upon callback entry.
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
			    struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp);
};
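/*
 * Illustrative sketch only: a hypothetical fault() callback that migrates the
 * faulting page back to system memory. my_migrate_to_ram() stands in for
 * driver code built around migrate_vma(); my_devmem_free() is an equally
 * hypothetical free() callback.
 */
static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
				  struct vm_area_struct *vma,
				  unsigned long addr,
				  const struct page *page,
				  unsigned int flags,
				  pmd_t *pmdp)
{
	/*
	 * Migrate (at least) the faulting page back to regular memory so the
	 * CPU can access it. On failure, returning VM_FAULT_SIGBUS poisons
	 * the CPU page table entry.
	 */
	if (my_migrate_to_ram(devmem, vma, addr, pmdp))
		return VM_FAULT_SIGBUS;
	return 0;
}

static const struct hmm_devmem_ops my_devmem_ops = {
	.free	= my_devmem_free,
	.fault	= my_devmem_fault,
};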
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callback
 * @ref: per CPU refcount
 * @page_fault: callback when the CPU faults on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages for it.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * want to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, device has been unplugged, ...). When such an error
 * condition happens, the page_fault() callback must return VM_FAULT_SIGBUS and
 * set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
					unsigned long addr,
					const struct page *page,
					unsigned int flags,
					pmd_t *pmdp);
struct hmm_devmem {
	struct completion		completion;
	unsigned long			pfn_first;
	unsigned long			pfn_last;
	struct resource			*resource;
	struct device			*device;
	struct dev_pagemap		pagemap;
	const struct hmm_devmem_ops	*ops;
	struct percpu_ref		ref;
	dev_page_fault_t		page_fault;
};
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);
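/*
 * Illustrative sketch only: a hypothetical driver hotplugging 1GB of its
 * unaddressable memory at probe time; struct my_device and my_devmem_ops
 * are made-up names.
 */
struct my_device {
	struct device		*dev;
	struct hmm_devmem	*devmem;
};

static int my_device_memory_init(struct my_device *mdev)
{
	struct hmm_devmem *devmem;

	/* Reserves a physical range and allocates struct pages for it. */
	devmem = hmm_devmem_add(&my_devmem_ops, mdev->dev, SZ_1G);
	if (IS_ERR(devmem))
		return PTR_ERR(devmem);

	/* Device pages the driver can hand out are [pfn_first, pfn_last]. */
	mdev->devmem = devmem;
	return 0;
}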
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on an LRU list, we have an unsigned long that the
 * driver can use to store a per-page field. This is just a simple helper to do
 * that.
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}
 * hmm_devmem_page_get_drvdata - get per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}
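/*
 * Illustrative sketch only: stash a pointer to the driver's own per-page
 * bookkeeping structure in a device page and get it back later, e.g. from the
 * fault() or free() callbacks. struct my_page_state is hypothetical.
 */
struct my_page_state;

static inline void my_page_attach(struct page *page, struct my_page_state *st)
{
	hmm_devmem_page_set_drvdata(page, (unsigned long)st);
}

static inline struct my_page_state *my_page_state_of(const struct page *page)
{
	return (struct my_page_state *)hmm_devmem_page_get_drvdata(page);
}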
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
	struct device		device;

 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */