/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page
 * table. This does not cause any issue, because the CPU page table cannot
 * start pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU, it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular
 * memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count for this structure
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for user waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
 */
struct hmm {
	struct mm_struct *mm;
	struct kref kref;
	struct mutex lock;
	struct list_head ranges;
	struct list_head mirrors;
	struct mmu_notifier mmu_notifier;
	struct rw_semaphore mirrors_sem;
	wait_queue_head_t wq;
	long notifiers;
	bool dead;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array. For example, if the driver's valid bit
 * for an entry is bit 3 (i.e., (entry & (1 << 3)) is true when the entry is
 * valid), then the driver must provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in a vma, except that it is
 * per device driver rather than per architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should
 *      not be mirrored by a device, because the entry will never have
 *      HMM_PFN_VALID set and the pfn value is undefined.
 *
 * The driver provides entry values for the none, error and special entries.
 * The driver may alias them (i.e., use the same value for error and special,
 * for instance), but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};

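/*
 * Example (illustrative sketch; the driver_hmm_flags/driver_hmm_values names
 * and the chosen bit positions are hypothetical): a driver whose device page
 * table entries use bit 0 for valid, bit 1 for write and bit 2 for device
 * private memory would describe its encoding to HMM like this:
 *
 *      static const uint64_t driver_hmm_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID]          = 1UL << 0,
 *              [HMM_PFN_WRITE]          = 1UL << 1,
 *              [HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *      };
 *
 *      static const uint64_t driver_hmm_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_NONE]    = 0,
 *              [HMM_PFN_ERROR]   = 1UL << 63,
 *              [HMM_PFN_SPECIAL] = 1UL << 62,
 *      };
 *
 *      range->flags = driver_hmm_flags;
 *      range->values = driver_hmm_values;
 *      range->pfn_shift = 3;
 *
 * The pfn_shift must leave room below it for all the flag bits (here bits
 * 0-2), and none of the special values may have the valid flag set.
 */
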
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all range locks are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn value for some special case (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
 * @page_shift: page shift for the range (page size = 1 << page_shift)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it has been filled by an HMM function
 */
struct hmm_range {
	struct hmm *hmm;
	struct vm_area_struct *vma;
	struct list_head list;
	unsigned long start;
	unsigned long end;
	uint64_t *pfns;
	const uint64_t *flags;
	const uint64_t *values;
	uint64_t default_flags;
	uint64_t pfn_flags_mask;
	uint8_t page_shift;
	uint8_t pfn_shift;
	bool valid;
};

/*
 * hmm_range_page_shift() - return the page shift for the range
 * @range: range being queried
 * Returns: page shift (page size = 1 << page shift) for the range
 */
static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
{
	return range->page_shift;
}

/*
 * hmm_range_page_size() - return the page size for the range
 * @range: range being queried
 * Returns: page size for the range in bytes
 */
static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
{
	return 1UL << hmm_range_page_shift(range);
}

/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for the wait in ms (i.e., abort the wait after that period)
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	/* Check if mm is dead? */
	if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
		range->valid = false;
		return false;
	}
	if (range->valid)
		return true;
	wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
			   msecs_to_jiffies(timeout));
	/* Return current valid status just in case we get lucky */
	return range->valid;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Returns: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @pfn: device entry to extract the pfn from
 * Returns: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Returns: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Returns: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

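/*
 * Example (illustrative sketch; driver_update_one() and the device page table
 * programming are hypothetical): decoding one snapshotted entry from
 * range->pfns[] into a struct page and a write permission bit.
 *
 *      static void driver_update_one(struct hmm_range *range, unsigned long i)
 *      {
 *              uint64_t entry = range->pfns[i];
 *              struct page *page = hmm_device_entry_to_page(range, entry);
 *              bool writable = entry & range->flags[HMM_PFN_WRITE];
 *
 *              if (!page)
 *                      return; // none, error or special entry
 *              // Program the device page table with page_to_pfn(page),
 *              // honoring the writable bit.
 *      }
 */
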
/*
 * Old API:
 * hmm_pfn_to_page()
 * hmm_pfn_to_pfn()
 * hmm_pfn_from_page()
 * hmm_pfn_from_pfn()
 *
 * These are the OLD API; please use the new API. They are kept here to avoid
 * cross-tree merge painfulness, i.e., we convert things to the new API in
 * stages.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_page(range, pfn);
}

static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_pfn(range, pfn);
}

static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return hmm_device_entry_from_page(range, page);
}

static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					unsigned long pfn)
{
	return hmm_device_entry_from_pfn(range, pfn);
}

#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          das->mirror.ops = &device_mirror_ops;
 *
 *          ret = hmm_mirror_register(&das->mirror, mm);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
 */
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @release: callback when the mm this mirror is bound to is being released
 * @update: callback to update range on a device
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * should make sure no references to the mirror occur after the
	 * callback returns.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Returns: -EAGAIN if update.blockable is false and the callback needs
	 *          to block; 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};

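/*
 * Example (illustrative sketch; struct driver_mirror, driver_lock()/unlock()/
 * trylock() and driver_invalidate_range() are hypothetical): a minimal
 * sync_cpu_device_pagetables() implementation that honors update->blockable.
 * This plays the role of the device_mirror_ops referenced by the bind pattern
 * above.
 *
 *      static int driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                      const struct hmm_update *update)
 *      {
 *              struct driver_mirror *dmirror;
 *
 *              dmirror = container_of(mirror, struct driver_mirror, mirror);
 *              if (!update->blockable) {
 *                      if (!driver_trylock(dmirror))
 *                              return -EAGAIN;
 *              } else {
 *                      driver_lock(dmirror);
 *              }
 *
 *              // Invalidate the device TLB and page table entries covering
 *              // [update->start, update->end) before returning.
 *              driver_invalidate_range(dmirror, update->start, update->end);
 *
 *              driver_unlock(dmirror);
 *              return 0;
 *      }
 *
 *      static const struct hmm_mirror_ops device_mirror_ops = {
 *              .sync_cpu_device_pagetables = driver_sync_cpu_device_pagetables,
 *      };
 */
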
/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm *hmm;
	const struct hmm_mirror_ops *ops;
	struct list_head list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * hmm_mirror_mm_is_alive() - test if mm is still alive
 * @mirror: the HMM mm mirror to check
 * Returns: false if the mm is dead, true otherwise
 *
 * This is an optimization; it will not always accurately report that an mm is
 * dead, i.e., there can be false negatives (the process is being killed but
 * HMM has not yet been informed of that). It is only intended to be used to
 * optimize out cases where the driver is about to do something time consuming
 * and it would be better to skip it if the mm is dead.
 */
static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
{
	struct mm_struct *mm;

	if (!mirror || !mirror->hmm)
		return false;
	mm = READ_ONCE(mirror->hmm->mm);
	if (mirror->hmm->dead || !mm)
		return false;

	return true;
}

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e., 1s) already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

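/*
 * Example (illustrative sketch following the pattern described in
 * Documentation/vm/hmm.rst; driver_populate_range(), driver_lock()/unlock()
 * and driver_update_page_tables() are hypothetical): snapshotting a range and
 * programming a device page table from it. The caller is expected to have set
 * up range->start, range->end, range->pfns, range->flags, range->values and
 * range->pfn_shift beforehand.
 *
 *      static int driver_populate_range(struct driver_mirror *dmirror,
 *                                       struct mm_struct *mm,
 *                                       struct hmm_range *range)
 *      {
 *              long ret;
 *
 *              ret = hmm_range_register(range, mm, range->start, range->end,
 *                                       PAGE_SHIFT);
 *              if (ret)
 *                      return ret;
 *
 *      again:
 *              if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
 *                      hmm_range_unregister(range);
 *                      return -EBUSY;
 *              }
 *
 *              down_read(&mm->mmap_sem);
 *              ret = hmm_range_snapshot(range);
 *              if (ret < 0) {
 *                      up_read(&mm->mmap_sem);
 *                      if (ret == -EAGAIN)
 *                              goto again;
 *                      hmm_range_unregister(range);
 *                      return ret;
 *              }
 *
 *              // Take the driver lock that serializes device page table
 *              // updates against the mirror callbacks, then recheck validity.
 *              driver_lock(dmirror);
 *              if (!hmm_range_valid(range)) {
 *                      driver_unlock(dmirror);
 *                      up_read(&mm->mmap_sem);
 *                      goto again;
 *              }
 *              driver_update_page_tables(dmirror, range);
 *              driver_unlock(dmirror);
 *              up_read(&mm->mmap_sem);
 *
 *              hmm_range_unregister(range);
 *              return 0;
 *      }
 */
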
/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = hmm_range_valid(range);

	hmm_range_unregister(range);
	return ret;
}

/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entry with the
	 * requested flags (valid, write, ...). So here we set the mask to keep
	 * intact the entries provided by the driver and zero out the
	 * default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, range->vma->vm_mm,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by the driver; we release it here and
		 * return -EAGAIN, which corresponds to the mmap_sem having
		 * been dropped in the old API.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match the old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1 and it is thus no
 *    longer used
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and
 * unique API to device drivers; device drivers should not register their
 * own page_free() or page_fault() but rely on the hmm_devmem_ops callbacks.
 */
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page refcount reaches 1, which
	 * means that no one is holding any reference on the page anymore
	 * (ZONE_DEVICE pages have an elevated refcount of 1 by default so
	 * that they are not released to the general page allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing the virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 *   on error
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page, it must always prioritize the
	 * faulting address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration
	 * might have already freed that page, and the virtual address might
	 * no longer be backed by it. So it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held at least in read mode when this
	 * callback occurs, hence the vma is valid upon callback entry.
	 */
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
			    struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp);
};

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callback
 * @ref: per CPU refcount
 * @page_fault: callback when the CPU faults on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, device has been unplugged, ...). When such an error
 * condition happens, the page_fault() callback must return VM_FAULT_SIGBUS and
 * set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
				       unsigned long addr,
				       const struct page *page,
				       unsigned int flags,
				       pmd_t *pmdp);

struct hmm_devmem {
	struct completion completion;
	unsigned long pfn_first;
	unsigned long pfn_last;
	struct resource *resource;
	struct device *device;
	struct dev_pagemap pagemap;
	const struct hmm_devmem_ops *ops;
	struct percpu_ref ref;
	dev_page_fault_t page_fault;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);

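/*
 * Example (illustrative sketch; struct driver_memory, driver_memory_probe()
 * and driver_devmem_ops are hypothetical, and the ERR_PTR-style error return
 * is an assumption): wrapping struct hmm_devmem in a driver private structure
 * and hotplugging device memory at probe time.
 *
 *      struct driver_memory {
 *              struct hmm_devmem *devmem;
 *              // other driver specific fields
 *      };
 *
 *      static int driver_memory_probe(struct device *device,
 *                                     struct driver_memory *dmem,
 *                                     unsigned long size)
 *      {
 *              dmem->devmem = hmm_devmem_add(&driver_devmem_ops, device, size);
 *              if (IS_ERR(dmem->devmem))
 *                      return PTR_ERR(dmem->devmem);
 *              // Device pages span dmem->devmem->pfn_first .. pfn_last.
 *              return 0;
 *      }
 */
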
/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on the lru, we have an unsigned long that the
 * driver can use to store a per-page field. This is just a simple helper to
 * do that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}

/*
 * hmm_devmem_page_get_drvdata - get per page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}

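/*
 * Example (illustrative only; struct driver_page is hypothetical): stashing a
 * pointer to driver private data in a device page, typically from the
 * hmm_devmem_ops free()/fault() callbacks.
 *
 *      static void driver_page_bind(struct page *page, struct driver_page *dpage)
 *      {
 *              hmm_devmem_page_set_drvdata(page, (unsigned long)dpage);
 *      }
 *
 *      static struct driver_page *driver_page_of(const struct page *page)
 *      {
 *              return (struct driver_page *)hmm_devmem_page_get_drvdata(page);
 *      }
 */
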
/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
	struct device device;
	unsigned int minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */