/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page tables of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page
 * table. This does not cause any issue, because the CPU page table cannot
 * start pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular
 * memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count for this structure
 * @lock: lock protecting the ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to the CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
 */
struct hmm {
	struct mm_struct *mm;
	struct kref kref;
	struct mutex lock;
	struct list_head ranges;
	struct list_head mirrors;
	struct mmu_notifier mmu_notifier;
	struct rw_semaphore mirrors_sem;
	wait_queue_head_t wq;
	long notifiers;
	bool dead;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in a vma, except that this is per device driver rather than per
 * architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

/*
 * hmm_pfn_value_e - HMM pfn special values
 *
 * Values:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, error entry, and special
 * entry. The driver can alias (i.e., use the same value for) error and
 * special, but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is a special
 * one.
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};

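/*
 * As an illustration only (not part of this header): a driver whose device
 * PTEs use, say, bit 0 for valid and bit 1 for write could describe that to
 * HMM with arrays along these lines. The bit positions and names below are
 * hypothetical; each driver must pick values that match its own hardware.
 *
 *      static const uint64_t example_hmm_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID]          = 1UL << 0,
 *              [HMM_PFN_WRITE]          = 1UL << 1,
 *              [HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *      };
 *
 *      static const uint64_t example_hmm_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_ERROR]   = 0x1UL,
 *              [HMM_PFN_NONE]    = 0,
 *              [HMM_PFN_SPECIAL] = 0x2UL,
 *      };
 *
 * The driver then points hmm_range.flags and hmm_range.values at these arrays
 * (and picks hmm_range.pfn_shift so that the shifted pfn does not collide
 * with the flag bits) before using the range helpers below.
 */
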
/*
 * struct hmm_range - track invalidation lock on a virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all range locks are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it has been filled by an HMM function
 */
struct hmm_range {
	struct hmm *hmm;
	struct vm_area_struct *vma;
	struct list_head list;
	unsigned long start;
	unsigned long end;
	uint64_t *pfns;
	const uint64_t *flags;
	const uint64_t *values;
	uint64_t default_flags;
	uint64_t pfn_flags_mask;
	uint8_t page_shift;
	uint8_t pfn_shift;
	bool valid;
};

/*
 * hmm_range_page_shift() - return the page shift for the range
 * @range: range being queried
 * Return: page shift (page size = 1 << page shift) for the range
 */
static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
{
	return range->page_shift;
}

/*
 * hmm_range_page_size() - return the page size for the range
 * @range: range being queried
 * Return: page size for the range in bytes
 */
static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
{
	return 1UL << hmm_range_page_shift(range);
}

/*
 * hmm_range_wait_until_valid() - wait for a range to become valid
 * @range: range affected by an invalidation to wait on
 * @timeout: timeout for the wait in ms (i.e., abort the wait after that period)
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	/* Check if mm is dead */
	if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
		range->valid = false;
		return false;
	}
	if (range->valid)
		return true;
	wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
			   msecs_to_jiffies(timeout));
	/* Return current valid status just in case we get lucky */
	return range->valid;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Return: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}

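/*
 * Hedged usage sketch (illustration only, not part of this header): once the
 * pfns array has been filled by an HMM function, a driver would typically walk
 * it, decode each entry, and program its own page table. dev, addr, npages and
 * dev_map_page() below are hypothetical driver pieces.
 *
 *      for (i = 0; i < npages; i++) {
 *              struct page *page;
 *
 *              page = hmm_device_entry_to_page(range, range->pfns[i]);
 *              if (!page)
 *                      continue; // none, error or special entry
 *              dev_map_page(dev, addr + (i << range->page_shift), page,
 *                           range->pfns[i] & range->flags[HMM_PFN_WRITE]);
 *      }
 */
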
/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @pfn: device entry to extract the pfn from
 * Return: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Return: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * Old API:
 * hmm_pfn_to_page()
 * hmm_pfn_to_pfn()
 * hmm_pfn_from_page()
 * hmm_pfn_from_pfn()
 *
 * These are the OLD API; please use the new API instead. They are only kept
 * here to avoid cross-tree merge pain, i.e., we convert things to the new API
 * in stages.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_page(range, pfn);
}

static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_pfn(range, pfn);
}

static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return hmm_device_entry_from_page(range, page);
}

static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					unsigned long pfn)
{
	return hmm_device_entry_from_pfn(range, pfn);
}

#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize a device page table with the CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
 */
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @update: callback to update range on a device
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * must ensure that all access to any pages obtained from this mirror
	 * is halted before the callback returns. All future access should
	 * fault.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Return: -EAGAIN if update.blockable is false and the callback needs
	 * to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};

/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callbacks for HMM mirror operations
 * @list: for the list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm *hmm;
	const struct hmm_mirror_ops *ops;
	struct list_head list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

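/*
 * Hedged sketch of a sync_cpu_device_pagetables() implementation (illustration
 * only; struct driver_mirror, its lock, and device_invalidate_range() are
 * hypothetical driver pieces, not part of this header):
 *
 *      static int driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                      const struct hmm_update *update)
 *      {
 *              struct driver_mirror *dm = container_of(mirror,
 *                                      struct driver_mirror, mirror);
 *
 *              if (!update->blockable) {
 *                      if (!spin_trylock(&dm->lock))
 *                              return -EAGAIN;
 *              } else {
 *                      spin_lock(&dm->lock);
 *              }
 *
 *              // Invalidate the device page table and flush device TLBs for
 *              // [update->start, update->end) before returning; this callback
 *              // must be synchronous.
 *              device_invalidate_range(dm, update->start, update->end);
 *
 *              spin_unlock(&dm->lock);
 *              return 0;
 *      }
 */
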
/*
 * hmm_mirror_mm_is_alive() - test if mm is still alive
 * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
 * Return: false if the mm is dead, true otherwise
 *
 * This is an optimization, it will not always accurately return false if the
 * mm is dead; i.e., there can be false negatives (process is being killed but
 * HMM is not yet informed of that). It is only intended to be used to optimize
 * out cases where the driver is about to do something time consuming and it
 * would be better to skip it if the mm is dead.
 */
static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
{
	struct mm_struct *mm;

	if (!mirror || !mirror->hmm)
		return false;
	mm = READ_ONCE(mirror->hmm->mm);
	if (mirror->hmm->dead || !mm)
		return false;

	return true;
}

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e., 1s, already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

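/*
 * For reference, a hedged sketch of the snapshot pattern described in
 * Documentation/vm/hmm.rst (illustration only; struct driver_mirror, its lock,
 * and driver_update_device_page_table() are hypothetical driver pieces, and a
 * real driver would also retry -EBUSY/-EAGAIN from hmm_range_snapshot()):
 *
 *      int driver_snapshot(struct driver_mirror *dm, struct hmm_range *range)
 *      {
 *              long ret;
 *
 *      again:
 *              ret = hmm_range_register(range, dm->mm, range->start,
 *                                       range->end, PAGE_SHIFT);
 *              if (ret)
 *                      return ret;
 *
 *              if (!hmm_range_wait_until_valid(range,
 *                                              HMM_RANGE_DEFAULT_TIMEOUT)) {
 *                      hmm_range_unregister(range);
 *                      goto again;
 *              }
 *
 *              down_read(&dm->mm->mmap_sem);
 *              ret = hmm_range_snapshot(range);
 *              if (ret < 0) {
 *                      up_read(&dm->mm->mmap_sem);
 *                      hmm_range_unregister(range);
 *                      return ret;
 *              }
 *
 *              mutex_lock(&dm->lock);
 *              if (!hmm_range_valid(range)) {
 *                      mutex_unlock(&dm->lock);
 *                      up_read(&dm->mm->mmap_sem);
 *                      hmm_range_unregister(range);
 *                      goto again;
 *              }
 *              driver_update_device_page_table(dm, range);
 *              mutex_unlock(&dm->lock);
 *              up_read(&dm->mm->mmap_sem);
 *              hmm_range_unregister(range);
 *              return 0;
 *      }
 *
 * The lock taken here must be the same lock the driver's
 * sync_cpu_device_pagetables() callback takes, so that a concurrent CPU page
 * table invalidation cannot race with the device page table update.
 */
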
/* This is a temporary helper to avoid merge conflicts between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = hmm_range_valid(range);

	hmm_range_unregister(range);
	return ret;
}

/* This is a temporary helper to avoid merge conflicts between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entry with
	 * the requested flags (valid, write, ...). So here we set the mask to
	 * keep intact the entries provided by the driver and zero out the
	 * default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, range->vma->vm_mm,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by the driver; we release it here and
		 * return -EAGAIN, which corresponds to the mmap_sem having been
		 * dropped in the old API.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match the old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning it is no
 * longer in use
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and
 * unique API to device drivers; device drivers should not register their
 * own page_free() or page_fault() but instead rely on the hmm_devmem_ops
 * callbacks.
 */
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page refcount reaches 1, which
	 * means that no one is holding any reference on the page anymore
	 * (ZONE_DEVICE pages have an elevated refcount of 1 by default so
	 * that they are not released to the general page allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing the virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Return: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 * on error
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page it must always prioritize the faulting
	 * address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration
	 * might have already freed that page and the virtual address might
	 * no longer be backed by it. So it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held in read mode at least when this
	 * callback occurs, hence the vma is valid upon callback entry.
	 */
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
			    struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp);
};

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind the resource to
 * @ops: memory operations callbacks
 * @ref: per CPU refcount
 * @page_fault: callback when the CPU faults on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, the device has been unplugged, ...). When such an
 * error condition happens, the page_fault() callback must return
 * VM_FAULT_SIGBUS and set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
					unsigned long addr,
					const struct page *page,
					unsigned int flags,
					pmd_t *pmdp);

struct hmm_devmem {
	struct completion completion;
	unsigned long pfn_first;
	unsigned long pfn_last;
	struct resource *resource;
	struct device *device;
	struct dev_pagemap pagemap;
	const struct hmm_devmem_ops *ops;
	struct percpu_ref ref;
	dev_page_fault_t page_fault;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);

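/*
 * Hedged sketch of hotplugging device memory (illustration only; the ops
 * implementations, example_probe(), and the 64MB size are made up for the
 * example):
 *
 *      static const struct hmm_devmem_ops example_devmem_ops = {
 *              .free  = example_devmem_free,
 *              .fault = example_devmem_fault,
 *      };
 *
 *      static int example_probe(struct device *device)
 *      {
 *              struct hmm_devmem *devmem;
 *
 *              devmem = hmm_devmem_add(&example_devmem_ops, device,
 *                                      64UL << 20);
 *              if (IS_ERR(devmem))
 *                      return PTR_ERR(devmem);
 *
 *              // The chunk is described by devmem->pfn_first/pfn_last; its
 *              // struct pages can then be handed out through migrate_vma()
 *              // to back device allocations.
 *              return 0;
 *      }
 */
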
/*
 * hmm_devmem_page_set_drvdata - set the per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on an LRU list, we have an unsigned long that the
 * driver can use to store a per-page field. This is just a simple helper to do
 * that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}

/*
 * hmm_devmem_page_get_drvdata - get the per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}

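/*
 * Illustration only: a driver typically stashes a pointer to its own per-page
 * tracking structure here, e.g. (example_dpage is a hypothetical driver type):
 *
 *      hmm_devmem_page_set_drvdata(page, (unsigned long)example_dpage);
 *      ...
 *      example_dpage = (void *)hmm_devmem_page_get_drvdata(page);
 */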

/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
	struct device device;
	unsigned int minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */