/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table
 * update happens first, and then the update is mirrored over to the device
 * page table. This does not cause any issue, because the CPU page table
 * cannot start pointing to a new page until the device page table is
 * invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also
 * provides some API calls to help with taking a snapshot of the CPU page
 * table, and to synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can
 * always cause the device memory to be migrated (copied/moved) back to
 * regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>

struct hmm;

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array: if the driver's valid bit for an entry
 * is bit 3, i.e. (entry & (1 << 3)) is true when the entry is valid, then the
 * driver must provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in a vma, except that it is
 * per device driver rather than per architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};
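
/*
 * For example, a hypothetical driver whose device page table entries use
 * bit 3 for "valid", bit 4 for "write" and bit 5 for "device private" (the
 * bit positions here are illustrative, not mandated by HMM) would describe
 * its encoding as:
 *
 *      static const uint64_t my_driver_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID]          = 1UL << 3,
 *              [HMM_PFN_WRITE]          = 1UL << 4,
 *              [HMM_PFN_DEVICE_PRIVATE] = 1UL << 5,
 *      };
 *      range.flags = my_driver_flags;
 */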
102 | ||
103 | /* | |
104 | * hmm_pfn_value_e - HMM pfn special value | |
105 | * | |
106 | * Flags: | |
da4c3c73 | 107 | * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory |
f88a1e90 | 108 | * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() |
da4c3c73 | 109 | * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the |
67fa1666 | 110 | * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not |
da4c3c73 JG |
111 | * be mirrored by a device, because the entry will never have HMM_PFN_VALID |
112 | * set and the pfn value is undefined. | |
f88a1e90 JG |
113 | * |
114 | * Driver provide entry value for none entry, error entry and special entry, | |
115 | * driver can alias (ie use same value for error and special for instance). It | |
116 | * should not alias none and error or special. | |
117 | * | |
118 | * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: | |
119 | * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, | |
120 | * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table | |
121 | * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one | |
133ff0ea | 122 | */ |
f88a1e90 JG |
123 | enum hmm_pfn_value_e { |
124 | HMM_PFN_ERROR, | |
125 | HMM_PFN_NONE, | |
126 | HMM_PFN_SPECIAL, | |
127 | HMM_PFN_VALUE_MAX | |
128 | }; | |
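
/*
 * For example, a hypothetical driver could reserve two high bits of its pfn
 * encoding for the special values (again purely illustrative; the driver is
 * free to pick any values that do not collide with valid entries, as long as
 * none is not aliased with error or special):
 *
 *      static const uint64_t my_driver_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_ERROR]   = 1UL << 62,
 *              [HMM_PFN_NONE]    = 0,
 *              [HMM_PFN_SPECIAL] = 1UL << 63,
 *      };
 *      range.values = my_driver_values;
 */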
129 | ||
130 | /* | |
131 | * struct hmm_range - track invalidation lock on virtual address range | |
132 | * | |
133 | * @vma: the vm area struct for the range | |
134 | * @list: all range lock are on a list | |
135 | * @start: range virtual start address (inclusive) | |
136 | * @end: range virtual end address (exclusive) | |
137 | * @pfns: array of pfns (big enough for the range) | |
138 | * @flags: pfn flags to match device driver page table | |
139 | * @values: pfn value for some special case (none, special, error, ...) | |
140 | * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) | |
141 | * @valid: pfns array did not change since it has been fill by an HMM function | |
142 | */ | |
143 | struct hmm_range { | |
144 | struct vm_area_struct *vma; | |
145 | struct list_head list; | |
146 | unsigned long start; | |
147 | unsigned long end; | |
148 | uint64_t *pfns; | |
149 | const uint64_t *flags; | |
150 | const uint64_t *values; | |
151 | uint8_t pfn_shift; | |
152 | bool valid; | |
153 | }; | |

/*
 * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
 * @range: range used to decode HMM pfn value
 * @pfn: HMM pfn value to get corresponding struct page from
 * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
 *
 * If the HMM pfn is valid (i.e. the valid flag is set) then return the struct
 * page matching the pfn value stored in the HMM pfn. Otherwise return NULL.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return NULL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return NULL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(pfn >> range->pfn_shift);
}
177 | ||
178 | /* | |
ff05c0c6 | 179 | * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn |
f88a1e90 | 180 | * @range: range use to decode HMM pfn value |
ff05c0c6 JG |
181 | * @pfn: HMM pfn value to extract pfn from |
182 | * Returns: pfn value if HMM pfn is valid, -1UL otherwise | |
133ff0ea | 183 | */ |
f88a1e90 JG |
184 | static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, |
185 | uint64_t pfn) | |
133ff0ea | 186 | { |
f88a1e90 JG |
187 | if (pfn == range->values[HMM_PFN_NONE]) |
188 | return -1UL; | |
189 | if (pfn == range->values[HMM_PFN_ERROR]) | |
190 | return -1UL; | |
191 | if (pfn == range->values[HMM_PFN_SPECIAL]) | |
133ff0ea | 192 | return -1UL; |
f88a1e90 JG |
193 | if (!(pfn & range->flags[HMM_PFN_VALID])) |
194 | return -1UL; | |
195 | return (pfn >> range->pfn_shift); | |
133ff0ea JG |
196 | } |
197 | ||
198 | /* | |
ff05c0c6 | 199 | * hmm_pfn_from_page() - create a valid HMM pfn value from struct page |
f88a1e90 | 200 | * @range: range use to encode HMM pfn value |
ff05c0c6 JG |
201 | * @page: struct page pointer for which to create the HMM pfn |
202 | * Returns: valid HMM pfn for the page | |
133ff0ea | 203 | */ |
f88a1e90 JG |
204 | static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, |
205 | struct page *page) | |
133ff0ea | 206 | { |
f88a1e90 JG |
207 | return (page_to_pfn(page) << range->pfn_shift) | |
208 | range->flags[HMM_PFN_VALID]; | |
133ff0ea JG |
209 | } |
210 | ||
211 | /* | |
ff05c0c6 | 212 | * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn |
f88a1e90 | 213 | * @range: range use to encode HMM pfn value |
ff05c0c6 JG |
214 | * @pfn: pfn value for which to create the HMM pfn |
215 | * Returns: valid HMM pfn for the pfn | |
133ff0ea | 216 | */ |
f88a1e90 JG |
217 | static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, |
218 | unsigned long pfn) | |
133ff0ea | 219 | { |
f88a1e90 JG |
220 | return (pfn << range->pfn_shift) | |
221 | range->flags[HMM_PFN_VALID]; | |
133ff0ea JG |
222 | } |
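
/*
 * Putting the helpers above together, a driver walking a filled pfns array
 * might do something like the sketch below (my_device_map_page() is a
 * made-up driver hook standing in for whatever the driver does with the
 * page):
 *
 *      for (i = 0; i < (range->end - range->start) >> PAGE_SHIFT; i++) {
 *              struct page *page = hmm_pfn_to_page(range, range->pfns[i]);
 *
 *              if (!page)
 *                      continue; // none, error or special entry
 *              my_device_map_page(das, range->start + (i << PAGE_SHIFT),
 *                                 page,
 *                                 range->pfns[i] & range->flags[HMM_PFN_WRITE]);
 *      }
 */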
223 | ||
224 | ||
c0b12405 JG |
225 | #if IS_ENABLED(CONFIG_HMM_MIRROR) |
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can
 * either directly use mmu_notifier APIs or they can use the hmm_mirror API.
 * Device drivers can decide to register one mirror per device per process, or
 * just one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *              struct device_address_space *das;
 *
 *              // Device driver specific initialization, and allocation of das
 *              // which contains an hmm_mirror struct as one of its fields.
 *              ...
 *
 *              das->mirror.ops = &device_mirror_ops;
 *              ret = hmm_mirror_register(&das->mirror, mm);
 *              if (ret) {
 *                      // Cleanup on error
 *                      return ret;
 *              }
 *
 *              // Other device driver specific initialization
 *              ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *              // Device driver specific cleanup
 *              ...
 *
 *              hmm_mirror_unregister(&das->mirror);
 *
 *              // Other device driver specific cleanup, and now das can be
 *              // freed
 *              ...
 *      }
 */
273 | ||
274 | struct hmm_mirror; | |
275 | ||
276 | /* | |
44532d4c | 277 | * enum hmm_update_event - type of update |
c0b12405 JG |
278 | * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) |
279 | */ | |
44532d4c | 280 | enum hmm_update_event { |
c0b12405 JG |
281 | HMM_UPDATE_INVALIDATE, |
282 | }; | |
283 | ||
44532d4c JG |
284 | /* |
285 | * struct hmm_update - HMM update informations for callback | |
286 | * | |
287 | * @start: virtual start address of the range to update | |
288 | * @end: virtual end address of the range to update | |
289 | * @event: event triggering the update (what is happening) | |
290 | * @blockable: can the callback block/sleep ? | |
291 | */ | |
292 | struct hmm_update { | |
293 | unsigned long start; | |
294 | unsigned long end; | |
295 | enum hmm_update_event event; | |
296 | bool blockable; | |
297 | }; | |
298 | ||
c0b12405 JG |
299 | /* |
300 | * struct hmm_mirror_ops - HMM mirror device operations callback | |
301 | * | |
302 | * @update: callback to update range on a device | |
303 | */ | |
304 | struct hmm_mirror_ops { | |
e1401513 RC |
305 | /* release() - release hmm_mirror |
306 | * | |
307 | * @mirror: pointer to struct hmm_mirror | |
308 | * | |
309 | * This is called when the mm_struct is being released. | |
310 | * The callback should make sure no references to the mirror occur | |
311 | * after the callback returns. | |
312 | */ | |
313 | void (*release)(struct hmm_mirror *mirror); | |
314 | ||
c0b12405 JG |
315 | /* sync_cpu_device_pagetables() - synchronize page tables |
316 | * | |
317 | * @mirror: pointer to struct hmm_mirror | |
44532d4c JG |
318 | * @update: update informations (see struct hmm_update) |
319 | * Returns: -EAGAIN if update.blockable false and callback need to | |
320 | * block, 0 otherwise. | |
c0b12405 JG |
321 | * |
322 | * This callback ultimately originates from mmu_notifiers when the CPU | |
323 | * page table is updated. The device driver must update its page table | |
324 | * in response to this callback. The update argument tells what action | |
325 | * to perform. | |
326 | * | |
327 | * The device driver must not return from this callback until the device | |
328 | * page tables are completely updated (TLBs flushed, etc); this is a | |
329 | * synchronous call. | |
330 | */ | |
44532d4c JG |
331 | int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, |
332 | const struct hmm_update *update); | |
c0b12405 JG |
333 | }; |
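
/*
 * A driver's sync_cpu_device_pagetables() implementation typically looks
 * like the sketch below (my_device_invalidate_range() and the das->lock
 * mutex are hypothetical driver internals, not part of the HMM API):
 *
 *      static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                          const struct hmm_update *update)
 *      {
 *              struct device_address_space *das;
 *
 *              das = container_of(mirror, struct device_address_space,
 *                                 mirror);
 *              if (update->blockable)
 *                      mutex_lock(&das->lock);
 *              else if (!mutex_trylock(&das->lock))
 *                      return -EAGAIN;
 *              // Invalidate device page tables and flush device TLBs before
 *              // returning; this callback must be synchronous.
 *              my_device_invalidate_range(das, update->start, update->end);
 *              mutex_unlock(&das->lock);
 *              return 0;
 *      }
 */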
334 | ||
335 | /* | |
336 | * struct hmm_mirror - mirror struct for a device driver | |
337 | * | |
338 | * @hmm: pointer to struct hmm (which is unique per mm_struct) | |
339 | * @ops: device driver callback for HMM mirror operations | |
340 | * @list: for list of mirrors of a given mm | |
341 | * | |
342 | * Each address space (mm_struct) being mirrored by a device must register one | |
343 | * instance of an hmm_mirror struct with HMM. HMM will track the list of all | |
344 | * mirrors for each mm_struct. | |
345 | */ | |
346 | struct hmm_mirror { | |
347 | struct hmm *hmm; | |
348 | const struct hmm_mirror_ops *ops; | |
349 | struct list_head list; | |
350 | }; | |
351 | ||
352 | int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); | |
353 | void hmm_mirror_unregister(struct hmm_mirror *mirror); | |

/*
 * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
 * driver lock that serializes device page table updates, then call
 * hmm_vma_range_done(), to check if the snapshot is still valid. The same
 * device driver page table update lock must also be used in the
 * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page
 * table invalidation serializes on it.
 *
 * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL
 * hmm_vma_get_pfns() WITHOUT ERROR !
 *
 * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID !
 */
int hmm_vma_get_pfns(struct hmm_range *range);
bool hmm_vma_range_done(struct hmm_range *range);
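
/*
 * A sketch of the snapshot protocol described above (device_update_pte() and
 * the das->pt_lock are hypothetical driver internals):
 *
 *      again:
 *              ret = hmm_vma_get_pfns(&range);
 *              if (ret)
 *                      return ret;
 *              // Prepare device page table updates from range.pfns[] ...
 *              take_lock(das->pt_lock);
 *              if (!hmm_vma_range_done(&range)) {
 *                      release_lock(das->pt_lock);
 *                      goto again;
 *              }
 *              device_update_pte(das, &range);
 *              release_lock(das->pt_lock);
 */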

/*
 * Fault memory on behalf of a device driver. Unlike handle_mm_fault(), this
 * will not migrate any device memory back to system memory. The HMM pfn array
 * will be updated with the fault result and current snapshot of the CPU page
 * table for the range.
 *
 * The mmap_sem must be taken in read mode before entering and it might be
 * dropped by the function if the block argument is false. In that case, the
 * function returns -EAGAIN.
 *
 * The return value does not reflect whether the fault was successful for
 * every single address. Therefore, the caller must inspect the HMM pfn array
 * to determine the fault status for each address.
 *
 * Trying to fault inside an invalid vma will result in -EINVAL.
 *
 * See the function description in mm/hmm.c for further documentation.
 */
int hmm_vma_fault(struct hmm_range *range, bool block);
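
/*
 * A minimal sketch of faulting a range on behalf of the device, assuming the
 * caller already holds mmap_sem in read mode and has filled in the range
 * struct (dev_pte_from_pfn() is a made-up driver helper; the snapshot must
 * then be validated with hmm_vma_range_done() under the driver lock, just as
 * for hmm_vma_get_pfns() above):
 *
 *      ret = hmm_vma_fault(&range, true);
 *      if (ret)
 *              return ret;
 *      for (i = 0; i < npages; i++) {
 *              if (range.pfns[i] == range.values[HMM_PFN_ERROR])
 *                      return -EFAULT; // fault failed for this address
 *              dev_pte_from_pfn(das, range.pfns[i]);
 *      }
 */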

/* Below are for HMM internal use only! Not to be used by device driver! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);

/*
 * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is
 * no longer in use
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are here to provide a coherent and unique API
 * to device drivers; a device driver should not register its own page_free()
 * or page_fault() but instead rely on the hmm_devmem_ops callbacks.
 */
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page's refcount reaches 1,
	 * which means that no one is holding any reference on the page
	 * anymore (ZONE_DEVICE pages have a default elevated refcount of 1,
	 * so that they are not released to the general page allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 * on error
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However, if the device decides
	 * to migrate more than one page, it must always prioritize the
	 * faulting address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration
	 * might have already freed that page and the virtual address might
	 * no longer be backed by it. So it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held at least in read mode when
	 * this callback occurs, hence the vma is valid upon callback entry.
	 */
	int (*fault)(struct hmm_devmem *devmem,
		     struct vm_area_struct *vma,
		     unsigned long addr,
		     const struct page *page,
		     unsigned int flags,
		     pmd_t *pmdp);
};
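
/*
 * The shape of a fault() implementation, reduced to its control flow
 * (my_migrate_to_ram() is hypothetical; a real implementation would use
 * migrate_vma() or similar to move the data back to system memory):
 *
 *      static int my_fault(struct hmm_devmem *devmem,
 *                          struct vm_area_struct *vma,
 *                          unsigned long addr,
 *                          const struct page *page,
 *                          unsigned int flags,
 *                          pmd_t *pmdp)
 *      {
 *              // Migrate the device page backing addr (and possibly its
 *              // neighbors, faulting address first) back to system memory.
 *              if (my_migrate_to_ram(devmem, vma, addr, flags, pmdp))
 *                      return VM_FAULT_SIGBUS;
 *              return VM_FAULT_MINOR;
 *      }
 */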
477 | ||
478 | /* | |
479 | * struct hmm_devmem - track device memory | |
480 | * | |
481 | * @completion: completion object for device memory | |
482 | * @pfn_first: first pfn for this resource (set by hmm_devmem_add()) | |
483 | * @pfn_last: last pfn for this resource (set by hmm_devmem_add()) | |
484 | * @resource: IO resource reserved for this chunk of memory | |
485 | * @pagemap: device page map for that chunk | |
486 | * @device: device to bind resource to | |
487 | * @ops: memory operations callback | |
488 | * @ref: per CPU refcount | |
489 | * | |
490 | * This an helper structure for device drivers that do not wish to implement | |
491 | * the gory details related to hotplugging new memoy and allocating struct | |
492 | * pages. | |
493 | * | |
494 | * Device drivers can directly use ZONE_DEVICE memory on their own if they | |
495 | * wish to do so. | |
496 | */ | |
497 | struct hmm_devmem { | |
498 | struct completion completion; | |
499 | unsigned long pfn_first; | |
500 | unsigned long pfn_last; | |
501 | struct resource *resource; | |
502 | struct device *device; | |
503 | struct dev_pagemap pagemap; | |
504 | const struct hmm_devmem_ops *ops; | |
505 | struct percpu_ref ref; | |
506 | }; | |
507 | ||
508 | /* | |
509 | * To add (hotplug) device memory, HMM assumes that there is no real resource | |
510 | * that reserves a range in the physical address space (this is intended to be | |
511 | * use by unaddressable device memory). It will reserve a physical range big | |
512 | * enough and allocate struct page for it. | |
513 | * | |
514 | * The device driver can wrap the hmm_devmem struct inside a private device | |
515 | * driver struct. The device driver must call hmm_devmem_remove() before the | |
516 | * device goes away and before freeing the hmm_devmem struct memory. | |
517 | */ | |
518 | struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, | |
519 | struct device *device, | |
520 | unsigned long size); | |
d3df0a42 JG |
521 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
522 | struct device *device, | |
523 | struct resource *res); | |
4ef589dc JG |
524 | void hmm_devmem_remove(struct hmm_devmem *devmem); |
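
/*
 * Typical usage, sketched (the size and the my_driver_ops name are
 * illustrative):
 *
 *      devmem = hmm_devmem_add(&my_driver_ops, &pdev->dev, SZ_1G);
 *      if (IS_ERR(devmem))
 *              return PTR_ERR(devmem);
 *      // struct pages now exist for the new device memory; the driver can
 *      // manage them with its own allocator and hand them out, e.g. via
 *      // hmm_vma_alloc_locked_page().
 */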
525 | ||
526 | /* | |
527 | * hmm_devmem_page_set_drvdata - set per-page driver data field | |
528 | * | |
529 | * @page: pointer to struct page | |
530 | * @data: driver data value to set | |
531 | * | |
532 | * Because page can not be on lru we have an unsigned long that driver can use | |
533 | * to store a per page field. This just a simple helper to do that. | |
534 | */ | |
535 | static inline void hmm_devmem_page_set_drvdata(struct page *page, | |
536 | unsigned long data) | |
537 | { | |
50e7fbc3 | 538 | page->hmm_data = data; |
4ef589dc JG |
539 | } |
540 | ||
541 | /* | |
542 | * hmm_devmem_page_get_drvdata - get per page driver data field | |
543 | * | |
544 | * @page: pointer to struct page | |
545 | * Return: driver data value | |
546 | */ | |
0bea803e | 547 | static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page) |
4ef589dc | 548 | { |
50e7fbc3 | 549 | return page->hmm_data; |
4ef589dc | 550 | } |

/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
	struct device device;
	unsigned int minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */