/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and an overview of what HMM is
 * and what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page tables of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page
 * table. This does not cause any issue, because the CPU page table cannot
 * start pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular
 * memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HMM_MIRROR

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * struct hmm - HMM per mm struct
 *
 * @mmu_notifier: mmu notifier to track updates to the CPU page table
 * @ranges_lock: lock protecting the ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 */
struct hmm {
	struct mmu_notifier	mmu_notifier;
	spinlock_t		ranges_lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct rw_semaphore	mirrors_sem;
	wait_queue_head_t	wq;
	long			notifiers;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in a vma, except that this is per device driver rather than per
 * architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

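/*
 * As an illustrative sketch (the array name and bit positions here are
 * hypothetical; each driver picks bits matching its own device page table
 * format), a driver whose device PTEs use bit 3 for valid, bit 2 for write,
 * and bit 1 for device-private memory could describe its encoding as:
 *
 *	static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]		 = 1UL << 3,
 *		[HMM_PFN_WRITE]		 = 1UL << 2,
 *		[HMM_PFN_DEVICE_PRIVATE] = 1UL << 1,
 *	};
 *
 * and point hmm_range.flags at that array.
 */
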
/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, the error entry, and the
 * special entry. The driver can alias (i.e., use the same value for) error
 * and special, but it should not alias none with error or special.
 *
 * An HMM pfn value returned by hmm_range_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};

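/*
 * A minimal sketch of such a values array (the encodings are hypothetical
 * and up to each driver); here error and special are aliased, which the
 * rules above allow, while none stays distinct:
 *
 *	static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR]	  = 0x1UL,
 *		[HMM_PFN_NONE]	  = 0x0UL,
 *		[HMM_PFN_SPECIAL] = 0x1UL,
 *	};
 *
 * With the example_flags array above, neither value has the HMM_PFN_VALID
 * bit (1 << 3) set, so they can never be confused with a valid entry.
 */
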
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @list: all ranges are kept on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
	struct hmm		*hmm;
	struct list_head	list;
	unsigned long		start;
	unsigned long		end;
	uint64_t		*pfns;
	const uint64_t		*flags;
	const uint64_t		*values;
	uint64_t		default_flags;
	uint64_t		pfn_flags_mask;
	uint8_t			pfn_shift;
	bool			valid;
};

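/*
 * A hedged initialization sketch, reusing the hypothetical example_flags and
 * example_values arrays above (the pfns array must be big enough to cover
 * the whole range):
 *
 *	uint64_t pfns[NPAGES];
 *	struct hmm_range range = {
 *		.start		= addr,			// page aligned
 *		.end		= addr + (NPAGES << PAGE_SHIFT),
 *		.pfns		= pfns,
 *		.flags		= example_flags,
 *		.values		= example_values,
 *		.pfn_shift	= PAGE_SHIFT,
 *	};
 */
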
/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: time out for wait in ms (i.e., abort wait after that period)
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	return wait_event_timeout(range->hmm->wq, range->valid,
				  msecs_to_jiffies(timeout)) != 0;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}

133ff0ea 198/*
391aab11
JG
199 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
200 * @range: range use to decode device entry value
201 * @entry: device entry value to get corresponding struct page from
085ea250 202 * Return: struct page pointer if entry is a valid, NULL otherwise
133ff0ea 203 *
391aab11
JG
204 * If the device entry is valid (ie valid flag set) then return the struct page
205 * matching the entry value. Otherwise return NULL.
133ff0ea 206 */
391aab11
JG
207static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
208 uint64_t entry)
133ff0ea 209{
391aab11 210 if (entry == range->values[HMM_PFN_NONE])
f88a1e90 211 return NULL;
391aab11 212 if (entry == range->values[HMM_PFN_ERROR])
f88a1e90 213 return NULL;
391aab11 214 if (entry == range->values[HMM_PFN_SPECIAL])
133ff0ea 215 return NULL;
391aab11 216 if (!(entry & range->flags[HMM_PFN_VALID]))
f88a1e90 217 return NULL;
391aab11 218 return pfn_to_page(entry >> range->pfn_shift);
133ff0ea
JG
219}
220
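/*
 * For instance (a sketch, not a prescribed pattern), after a successful
 * hmm_range_fault() a driver might walk the result like this:
 *
 *	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
 *
 *	for (i = 0; i < npages; i++) {
 *		struct page *page;
 *
 *		page = hmm_device_entry_to_page(range, range->pfns[i]);
 *		if (!page)
 *			continue;	// none, error, or special entry
 *		// ... program the device page table entry for this page ...
 *	}
 */
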
/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry to extract the pfn from
 * Return: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return -1UL;
	if (entry == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Return: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

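/*
 * For example (a sketch), a driver building a writable entry could combine
 * the helper above with its flags array:
 *
 *	range->pfns[i] = hmm_device_entry_from_page(range, page) |
 *			 range->flags[HMM_PFN_WRITE];
 */
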
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          das->mirror.ops = &device_mirror_ops;
 *          ret = hmm_mirror_register(&das->mirror, mm);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @release: callback to stop using the mirror when the mm goes away
 * @sync_cpu_device_pagetables: callback to update a range on the device
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * must ensure that all access to any pages obtained from this mirror
	 * is halted before the callback returns. All future access should
	 * fault.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct mmu_notifier_range)
	 * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
	 * and the callback needs to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the
	 * device page tables are completely updated (TLBs flushed, etc.);
	 * this is a synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(
		struct hmm_mirror *mirror,
		const struct mmu_notifier_range *update);
};

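/*
 * A hedged sketch of such a callback; device_invalidate_range() and the
 * device_address_space struct are hypothetical driver-specific pieces:
 *
 *	static int dev_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *update)
 *	{
 *		struct device_address_space *das =
 *			container_of(mirror, struct device_address_space,
 *				     mirror);
 *
 *		if (!mmu_notifier_range_blockable(update))
 *			return -EAGAIN;	// caller cannot sleep, ask to retry
 *
 *		// Tear down device PTEs for [update->start, update->end) and
 *		// flush the device TLB before returning (synchronous call).
 *		device_invalidate_range(das, update->start, update->end);
 *		return 0;
 *	}
 */
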
/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);

/*
 * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
 */
#define HMM_FAULT_ALLOW_RETRY	(1 << 0)

/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT	(1 << 1)

long hmm_range_fault(struct hmm_range *range, unsigned int flags);

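/*
 * A condensed sketch of the snapshot pattern described in
 * Documentation/vm/hmm.rst; driver_lock, take_lock(), and release_lock()
 * are hypothetical driver-side pieces (the same lock must also be taken by
 * the mirror's sync_cpu_device_pagetables() callback):
 *
 *	long ret;
 *
 *	hmm_range_register(&range, &das->mirror);
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, HMM_FAULT_SNAPSHOT);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY) {
 *			// Range was invalidated; wait and retry.
 *			hmm_range_wait_until_valid(&range,
 *						   HMM_RANGE_DEFAULT_TIMEOUT);
 *			goto again;
 *		}
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	take_lock(driver_lock);
 *	if (!hmm_range_valid(&range)) {
 *		release_lock(driver_lock);
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// ... use range.pfns to update the device page table ...
 *	hmm_range_unregister(&range);
 *	release_lock(driver_lock);
 *	up_read(&mm->mmap_sem);
 *	return 0;
 */
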
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of time out, otherwise we
 * could potentially wait forever; 1000ms (i.e., 1s) is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

9d8a463a 409#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
c0b12405 410
133ff0ea 411#endif /* LINUX_HMM_H */