/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/mm.h>

struct mmu_interval_notifier;

/*
 * On output:
 * 0                 - The page is faultable and a future call with
 *                     HMM_PFN_REQ_FAULT could succeed.
 * HMM_PFN_VALID     - the pfn field points to a valid PFN. This PFN is at
 *                     least readable. If dev_private_owner is !NULL then this
 *                     could point at a DEVICE_PRIVATE page.
 * HMM_PFN_WRITE     - if the page memory can be written to (requires
 *                     HMM_PFN_VALID)
 * HMM_PFN_ERROR     - accessing the pfn is impossible and the device should
 *                     fail, e.g. poisoned memory, special pages, no vma, etc.
 *
 * On input:
 * 0                 - Return the current state of the page, do not fault it.
 * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
 *                     will fail.
 * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
 *                     will fail. Must be combined with HMM_PFN_REQ_FAULT.
 */
enum hmm_pfn_flags {
	/* Output fields and flags */
	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),

	/* Input flags */
	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,

	HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
};

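/*
 * A minimal sketch (illustration only, not part of the API) of how a driver
 * might set the input request flags and test the output flags. "range" is
 * assumed to be a populated struct hmm_range and "i" an index into its pfn
 * array:
 *
 *	range.hmm_pfns[i] = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *
 * and after hmm_range_fault() returns 0:
 *
 *	if (range.hmm_pfns[i] & HMM_PFN_ERROR)
 *		return -EFAULT;
 *	writable = !!(range.hmm_pfns[i] & HMM_PFN_WRITE);
 */
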
/*
 * hmm_pfn_to_page() - return struct page pointed to by a device entry
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
{
	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
}

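/*
 * A small illustrative sketch, assuming "range" is a struct hmm_range that
 * has been faulted and "i" indexes its pfn array (driver code, hypothetical):
 *
 *	if (range.hmm_pfns[i] & HMM_PFN_VALID) {
 *		struct page *page = hmm_pfn_to_page(range.hmm_pfns[i]);
 *		... use the page to build the device mapping ...
 *	}
 */
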
/*
 * hmm_pfn_to_map_order() - return the CPU mapping size order
 *
 * This is optionally useful to optimize processing of the pfn result
 * array. It indicates that the page starts at the order-aligned VA and is
 * 1<<order pages (PAGE_SIZE << order bytes) long. Every pfn within a high
 * order page will have the same pfn flags, both access protections and the
 * map_order. The caller must be careful with edge cases as the start and
 * end VA of the given page may extend past the range used with
 * hmm_range_fault().
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}

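/*
 * A hedged sketch of the optimization this enables: stepping through the pfn
 * array one mapping at a time instead of one page at a time. "range" and
 * "npages" are assumed to exist in the caller; remember the edge-case caveat
 * above when the high order page extends past the faulted range:
 *
 *	unsigned long i;
 *	unsigned int order;
 *
 *	for (i = 0; i < npages; i += 1UL << order) {
 *		order = hmm_pfn_to_map_order(range.hmm_pfns[i]);
 *		... program one device mapping covering 1 << order pages ...
 *	}
 */
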
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @hmm_pfns: array of pfns (big enough for the range)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matters
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
	struct mmu_interval_notifier *notifier;
	unsigned long		notifier_seq;
	unsigned long		start;
	unsigned long		end;
	unsigned long		*hmm_pfns;
	unsigned long		default_flags;
	unsigned long		pfn_flags_mask;
	void			*dev_private_owner;
};

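/*
 * A hedged configuration sketch, mirroring the examples in
 * Documentation/vm/hmm.rst. To fault every page in the range with at least
 * read permission, a driver could set:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *	range.pfn_flags_mask = 0;
 *
 * while to additionally request write permission for selected entries only:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *	range.pfn_flags_mask = HMM_PFN_REQ_WRITE;
 *	range.hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;
 *
 * "index_of_write" is a placeholder for whichever entry the driver needs
 * to be writable.
 */
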
/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_fault(struct hmm_range *range);

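/*
 * A condensed, hedged sketch of the documented calling pattern (see
 * Documentation/vm/hmm.rst). "interval_sub", "mm" and the driver's update
 * lock are assumed to exist in the caller and are driver specific:
 *
 *	range.notifier = &interval_sub;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	take_lock(driver->update);
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		release_lock(driver->update);
 *		goto again;
 *	}
 *	... update the device page tables from range.hmm_pfns ...
 *	release_lock(driver->update);
 */
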
/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000 ms (i.e. 1 s) is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* LINUX_HMM_H */