/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_

#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
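
/*
 * Worked example (illustrative, not from the kernel source): assuming 4K
 * pages and a 64-byte struct page, an altmap with reserve = 0 and
 * free = 32768 donates the first 32768 pages (128M) of the mapping to
 * memmap storage, enough to describe 32768 * 4096 / 64 = 2M struct pages,
 * i.e. roughly 8G of device memory. @alloc and @align track how much of
 * @free vmemmap_populate() has consumed, including alignment padding.
 */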
29 | ||
5042db43 | 30 | /* |
041711ce | 31 | * Specialize ZONE_DEVICE memory into multiple types each has a different |
5042db43 JG |
32 | * usage. |
33 | * | |
5042db43 JG |
34 | * MEMORY_DEVICE_PRIVATE: |
35 | * Device memory that is not directly addressable by the CPU: CPU can neither | |
36 | * read nor write private memory. In this case, we do still have struct pages | |
37 | * backing the device memory. Doing so simplifies the implementation, but it is | |
38 | * important to remember that there are certain points at which the struct page | |
39 | * must be treated as an opaque object, rather than a "normal" struct page. | |
40 | * | |
41 | * A more complete discussion of unaddressable memory may be found in | |
ee65728e | 42 | * include/linux/hmm.h and Documentation/mm/hmm.rst. |
df6ad698 | 43 | * |
f25cbb7a AS |
44 | * MEMORY_DEVICE_COHERENT: |
45 | * Device memory that is cache coherent from device and CPU point of view. This | |
46 | * is used on platforms that have an advanced system bus (like CAPI or CXL). A | |
47 | * driver can hotplug the device memory using ZONE_DEVICE and with that memory | |
48 | * type. Any page of a process can be migrated to such memory. However no one | |
49 | * should be allowed to pin such memory so that it can always be evicted. | |
50 | * | |
e7638488 DW |
51 | * MEMORY_DEVICE_FS_DAX: |
52 | * Host memory that has similar access semantics as System RAM i.e. DMA | |
53 | * coherent and supports page pinning. In support of coordinating page | |
54 | * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a | |
55 | * wakeup event whenever a page is unpinned and becomes idle. This | |
56 | * wakeup is used to coordinate physical address space management (ex: | |
57 | * fs truncate/hole punch) vs pinned pages (ex: device dma). | |
52916982 | 58 | * |
4533d3ae | 59 | * MEMORY_DEVICE_GENERIC: |
3ed2dcdf | 60 | * Host memory that has similar access semantics as System RAM i.e. DMA |
4533d3ae RPM |
61 | * coherent and supports page pinning. This is for example used by DAX devices |
62 | * that expose memory using a character device. | |
3ed2dcdf | 63 | * |
52916982 LG |
64 | * MEMORY_DEVICE_PCI_P2PDMA: |
65 | * Device memory residing in a PCI BAR intended for use with Peer-to-Peer | |
66 | * transactions. | |
5042db43 JG |
67 | */ |
68 | enum memory_type { | |
3ed2dcdf | 69 | /* 0 is reserved to catch uninitialized type fields */ |
e7638488 | 70 | MEMORY_DEVICE_PRIVATE = 1, |
f25cbb7a | 71 | MEMORY_DEVICE_COHERENT, |
e7638488 | 72 | MEMORY_DEVICE_FS_DAX, |
4533d3ae | 73 | MEMORY_DEVICE_GENERIC, |
52916982 | 74 | MEMORY_DEVICE_PCI_P2PDMA, |
5042db43 JG |
75 | }; |
76 | ||
1e240e8d CH |
77 | struct dev_pagemap_ops { |
78 | /* | |
27674ef6 CH |
79 | * Called once the page refcount reaches 0. The reference count will be |
80 | * reset to one by the core code after the method is called to prepare | |
81 | * for handing out the page again. | |
1e240e8d | 82 | */ |
80a72d0a | 83 | void (*page_free)(struct page *page); |
1e240e8d | 84 | |
897e6365 CH |
85 | /* |
86 | * Used for private (un-addressable) device memory only. Must migrate | |
87 | * the page back to a CPU accessible page. | |
88 | */ | |
89 | vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); | |
33a8f7f2 SR |
90 | |
91 | /* | |
92 | * Handle the memory failure happens on a range of pfns. Notify the | |
93 | * processes who are using these pfns, and try to recover the data on | |
94 | * them if necessary. The mf_flags is finally passed to the recover | |
95 | * function through the whole notify routine. | |
96 | * | |
97 | * When this is not implemented, or it returns -EOPNOTSUPP, the caller | |
98 | * will fall back to a common handler called mf_generic_kill_procs(). | |
99 | */ | |
100 | int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, | |
101 | unsigned long nr_pages, int mf_flags); | |
1e240e8d | 102 | }; |
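
/*
 * Illustrative sketch (not part of this header): a MEMORY_DEVICE_PRIVATE
 * driver supplies an ops table like the one below; my_page_free() and
 * my_migrate_to_ram() are hypothetical driver helpers:
 *
 *	static const struct dev_pagemap_ops my_pagemap_ops = {
 *		.page_free	= my_page_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 */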

#define PGMAP_ALTMAP_VALID	(1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @done: completion for @ref
 * @type: memory type: see enum memory_type above
 * @flags: PGMAP_* flags to specify detailed behavior
 * @vmemmap_shift: structural definition of how the vmemmap page metadata
 *	is populated, specifically the metadata page order.
 *	A zero value (default) uses base pages as the vmemmap metadata
 *	representation. A bigger value will set up compound struct pages
 *	of the requested order value.
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *	instance. Used by various helpers to make sure that no
 *	foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	unsigned long vmemmap_shift;
	const struct dev_pagemap_ops *ops;
	void *owner;
	int nr_range;
	union {
		struct range range;
		DECLARE_FLEX_ARRAY(struct range, ranges);
	};
};
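
/*
 * Illustrative sketch (not part of this header): mapping a single physical
 * range as device-private memory; my_pagemap_ops, res_start and res_end
 * are hypothetical:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->nr_range = 1;
 *	pgmap->range.start = res_start;
 *	pgmap->range.end = res_end;	(inclusive, as struct range expects)
 *	pgmap->ops = &my_pagemap_ops;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */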
141 | ||
65d3440e DW |
142 | static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap) |
143 | { | |
144 | return pgmap->ops && pgmap->ops->memory_failure; | |
145 | } | |
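
/*
 * Illustrative usage (not part of this header): the memory-failure path
 * can probe for a driver handler first and fall back to the generic
 * mf_generic_kill_procs() handler when none is registered or -EOPNOTSUPP
 * is returned:
 *
 *	if (pgmap_has_memory_failure(pgmap))
 *		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, mf_flags);
 */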
146 | ||
514caf23 CH |
147 | static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) |
148 | { | |
149 | if (pgmap->flags & PGMAP_ALTMAP_VALID) | |
150 | return &pgmap->altmap; | |
151 | return NULL; | |
152 | } | |
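
/*
 * Illustrative sketch (not part of this header): a driver that carves the
 * memmap out of the device range itself fills @altmap and sets the flag;
 * range_start, reserved_pfns and memmap_pfns are hypothetical:
 *
 *	pgmap->altmap.base_pfn = PHYS_PFN(range_start);
 *	pgmap->altmap.reserve = reserved_pfns;
 *	pgmap->altmap.free = memmap_pfns;
 *	pgmap->flags |= PGMAP_ALTMAP_VALID;
 */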
153 | ||
c4386bd8 JM |
154 | static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) |
155 | { | |
156 | return 1 << pgmap->vmemmap_shift; | |
157 | } | |
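
/*
 * Example (illustrative): with 4K base pages and vmemmap_shift == 9,
 * pgmap_vmemmap_nr() returns 512, i.e. the device memmap is built from
 * order-9 compound pages, one head page per 2M of device memory.
 */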
158 | ||
dc90f084 CH |
159 | static inline bool is_device_private_page(const struct page *page) |
160 | { | |
27674ef6 | 161 | return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && |
dc90f084 CH |
162 | is_zone_device_page(page) && |
163 | page->pgmap->type == MEMORY_DEVICE_PRIVATE; | |
164 | } | |
165 | ||
536939ff MWO |
166 | static inline bool folio_is_device_private(const struct folio *folio) |
167 | { | |
168 | return is_device_private_page(&folio->page); | |
169 | } | |
170 | ||
dc90f084 CH |
171 | static inline bool is_pci_p2pdma_page(const struct page *page) |
172 | { | |
27674ef6 | 173 | return IS_ENABLED(CONFIG_PCI_P2PDMA) && |
dc90f084 CH |
174 | is_zone_device_page(page) && |
175 | page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; | |
176 | } | |
177 | ||
f25cbb7a AS |
178 | static inline bool is_device_coherent_page(const struct page *page) |
179 | { | |
180 | return is_zone_device_page(page) && | |
181 | page->pgmap->type == MEMORY_DEVICE_COHERENT; | |
182 | } | |
183 | ||
184 | static inline bool folio_is_device_coherent(const struct folio *folio) | |
185 | { | |
186 | return is_device_coherent_page(&folio->page); | |
187 | } | |
188 | ||
#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
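
/*
 * Illustrative usage (not part of this header): memremap_compat_align()
 * reports the minimum range alignment that works across all supported
 * architectures, so a candidate range can be validated up front:
 *
 *	if (!IS_ALIGNED(range->start, memremap_compat_align()))
 *		return -EINVAL;
 */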
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}
214 | ||
2e3f139e DW |
215 | static inline void devm_memunmap_pages(struct device *dev, |
216 | struct dev_pagemap *pgmap) | |
217 | { | |
218 | } | |
219 | ||
0822acb8 CH |
220 | static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, |
221 | struct dev_pagemap *pgmap) | |
9476df7d DW |
222 | { |
223 | return NULL; | |
224 | } | |
8e37d00a | 225 | |
34dc45be DW |
226 | static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) |
227 | { | |
228 | return false; | |
229 | } | |
230 | ||
8e37d00a CH |
231 | static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) |
232 | { | |
233 | return 0; | |
234 | } | |
235 | ||
236 | static inline void vmem_altmap_free(struct vmem_altmap *altmap, | |
237 | unsigned long nr_pfns) | |
238 | { | |
239 | } | |
9ffc1d19 DW |
240 | |
241 | /* when memremap_pages() is disabled all archs can remap a single page */ | |
242 | static inline unsigned long memremap_compat_align(void) | |
243 | { | |
244 | return PAGE_SIZE; | |
245 | } | |
8e37d00a | 246 | #endif /* CONFIG_ZONE_DEVICE */ |

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(&pgmap->ref);
}
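
/*
 * Illustrative usage (not part of this header): get_dev_pagemap() and
 * put_dev_pagemap() pair around accesses to a ZONE_DEVICE pfn; "pfn" is
 * hypothetical:
 *
 *	struct dev_pagemap *pgmap = get_dev_pagemap(pfn, NULL);
 *
 *	if (pgmap) {
 *		...	(pfn belongs to a ZONE_DEVICE mapping; @ref is held)
 *		put_dev_pagemap(pgmap);
 *	}
 */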

#endif /* _LINUX_MEMREMAP_H_ */