// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

static DEFINE_XARRAY(pgmap_array);
/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch-supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
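
/*
 * Illustrative sketch (not part of this file; the identifiers below are
 * hypothetical): a namespace-setup path could use memremap_compat_align()
 * to reject configurations that would later prevent switching between
 * memremap() and memremap_pages() modes.
 */
#if 0
static int example_validate_namespace(struct range *range)
{
        unsigned long align = memremap_compat_align();

        /* PAGE_SIZE alignment alone is not enough, see the comment above. */
        if (!IS_ALIGNED(range->start, align) ||
            !IS_ALIGNED(range_len(range), align))
                return -EINVAL;
        return 0;
}
#endif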

#ifdef CONFIG_FS_DAX
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_FS_DAX)
                static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_FS_DAX)
                static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */
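
/*
 * For context: the point of this static key is to keep the common
 * put_page() path cheap when no FS_DAX pagemaps exist. A simplified
 * sketch of the caller-side gate (modeled on
 * put_devmap_managed_page_refs() in include/linux/mm.h; check the
 * header for the current form):
 */
#if 0
static inline bool example_put_devmap_managed_page(struct page *page)
{
        /* Compiles to a no-op branch until an FS_DAX pgmap registers. */
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
        return __put_devmap_managed_page_refs(page, 1);
}
#endif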

static void pgmap_array_delete(struct range *range)
{
        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        unsigned long pfn = PHYS_PFN(range->start);

        if (range_id)
                return pfn;
        return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
        int i;

        for (i = 0; i < pgmap->nr_range; i++) {
                struct range *range = &pgmap->ranges[i];

                if (pfn >= PHYS_PFN(range->start) &&
                    pfn <= PHYS_PFN(range->end))
                        return pfn >= pfn_first(pgmap, i);
        }

        return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
        const struct range *range = &pgmap->ranges[range_id];

        return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
        return (pfn_end(pgmap, range_id) -
                pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        struct page *first_page;

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap, range_id));

        /* pages are dead and unused, undo the arch mapping */
        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
                                   PHYS_PFN(range_len(range)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(range->start),
                               PHYS_PFN(range_len(range)), NULL);
        } else {
                arch_remove_memory(range->start, range_len(range),
                                   pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
        pgmap_array_delete(range);
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
        int i;

        percpu_ref_kill(&pgmap->ref);
        if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
            pgmap->type != MEMORY_DEVICE_COHERENT)
                for (i = 0; i < pgmap->nr_range; i++)
                        percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));

        wait_for_completion(&pgmap->done);

        for (i = 0; i < pgmap->nr_range; i++)
                pageunmap_range(pgmap, i);
        percpu_ref_exit(&pgmap->ref);

        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

        complete(&pgmap->done);
}
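
/*
 * For context: pgmap->ref and pgmap->done implement the standard
 * percpu_ref teardown idiom. A condensed sketch of the lifecycle as
 * used in this file (illustrative only; see memremap_pages() and
 * memunmap_pages() for the real call sites):
 *
 *      percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0, GFP_KERNEL);
 *      ...hand out one reference per device page...
 *      percpu_ref_kill(&pgmap->ref);       // refuse new references
 *      wait_for_completion(&pgmap->done);  // final put calls complete()
 *      percpu_ref_exit(&pgmap->ref);       // release the percpu counter
 */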

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                int range_id, int nid)
{
        const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
        struct range *range = &pgmap->ranges[range_id];
        struct dev_pagemap *conflict_pgmap;
        int error, is_ram;

        if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
                                "altmap not supported for multiple ranges\n"))
                return -EINVAL;

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        is_ram = region_intersects(range->start, range_len(range),
                                   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
                                is_ram == REGION_MIXED ? "mixed" : "ram",
                                range->start, range->end);
                return -ENXIO;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
        if (error)
                return error;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
                        range_len(range));
        if (error)
                goto err_pfn_remap;

        if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
                error = -EINVAL;
                goto err_kasan;
        }

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover, the device memory is inaccessible from the CPU, so we
         * do not want to create a linear mapping for it the way
         * arch_add_memory() would.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (is_private) {
                error = add_pages(nid, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params);
        } else {
                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, range->start, range_len(range),
                                params);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params->altmap,
                                MIGRATE_MOVABLE);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), pgmap);
        if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
            pgmap->type != MEMORY_DEVICE_COHERENT)
                percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
        return 0;

err_add_memory:
        if (!is_private)
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
err_pfn_remap:
        pgmap_array_delete(range);
        return error;
}

/*
 * Not-device-managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a
 * struct device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct mhp_params params = {
                .altmap = pgmap_altmap(pgmap),
                .pgmap = pgmap,
                .pgprot = PAGE_KERNEL,
        };
        const int nr_range = pgmap->nr_range;
        int error, i;

        if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
                return ERR_PTR(-EINVAL);

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops->page_free) {
                        WARN(1, "Missing page_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_COHERENT:
                if (!pgmap->ops->page_free) {
                        WARN(1, "Missing page_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                params.pgprot = pgprot_decrypted(params.pgprot);
                break;
        case MEMORY_DEVICE_GENERIC:
                break;
        case MEMORY_DEVICE_PCI_P2PDMA:
                params.pgprot = pgprot_noncached(params.pgprot);
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        init_completion(&pgmap->done);
        error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
                                GFP_KERNEL);
        if (error)
                return ERR_PTR(error);

        devmap_managed_enable_get(pgmap);

        /*
         * Clear the pgmap nr_range as it will be incremented for each
         * successfully processed range. This communicates how many
         * regions to unwind in the abort case.
         */
        pgmap->nr_range = 0;
        error = 0;
        for (i = 0; i < nr_range; i++) {
                error = pagemap_range(pgmap, &params, i, nid);
                if (error)
                        break;
                pgmap->nr_range++;
        }

        if (i < nr_range) {
                memunmap_pages(pgmap);
                pgmap->nr_range = nr_range;
                return ERR_PTR(error);
        }

        return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);
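
/*
 * Illustrative sketch (modeled loosely on lib/test_hmm.c; all identifiers
 * are hypothetical): a MEMORY_DEVICE_PRIVATE user has no struct device to
 * hang the mapping off of, so it calls memremap_pages() directly and must
 * pair it with memunmap_pages() on teardown.
 */
#if 0
static int example_devmem_init(struct example_device *edev, struct resource *res)
{
        void *ptr;

        edev->pagemap.type = MEMORY_DEVICE_PRIVATE;
        edev->pagemap.range.start = res->start;
        edev->pagemap.range.end = res->end;
        edev->pagemap.nr_range = 1;
        edev->pagemap.ops = &example_devmem_ops; /* page_free + migrate_to_ram */
        edev->pagemap.owner = edev;

        ptr = memremap_pages(&edev->pagemap, numa_node_id());
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        return 0;       /* teardown: memunmap_pages(&edev->pagemap) */
}
#endif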

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
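
/*
 * Illustrative sketch (hypothetical driver; a real FS_DAX user also
 * supplies pgmap->ops, trimmed here for brevity): with the device-managed
 * flavor only the range, nr_range and type need filling in; cleanup is
 * queued automatically via devm_add_action_or_reset() above.
 */
#if 0
static int example_probe(struct device *dev, struct example_pmem *pmem,
                struct resource *res)
{
        void *addr;

        pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
        pmem->pgmap.range.start = res->start;
        pmem->pgmap.range.end = res->end;
        pmem->pgmap.nr_range = 1;

        addr = devm_memremap_pages(dev, &pmem->pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = addr; /* linear address of range->start */
        return 0;
}
#endif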

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
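
/*
 * Illustrative sketch (hypothetical caller): the @pgmap argument lets a
 * loop over consecutive pfns reuse the previous lookup, only paying for
 * the xarray walk when crossing into a different pagemap.
 */
#if 0
static void example_walk(unsigned long start_pfn, unsigned long nr_pages)
{
        struct dev_pagemap *pgmap = NULL;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                pgmap = get_dev_pagemap(pfn, pgmap);
                if (!pgmap)
                        continue;       /* pfn is not device memory */
                /* ... use pfn_to_page(pfn) ... */
        }
        if (pgmap)
                put_dev_pagemap(pgmap);
}
#endif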

void free_zone_device_page(struct page *page)
{
        if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
                return;

        mem_cgroup_uncharge(page_folio(page));

        /*
         * Note: we don't expect anonymous compound pages yet. Once supported
         * and we could PTE-map them similar to THP, we'd have to clear
         * PG_anon_exclusive on all tail pages.
         */
        VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
        if (PageAnon(page))
                __ClearPageAnonExclusive(page);

        /*
         * When a device managed page is freed, the folio->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of folio->mapping may still identify the folio as an
         * anonymous folio. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);

        if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
            page->pgmap->type != MEMORY_DEVICE_COHERENT)
                /*
                 * Reset the page count to 1 to prepare for handing out the
                 * page again.
                 */
                set_page_count(page, 1);
        else
                put_dev_pagemap(page->pgmap);
}

void zone_device_page_init(struct page *page)
{
        /*
         * Drivers shouldn't be allocating pages after calling
         * memunmap_pages().
         */
        WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
        set_page_count(page, 1);
        lock_page(page);
}
EXPORT_SYMBOL_GPL(zone_device_page_init);
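
/*
 * Illustrative sketch (modeled loosely on lib/test_hmm.c; identifiers are
 * hypothetical): a device-private driver picks a free struct page off its
 * own free list, then calls zone_device_page_init() so the page is
 * refcounted and locked before being handed to the migrate machinery.
 */
#if 0
static struct page *example_alloc_device_page(struct example_device *edev)
{
        struct page *dpage;

        spin_lock(&edev->free_lock);
        dpage = edev->free_pages;               /* driver-private free list */
        if (dpage)
                edev->free_pages = dpage->zone_device_data;
        spin_unlock(&edev->free_lock);

        if (dpage)
                zone_device_page_init(dpage);
        return dpage;                           /* locked, refcount == 1 */
}
#endif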

#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
        if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
                return false;

        /*
         * fsdax page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
        if (page_ref_sub_return(page, refs) == 1)
                wake_up_var(&page->_refcount);
        return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page_refs);
#endif /* CONFIG_FS_DAX */
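
/*
 * Illustrative sketch (hypothetical waiter, modeled on the pattern fsdax
 * filesystems use when truncating DAX mappings): the wake_up_var() above
 * pairs with a waiter that blocks until the 1-based refcount drops back
 * to 1, i.e. until no get_user_pages() references remain.
 */
#if 0
static void example_wait_page_idle(struct page *page)
{
        wait_var_event(&page->_refcount,
                       atomic_read(&page->_refcount) == 1);
}
#endif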