/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void)
{
        if (atomic_dec_and_test(&devmap_managed_enable))
                static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
            (!pgmap->ops || !pgmap->ops->page_free)) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }

        if (atomic_inc_return(&devmap_managed_enable) == 1)
                static_branch_enable(&devmap_managed_key);
        return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct resource *res)
{
        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
        return PHYS_PFN(pgmap->res.start) +
                vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
        const struct resource *res = &pgmap->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                cond_resched();
        return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->kill)
                pgmap->ops->kill(pgmap);
        else
                percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->cleanup) {
                pgmap->ops->cleanup(pgmap);
        } else {
                wait_for_completion(&pgmap->done);
                percpu_ref_exit(pgmap->ref);
        }
        /*
         * Undo the pgmap ref assignment for the internal case as the
         * caller may re-enable the same pgmap.
         */
        if (pgmap->ref == &pgmap->internal_ref)
                pgmap->ref = NULL;
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
        struct resource *res = &pgmap->res;
        struct page *first_page;
        unsigned long pfn;
        int nid;

        dev_pagemap_kill(pgmap);
        for_each_device_pfn(pfn, pgmap)
                put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap));

        /* pages are dead and unused, undo the arch mapping */
        nid = page_to_nid(first_page);

        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
                                   PHYS_PFN(resource_size(res)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
                                pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(res->start), resource_size(res));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
        pgmap_array_delete(res);
        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap =
                container_of(ref, struct dev_pagemap, internal_ref);

        complete(&pgmap->done);
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct resource *res = &pgmap->res;
        struct dev_pagemap *conflict_pgmap;
        struct mhp_restrictions restrictions = {
                /*
                 * We do not want any optional features, only our own memmap.
                 */
                .altmap = pgmap_altmap(pgmap),
        };
        pgprot_t pgprot = PAGE_KERNEL;
        int error, is_ram;
        bool need_devmap_managed = true;

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
                    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_DEVDAX:
        case MEMORY_DEVICE_PCI_P2PDMA:
                need_devmap_managed = false;
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        if (!pgmap->ref) {
                if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
                        return ERR_PTR(-EINVAL);

                init_completion(&pgmap->done);
                error = percpu_ref_init(&pgmap->internal_ref,
                                dev_pagemap_percpu_release, 0, GFP_KERNEL);
                if (error)
                        return ERR_PTR(error);
                pgmap->ref = &pgmap->internal_ref;
        } else {
                if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
                        WARN(1, "Missing reference count teardown definition\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (need_devmap_managed) {
                error = devmap_managed_enable_get(pgmap);
                if (error)
                        return ERR_PTR(error);
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

        is_ram = region_intersects(res->start, resource_size(res),
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
                                is_ram == REGION_MIXED ? "mixed" : "ram", res);
                error = -ENXIO;
                goto err_array;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
                                PHYS_PFN(res->end), pgmap, GFP_KERNEL));
        if (error)
                goto err_array;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
                        resource_size(res));
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover the device memory is inaccessible to the CPU, so we do
         * not want to create a linear mapping for it the way
         * arch_add_memory() would.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                error = add_pages(nid, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), &restrictions);
        } else {
                error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, res->start, resource_size(res),
                                        &restrictions);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), restrictions.altmap);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
        return __va(res->start);

 err_add_memory:
        kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
        pgmap_array_delete(res);
 err_array:
        dev_pagemap_kill(pgmap);
        dev_pagemap_cleanup(pgmap);
        devmap_managed_enable_put();
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);
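
/*
 * Example (an illustrative sketch only, not compiled as part of this file):
 * a caller that cannot use devres pairs memremap_pages() with an explicit
 * memunmap_pages() at teardown time.  The "foo" names below are
 * hypothetical.
 *
 *	static struct dev_pagemap foo_pgmap;
 *
 *	static int foo_attach(struct resource *res, int nid)
 *	{
 *		void *addr;
 *
 *		foo_pgmap.type = MEMORY_DEVICE_DEVDAX;
 *		foo_pgmap.res = *res;
 *		addr = memremap_pages(&foo_pgmap, nid);
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *		return 0;
 *	}
 *
 *	static void foo_detach(void)
 *	{
 *		memunmap_pages(&foo_pgmap);
 *	}
 */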

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * An illustrative usage sketch follows the function body below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
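
/*
 * Example (an illustrative sketch with hypothetical driver names): per the
 * notes above, the caller fills in at least the res and type members, plus
 * whatever the chosen type requires, before the call; teardown then happens
 * automatically when the hosting device's driver is unbound, or earlier via
 * devm_memunmap_pages().
 *
 *	struct foo_dev {
 *		struct dev_pagemap pgmap;
 *	};
 *
 *	static const struct dev_pagemap_ops foo_pgmap_ops = {
 *		.page_free	= foo_page_free,
 *		.migrate_to_ram	= foo_migrate_to_ram,
 *	};
 *
 *	static int foo_probe(struct device *dev, struct foo_dev *foo,
 *			     struct resource *res)
 *	{
 *		void *addr;
 *
 *		foo->pgmap.type = MEMORY_DEVICE_PRIVATE;
 *		foo->pgmap.res = *res;
 *		foo->pgmap.ops = &foo_pgmap_ops;
 *		foo->pgmap.owner = foo;
 *		addr = devm_memremap_pages(dev, &foo->pgmap);
 *		return PTR_ERR_OR_ZERO(addr);
 *	}
 */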

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->res.start && phys <= pgmap->res.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
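
/*
 * Example (an illustrative sketch, hypothetical helper name): a pfn walker
 * can hand the previously returned pgmap back in so that successive pfns in
 * the same mapping reuse the cached reference, and it drops the final
 * reference itself.
 *
 *	static unsigned long foo_count_device_pfns(unsigned long start_pfn,
 *						   unsigned long nr_pfns)
 *	{
 *		struct dev_pagemap *pgmap = NULL;
 *		unsigned long pfn, count = 0;
 *
 *		for (pfn = start_pfn; pfn < start_pfn + nr_pfns; pfn++) {
 *			pgmap = get_dev_pagemap(pfn, pgmap);
 *			if (pgmap)
 *				count++;
 *		}
 *		if (pgmap)
 *			put_dev_pagemap(pgmap);
 *		return count;
 *	}
 */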

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
        /* notify page idle for dax */
        if (!is_device_private_page(page)) {
                wake_up_var(&page->_refcount);
                return;
        }

        /* Clear Active bit in case of parallel mark_page_accessed */
        __ClearPageActive(page);
        __ClearPageWaiters(page);

        mem_cgroup_uncharge(page);

        /*
         * When a device_private page is freed, the page->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of page->mapping may still identify the page as an
         * anonymous page. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared. One
         * example is:
         *
         *  migrate_vma_pages()
         *    migrate_vma_insert_page()
         *      page_add_new_anon_rmap()
         *        __page_set_anon_rmap()
         *          ...checks page->mapping, via a PageAnon(page) call,
         *            and incorrectly concludes that the page is an
         *            anonymous page. Therefore, it incorrectly and
         *            silently fails to set up the new anon rmap.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */