memremap: pass a struct dev_pagemap to ->kill and ->cleanup
kernel/memremap.c
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include <linux/hmm.h>

static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);
	struct hmm_devmem *devmem;

	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h.
	 */
	return devmem->page_fault(vma, addr, page, flags, pmdp);
}
#endif /* CONFIG_DEVICE_PRIVATE */
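
/*
 * Example: the shape of a ->page_fault() handler honoring the contract
 * described above. An illustrative sketch only; "example_devmem_fault" and
 * "example_migrate_to_ram" are hypothetical names, not part of this file
 * or of the HMM API.
 */
#if 0 /* illustrative sketch, not compiled */
static vm_fault_t example_devmem_fault(struct vm_area_struct *vma,
		unsigned long addr, const struct page *page,
		unsigned int flags, pmd_t *pmdp)
{
	/*
	 * Migrate the device page back to a freshly allocated system page.
	 * Any failure (device gone, out of memory, ...) must surface as
	 * VM_FAULT_SIGBUS per the contract above.
	 */
	if (example_migrate_to_ram(vma, addr, page, pmdp))
		return VM_FAULT_SIGBUS;
	return 0;
}
#endif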

static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
		       NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	pgmap->ops->kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	pgmap->ops->cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
			       align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref, type and ops members of @pgmap must be
 *    initialized by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    altmap_valid must be set to true
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
 *    at devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = altmap,
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
	    !pgmap->ops->cleanup) {
		WARN(1, "Missing reference count teardown definition\n");
		return ERR_PTR(-EINVAL);
	}

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size,
					&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	pgmap->ops->kill(pgmap);
	pgmap->ops->cleanup(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
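
/*
 * Example: the minimal caller-side setup the notes above describe -- res,
 * ref, type and ops (now with pgmap-taking ->kill() and ->cleanup()) all
 * initialized before the call. An illustrative sketch only; the "foo"
 * names are hypothetical, and the completion-based teardown is modeled on
 * how a driver like pmem pairs percpu_ref_kill() with a final wait.
 */
#if 0 /* illustrative sketch, not compiled */
struct foo {
	struct completion cmp;
	struct percpu_ref ref;
	struct dev_pagemap pgmap;
};

static void foo_ref_release(struct percpu_ref *ref)
{
	complete(&container_of(ref, struct foo, ref)->cmp);
}

static void foo_kill(struct dev_pagemap *pgmap)
{
	percpu_ref_kill(pgmap->ref);
}

static void foo_cleanup(struct dev_pagemap *pgmap)
{
	struct foo *foo = container_of(pgmap, struct foo, pgmap);

	wait_for_completion(&foo->cmp);	/* all pages put */
	percpu_ref_exit(&foo->ref);
}

static const struct dev_pagemap_ops foo_pagemap_ops = {
	.kill		= foo_kill,
	.cleanup	= foo_cleanup,
};

static void *foo_map(struct device *dev, struct foo *foo,
		struct resource *res)
{
	init_completion(&foo->cmp);
	if (percpu_ref_init(&foo->ref, foo_ref_release, 0, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);

	foo->pgmap.res = *res;		/* host-memory-like range */
	foo->pgmap.ref = &foo->ref;	/* must be live on entry */
	foo->pgmap.type = MEMORY_DEVICE_DEVDAX;
	foo->pgmap.ops = &foo_pagemap_ops;
	return devm_memremap_pages(dev, &foo->pgmap);
}
#endif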

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
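
/*
 * Example: the caching pattern get_dev_pagemap() is designed for -- pass the
 * pgmap from the previous iteration back in, so only lookups that leave the
 * cached range take the slow xarray path. An illustrative sketch only;
 * "example_walk" is a hypothetical caller, not part of this file.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_walk(unsigned long start_pfn, unsigned long nr_pages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		/* reuses the prior pgmap; releases it on a miss */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;
		/* ... operate on pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
#endif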

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
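
/*
 * Example: how a driver pairs the calls above -- pin the static key before
 * the first ZONE_DEVICE page can be handed out, drop it once the pagemap is
 * torn down. An illustrative sketch only; "foo_attach" and the foo fields
 * are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int foo_attach(struct device *dev, struct foo *foo)
{
	dev_pagemap_get_ops();	/* enable ->page_free() notifications */
	foo->base = devm_memremap_pages(dev, &foo->pgmap);
	if (IS_ERR(foo->base)) {
		dev_pagemap_put_ops();
		return PTR_ERR(foo->base);
	}
	return 0;
}
#endif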

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is free and the refcount is
	 * stable as nobody holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->ops->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */