kernel/memremap.c
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void *data)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

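/*
 * devmap_managed_enable counts the pgmap instances that need put_page()
 * interception: the first user enables the devmap_managed_key static
 * branch and the last one going away disables it again, so the page-put
 * fast path is only patched while device-managed pages actually exist.
 */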
static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	if (!pgmap->ops || !pgmap->ops->page_free) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
}
#else
static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

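/*
 * Clearing the range and then waiting out an RCU grace period guarantees
 * that lookups in get_dev_pagemap(), which walk the xarray under
 * rcu_read_lock(), can no longer observe the dying pgmap once this
 * returns.
 */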
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return (pgmap->res.start >> PAGE_SHIFT) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

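/*
 * Device page ranges can span terabytes, and the walk in
 * devm_memremap_pages_release() has no other scheduling point, so yield
 * the CPU every 1024 pfns to avoid soft lockups.
 */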
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

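/*
 * Teardown happens in two phases: "kill" takes the percpu ref out of its
 * fast percpu mode, and "cleanup" waits until the last reference is gone
 * before the ref itself is torn down.  Drivers that supplied their own
 * ref must supply both callbacks; otherwise the internal ref and
 * completion are used.
 */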
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
}

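/*
 * Release path: first make sure no new references can be taken and all
 * page references are dropped, then unwind the arch mapping and the
 * tracking state in the reverse of the order it was set up.
 */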
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
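 *
 * As an illustration only (the driver name here is hypothetical), a
 * minimal caller might look like:
 *
 *	static int my_probe(struct device *dev, struct resource *res)
 *	{
 *		struct dev_pagemap *pgmap;
 *		void *addr;
 *
 *		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *		if (!pgmap)
 *			return -ENOMEM;
 *		pgmap->res = *res;
 *		pgmap->type = MEMORY_DEVICE_DEVDAX;
 *		addr = devm_memremap_pages(dev, pgmap);
 *		return PTR_ERR_OR_ZERO(addr);
 *	}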
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	bool need_devmap_managed = true;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(dev, pgmap);
		if (error)
			return ERR_PTR(error);
	}

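	/*
	 * Memory hotplug operates on whole memory sections, so expand the
	 * requested range outward to the nearest section boundaries before
	 * checking for conflicts and adding the pages.
	 */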
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(align_start, align_size,
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible from the CPU, so we
	 * do not want to create a linear mapping for it the way
	 * arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size,
				&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap_altmap(pgmap));
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

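/*
 * Give back @nr_pfns previously taken from the device reservation; only
 * the accounting is adjusted here, the caller is responsible for tearing
 * down the memmap that used them.
 */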
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
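 *
 * Returns NULL if @pfn is not backed by a dev_pagemap or if the pgmap's
 * reference count is already dead; a non-NULL return carries a reference
 * that the caller must drop with put_dev_pagemap().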
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
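/*
 * ZONE_DEVICE pages idle at a refcount of one, not zero: the extra
 * reference keeps them off the normal free path while the device is
 * live.  A drop to 1 therefore means the last user is gone and the page
 * can be handed back to the driver via page_free(); a drop all the way
 * to 0 takes the regular __put_page() path.
 */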
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount dropped to 1 the page is free, and the count is
	 * stable because nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->ops->page_free(page);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */