/* kernel/memremap.c */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

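/*
 * Worked example (illustrative values, not taken from a real resource):
 * for a range whose start pfn is 0x100400 (naturally aligned to order 10)
 * with 0x8000 pages remaining, mask = 0x100400 | 0x8000 and its lowest
 * set bit is bit 10, so order_at() returns 10 and foreach_order_pgoff()
 * first advances pgoff by 1024 pages.  Once pgoff reaches nr_pages,
 * order_at() returns ULONG_MAX and the walk terminates.  The effect is
 * to cover the resource with the fewest, largest aligned power-of-2
 * radix entries.
 */
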
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
			unsigned long addr,
			swp_entry_t entry,
			unsigned int flags,
			pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */

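/*
 * Illustrative sketch of the shape such a ->page_fault() callback takes
 * in a hypothetical driver (the mydev_* names are assumptions, not part
 * of the kernel API beyond the typedef in include/linux/memremap.h):
 *
 *	static int mydev_devmem_fault(struct vm_area_struct *vma,
 *			unsigned long addr, const struct page *page,
 *			unsigned int flags, pmd_t *pmdp)
 *	{
 *		// Migrate the device page back to a newly allocated
 *		// system page, then report success, or return
 *		// VM_FAULT_SIGBUS if the migration cannot be completed.
 *	}
 */
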
static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true.
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
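
/*
 * Illustrative usage sketch for devm_memremap_pages() (hypothetical
 * driver; mydev and its percpu_ref are assumptions, not part of this
 * file):
 *
 *	struct dev_pagemap *pgmap;
 *	void *addr;
 *
 *	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *	pgmap->res = *res;		// host memory range to remap
 *	pgmap->ref = &mydev->ref;	// live percpu_ref owned by the driver
 *	pgmap->type = ...;		// one of the MEMORY_DEVICE_* types
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * Per note 3/ above, the driver must kill and drain pgmap->ref before
 * the devm release action runs.
 */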

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

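/*
 * For illustration (values are assumptions): with altmap->reserve == 2
 * and altmap->free == 1024, the first 1026 pfns of the range hold the
 * reserved head plus the space set aside for the memmap itself, so
 * vmem_altmap_offset() returns 1026 and pfn_first() starts handing out
 * device pages at base + 1026.
 */
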
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
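
/*
 * Illustrative caller pattern (hypothetical loop, added for clarity):
 * when walking many pfns, feed the previous result back in so that pfns
 * covered by the same pagemap skip the radix-tree lookup:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++)
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */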

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL_GPL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount dropped to 1 the page is now free and the refcount
	 * is stable, as nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear the Active bit in case of a parallel mark_page_accessed() */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */
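
/*
 * For illustration, a hypothetical ->page_free() callback (the member
 * invoked above together with its pgmap->data cookie; the mydev_* names
 * are assumptions, not part of this file):
 *
 *	static void mydev_page_free(struct page *page, void *data)
 *	{
 *		struct mydev *mydev = data;
 *
 *		// The page is idle again; return it to the
 *		// driver-private allocator.
 *		mydev_free_block(mydev, page_to_pfn(page));
 *	}
 */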