[linux-2.6-block.git] / kernel / memremap.c
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
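/*
 * Illustrative sketch, not part of the original file: one way a
 * hypothetical caller might map a non-System-RAM range as ordinary
 * cacheable memory.  The address, size, and buffer names are made up;
 * note that the result is a plain pointer, not an __iomem cookie:
 *
 *	void *base = memremap(0x100000000ULL, 0x200000, MEMREMAP_WB);
 *
 *	if (!base)
 *		return -ENXIO;
 *	memcpy(buf, base, len);
 *	memunmap(base);
 */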
static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
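/*
 * Illustrative sketch, not part of the original file: devm_memremap()
 * from a hypothetical probe() path.  The mapping is released
 * automatically when the device is unbound, so an explicit
 * devm_memunmap() is only needed for early teardown:
 *
 *	addr = devm_memremap(dev, res->start, resource_size(res),
 *			MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */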
pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = align_start; key <= align_end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}
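/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * lookup of the dev_pagemap covering a physical address.  Holding
 * rcu_read_lock() only protects the lookup itself; a caller that needs
 * the result after dropping the lock must pin it via its percpu_ref:
 *
 *	rcu_read_lock();
 *	pgmap = find_dev_pagemap(phys);
 *	if (pgmap)
 *		percpu_ref_get(pgmap->ref);
 *	rcu_read_unlock();
 */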
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
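/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * ZONE_DEVICE user (e.g. a pmem-style driver) handing a live percpu_ref
 * and its device resource to devm_memremap_pages().  The names res and
 * my_ref are made up.  The return value is a direct-map pointer, and
 * struct pages for the range can then be obtained with pfn_to_page():
 *
 *	addr = devm_memremap_pages(dev, &res, &my_ref, NULL);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	page = pfn_to_page(PHYS_PFN(res.start));
 */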
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
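/*
 * Illustrative numbers, not from the original file: for an altmap with
 * reserve = 2 and free = 1024, vmem_altmap_offset() returns 1026, i.e.
 * pfn_to_page() only becomes valid 1026 pfns into the device range,
 * because the leading pfns are reserved and/or used to store the
 * memmap itself.
 */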
#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */