Commit | Line | Data |
---|---|---|
f14f75b8 | 1 | /* |
e4a064df | 2 | * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved. |
f14f75b8 JS |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | * | |
8 | * A simple uncached page allocator using the generic allocator. This | |
9 | * allocator first utilizes the spare (spill) pages found in the EFI | |
10 | * memmap and will then start converting cached pages to uncached ones | |
11 | * at a granule at a time. Node awareness is implemented by having a | |
12 | * pool of pages per node. | |
13 | */ | |
14 | ||
15 | #include <linux/types.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/errno.h> | |
20 | #include <linux/string.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/efi.h> | |
23 | #include <linux/genalloc.h> | |
24 | #include <asm/page.h> | |
25 | #include <asm/pal.h> | |
26 | #include <asm/system.h> | |
27 | #include <asm/pgtable.h> | |
28 | #include <asm/atomic.h> | |
29 | #include <asm/tlbflush.h> | |
30 | #include <asm/sn/arch.h> | |
31 | ||
f14f75b8 | 32 | |
929f9727 | 33 | extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); |
f14f75b8 | 34 | |
eca7994f DN |
/*
 * Per-node state for the uncached allocator.  Each online node owns one
 * of these; the gen_pool hands out uncached granule-backed ranges.
 */
struct uncached_pool {
	struct gen_pool *pool;		/* generic allocator pool of uncached ranges */
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

/* cap on how many cached granules may be converted to uncached per node */
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];
f14f75b8 JS |
45 | |
46 | ||
47 | static void uncached_ipi_visibility(void *data) | |
48 | { | |
49 | int status; | |
eca7994f | 50 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; |
f14f75b8 JS |
51 | |
52 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | |
53 | if ((status != PAL_VISIBILITY_OK) && | |
54 | (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) | |
eca7994f | 55 | atomic_inc(&uc_pool->status); |
f14f75b8 JS |
56 | } |
57 | ||
58 | ||
59 | static void uncached_ipi_mc_drain(void *data) | |
60 | { | |
61 | int status; | |
eca7994f | 62 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; |
929f9727 | 63 | |
f14f75b8 | 64 | status = ia64_pal_mc_drain(); |
eca7994f DN |
65 | if (status != PAL_STATUS_SUCCESS) |
66 | atomic_inc(&uc_pool->status); | |
f14f75b8 JS |
67 | } |
68 | ||
69 | ||
929f9727 DN |
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 *
 * Returns 0 if at least one new chunk is available in the pool afterwards
 * (whether added here or concurrently by another task), -1 on failure or
 * if interrupted by a signal.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	/* snapshot the chunk count before taking the mutex, to detect races */
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	/* refuse to convert more than the per-node cap of granules */
	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	/* compute the uncached alias of the same physical range */
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	/* drop any stale cached translations for the new uncached range */
	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		/*
		 * Every other CPU must also switch to physical prefetch
		 * visibility; the IPI handler reports failures through
		 * uc_pool->status.
		 */
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	/* flush all cached copies of the granule before it goes uncached */
	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	/* drain outstanding memory operations locally, then on all CPUs */
	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
173 | ||
174 | ||
175 | /* | |
176 | * uncached_alloc_page | |
177 | * | |
929f9727 | 178 | * @starting_nid: node id of node to start with, or -1 |
e4a064df | 179 | * @n_pages: number of contiguous pages to allocate |
929f9727 | 180 | * |
e4a064df DN |
181 | * Allocate the specified number of contiguous uncached pages on the |
182 | * the requested node. If not enough contiguous uncached pages are available | |
183 | * on the requested node, roundrobin starting with the next higher node. | |
f14f75b8 | 184 | */ |
e4a064df | 185 | unsigned long uncached_alloc_page(int starting_nid, int n_pages) |
f14f75b8 | 186 | { |
929f9727 | 187 | unsigned long uc_addr; |
eca7994f | 188 | struct uncached_pool *uc_pool; |
929f9727 | 189 | int nid; |
f14f75b8 | 190 | |
929f9727 DN |
191 | if (unlikely(starting_nid >= MAX_NUMNODES)) |
192 | return 0; | |
f14f75b8 | 193 | |
929f9727 DN |
194 | if (starting_nid < 0) |
195 | starting_nid = numa_node_id(); | |
196 | nid = starting_nid; | |
f14f75b8 | 197 | |
929f9727 | 198 | do { |
2dca53a9 | 199 | if (!node_state(nid, N_HIGH_MEMORY)) |
929f9727 | 200 | continue; |
eca7994f DN |
201 | uc_pool = &uncached_pools[nid]; |
202 | if (uc_pool->pool == NULL) | |
929f9727 DN |
203 | continue; |
204 | do { | |
e4a064df DN |
205 | uc_addr = gen_pool_alloc(uc_pool->pool, |
206 | n_pages * PAGE_SIZE); | |
929f9727 DN |
207 | if (uc_addr != 0) |
208 | return uc_addr; | |
eca7994f | 209 | } while (uncached_add_chunk(uc_pool, nid) == 0); |
929f9727 DN |
210 | |
211 | } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); | |
f14f75b8 | 212 | |
929f9727 | 213 | return 0; |
f14f75b8 JS |
214 | } |
215 | EXPORT_SYMBOL(uncached_alloc_page); | |
216 | ||
217 | ||
218 | /* | |
219 | * uncached_free_page | |
220 | * | |
e4a064df DN |
221 | * @uc_addr: uncached address of first page to free |
222 | * @n_pages: number of contiguous pages to free | |
929f9727 | 223 | * |
e4a064df | 224 | * Free the specified number of uncached pages. |
f14f75b8 | 225 | */ |
e4a064df | 226 | void uncached_free_page(unsigned long uc_addr, int n_pages) |
f14f75b8 | 227 | { |
929f9727 | 228 | int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); |
eca7994f | 229 | struct gen_pool *pool = uncached_pools[nid].pool; |
f14f75b8 | 230 | |
929f9727 DN |
231 | if (unlikely(pool == NULL)) |
232 | return; | |
f14f75b8 | 233 | |
929f9727 DN |
234 | if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET) |
235 | panic("uncached_free_page invalid address %lx\n", uc_addr); | |
f14f75b8 | 236 | |
e4a064df | 237 | gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE); |
f14f75b8 JS |
238 | } |
239 | EXPORT_SYMBOL(uncached_free_page); | |
240 | ||
241 | ||
242 | /* | |
243 | * uncached_build_memmap, | |
244 | * | |
929f9727 DN |
245 | * @uc_start: uncached starting address of a chunk of uncached memory |
246 | * @uc_end: uncached ending address of a chunk of uncached memory | |
247 | * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc()) | |
248 | * | |
f14f75b8 JS |
249 | * Called at boot time to build a map of pages that can be used for |
250 | * memory special operations. | |
251 | */ | |
929f9727 DN |
252 | static int __init uncached_build_memmap(unsigned long uc_start, |
253 | unsigned long uc_end, void *arg) | |
f14f75b8 | 254 | { |
929f9727 | 255 | int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); |
eca7994f | 256 | struct gen_pool *pool = uncached_pools[nid].pool; |
929f9727 | 257 | size_t size = uc_end - uc_start; |
f14f75b8 | 258 | |
386d1d50 | 259 | touch_softlockup_watchdog(); |
f14f75b8 | 260 | |
929f9727 DN |
261 | if (pool != NULL) { |
262 | memset((char *)uc_start, 0, size); | |
263 | (void) gen_pool_add(pool, uc_start, size, nid); | |
f14f75b8 | 264 | } |
f14f75b8 JS |
265 | return 0; |
266 | } | |
267 | ||
268 | ||
929f9727 DN |
269 | static int __init uncached_init(void) |
270 | { | |
271 | int nid; | |
f14f75b8 | 272 | |
2dca53a9 | 273 | for_each_node_state(nid, N_ONLINE) { |
eca7994f DN |
274 | uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); |
275 | mutex_init(&uncached_pools[nid].add_chunk_mutex); | |
f14f75b8 JS |
276 | } |
277 | ||
929f9727 | 278 | efi_memmap_walk_uc(uncached_build_memmap, NULL); |
f14f75b8 JS |
279 | return 0; |
280 | } | |
281 | ||
282 | __initcall(uncached_init); |