// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
19 | ||
20 | #include <linux/mm.h> | |
b95f1b31 | 21 | #include <linux/export.h> |
1da177e4 LT |
22 | #include <linux/swap.h> |
23 | #include <linux/bio.h> | |
24 | #include <linux/pagemap.h> | |
25 | #include <linux/mempool.h> | |
26 | #include <linux/blkdev.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/hash.h> | |
29 | #include <linux/highmem.h> | |
eac79005 | 30 | #include <linux/kgdb.h> |
1da177e4 | 31 | #include <asm/tlbflush.h> |
186525bd | 32 | #include <linux/vmalloc.h> |
a8e23a29 | 33 | |
/*
 * Virtual_count is not a pure "count".
 * 0 means that it is not mapped, and has not been mapped
 *   since a TLB flush - it is usable.
 * 1 means that there are no users, but it has been mapped
 *   since the last TLB flush - so we can't use it.
 * n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

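/*
 * Illustrative walk-through of the transitions above (annotation, not
 * from the original source): a slot starts at 0 (unmapped, usable).
 * kmap_high() on a page without a mapping installs one and sets the
 * count to 2 (mapped plus one user); each further kmap_high() of the
 * same page increments it. kunmap_high() decrements, so the last user
 * leaves the count at 1 (unused, but possibly still in some TLB).
 * Only flush_all_zero_pkmaps() moves a slot from 1 back to 0.
 */
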
/*
 * Architectures with an aliasing data cache may define the following
 * family of helper functions in their asm/highmem.h to control the cache
 * color of virtual addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for mapping inside the PKMAP region for a page with
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the
 * given color has wrapped around the PKMAP region end. When this happens
 * an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
143 | ||
13f876ba | 144 | struct page *__kmap_to_page(void *vaddr) |
5a178119 MG |
145 | { |
146 | unsigned long addr = (unsigned long)vaddr; | |
147 | ||
498c2280 | 148 | if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { |
4de22c05 | 149 | int i = PKMAP_NR(addr); |
5a178119 MG |
150 | return pte_page(pkmap_page_table[i]); |
151 | } | |
152 | ||
153 | return virt_to_page(addr); | |
154 | } | |
13f876ba | 155 | EXPORT_SYMBOL(__kmap_to_page); |
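
/*
 * Usage sketch (illustrative, not from the original source): callers
 * normally reach this through the kmap_to_page() wrapper to recover the
 * struct page behind a kmap'ed address, e.g. when only the virtual
 * address survived across a layer:
 *
 *	struct page *page = kmap_to_page(vaddr);
 *
 * Addresses outside the PKMAP region are assumed to be lowmem and are
 * translated with virt_to_page().
 */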

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
196 | ||
13f876ba | 197 | void __kmap_flush_unused(void) |
ce6234b5 | 198 | { |
3297e760 | 199 | lock_kmap(); |
ce6234b5 | 200 | flush_all_zero_pkmaps(); |
3297e760 | 201 | unlock_kmap(); |
ce6234b5 JF |
202 | } |
203 | ||
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_high);

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
			kaddr = kmap_atomic(page + i);

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1)
				memset(kaddr + start1, 0, this_end - start1);
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2)
				memset(kaddr + start2, 0, this_end - start2);
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
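
/*
 * Usage sketch (illustrative, not from the original source): zero two
 * disjoint ranges of a (possibly compound) page in one pass. For
 * example, punching out everything except bytes [100, 200) of a THP:
 *
 *	zero_user_segments(page, 0, 100, 200, page_size(page));
 *
 * Both ranges are byte offsets into the whole compound page, not into
 * the individual subpages.
 */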
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is left unused, acting as a guard page.
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so the resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
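
/*
 * Usage sketch (illustrative, not from the original source): this is
 * the backend of kmap_local_page(). A typical short, local access:
 *
 *	void *addr = kmap_local_page(page);
 *	memset(addr, 0, PAGE_SIZE);
 *	kunmap_local(addr);
 *
 * Unlike kmap(), the mapping is local to the acquiring context and
 * must not be handed to other tasks.
 */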
552 | ||
553 | void kunmap_local_indexed(void *vaddr) | |
554 | { | |
555 | unsigned long addr = (unsigned long) vaddr & PAGE_MASK; | |
556 | pte_t *kmap_pte = kmap_get_pte(); | |
557 | int idx; | |
558 | ||
559 | if (addr < __fix_to_virt(FIX_KMAP_END) || | |
560 | addr > __fix_to_virt(FIX_KMAP_BEGIN)) { | |
0e91a0c6 TG |
561 | if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) { |
562 | /* This _should_ never happen! See above. */ | |
563 | WARN_ON_ONCE(1); | |
564 | return; | |
565 | } | |
2a656cad TG |
566 | /* |
567 | * Handle mappings which were obtained by kmap_high_get() | |
568 | * first as the virtual address of such mappings is below | |
569 | * PAGE_OFFSET. Warn for all other addresses which are in | |
570 | * the user space part of the virtual address space. | |
571 | */ | |
572 | if (!kmap_high_unmap_local(addr)) | |
573 | WARN_ON_ONCE(addr < PAGE_OFFSET); | |
298fa1ad TG |
574 | return; |
575 | } | |
576 | ||
577 | preempt_disable(); | |
578 | idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); | |
579 | WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | |
580 | ||
581 | arch_kmap_local_pre_unmap(addr); | |
582 | pte_clear(&init_mm, addr, kmap_pte - idx); | |
583 | arch_kmap_local_post_unmap(addr); | |
5fbda3ec | 584 | current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); |
298fa1ad TG |
585 | kmap_local_idx_pop(); |
586 | preempt_enable(); | |
f3ba3c71 | 587 | migrate_enable(); |
298fa1ad TG |
588 | } |
589 | EXPORT_SYMBOL(kunmap_local_indexed); | |

/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}
636 | ||
637 | void __kmap_local_sched_in(void) | |
638 | { | |
639 | struct task_struct *tsk = current; | |
640 | pte_t *kmap_pte = kmap_get_pte(); | |
641 | int i; | |
642 | ||
643 | /* Restore kmaps */ | |
644 | for (i = 0; i < tsk->kmap_ctrl.idx; i++) { | |
645 | pte_t pteval = tsk->kmap_ctrl.pteval[i]; | |
646 | unsigned long addr; | |
647 | int idx; | |
648 | ||
649 | /* With debug all even slots are unmapped and act as guard */ | |
650 | if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { | |
651 | WARN_ON_ONCE(!pte_none(pteval)); | |
652 | continue; | |
653 | } | |
654 | if (WARN_ON_ONCE(pte_none(pteval))) | |
655 | continue; | |
656 | ||
657 | /* See comment in __kmap_local_sched_out() */ | |
658 | idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); | |
659 | addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | |
660 | set_pte_at(&init_mm, addr, kmap_pte - idx, pteval); | |
661 | arch_kmap_local_post_map(addr, pteval); | |
662 | } | |
663 | } | |
664 | ||
665 | void kmap_local_fork(struct task_struct *tsk) | |
666 | { | |
667 | if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) | |
668 | memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); | |
669 | } | |
670 | ||
298fa1ad | 671 | #endif |

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;	/* List of page_address_maps */
	spinlock_t lock;	/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}
782 | ||
955cc774 | 783 | #endif /* defined(HASHED_PAGE_VIRTUAL) */ |