 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>
#endif /* CONFIG_SMP */
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
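 *
 * Roughly, the flow below in C-like pseudocode (an illustrative
 * sketch with hypothetical helper names, not kernel API):
 *
 *	pte = walk_page_tables(ea);		// pgdir/pmd lookup
 *	if (!pte || (access & ~*pte))
 *		return;				// let the caller handle it
 *	*pte |= _PAGE_ACCESSED | _PAGE_HASHPTE	// atomic lwarx/stwcx.
 *		| (write ? _PAGE_DIRTY : 0);
 *	create_hpte(...);			// insert into the hash table
 *	fast_exception_return();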
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
	beq-	hash_page_out		/* return if no mapping */
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwimi	r8,r4,23,20,28		/* compute pte address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 * Update the linux PTE atomically.  We do the lwarx up-front
 * because almost always, there won't be a permission violation
 * and there won't already be an HPTE, and thus we will have
 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
 * If PTE_64BIT is set, the low word is the flags word; use that
 * word for locking since it contains all the interesting bits.
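 *
 * An illustrative C rendering of the lwarx/stwcx. sequence below
 * (a sketch only; store_conditional is a hypothetical helper):
 *
 *	do {
 *		old = *ptep;			// lwarx (takes reservation)
 *		if (access & ~old)
 *			goto hash_page_out;	// permission violation
 *		new = old | r0_bits;		// ACCESSED/HASHPTE[/DIRTY]
 *	} while (!store_conditional(ptep, new));	// stwcx., retry on loss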
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
	bne-	hash_page_out		/* return if access not permitted */
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */
	mfsrin	r3,r4			/* get segment reg for segment */
	bl	create_hpte		/* add the hash table entry */
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	/* Return from the exception */
	b	fast_exception_return
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif /* CONFIG_SMP */
 * Add an entry for a particular page to the hash table.
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
_GLOBAL(add_hash_page)
	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
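	/*
	 * i.e., in C (illustrative; create_hpte masks the result to
	 * 24 bits):
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) & 0xf) * 0x111;
	 */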
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
#endif /* CONFIG_SMP */
 * We disable interrupts here, even on UP, because we don't
 * want to race with hash_page, and because we want the
 * _PAGE_HASHPTE bit to be a reliable indication of whether
 * the HPTE exists (or at least whether one did once).
 * We also turn off the MMU for data accesses so that we
 * can't take a hash table miss (assuming the code is
 * covered by a BAT). -- paulus
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
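	/*
	 * Note: rlwinm with MB > ME yields a wrap-around mask, i.e.
	 * "keep everything except the excluded bit": 17,15 keeps all
	 * bits but bit 16 (MSR_EE), and 28,26 all but bit 27 (MSR_DR).
	 */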
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
 * If _PAGE_HASHPTE was already set, we don't replace the existing
 * HPTE, so we just unlock and return.
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	stw	r0,0(r6)		/* clear mmu_hash_lock */
	/* reenable interrupts and DR */
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE).  r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
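 *
 * Roughly, as an illustrative C-style signature (not a real
 * prototype -- the arguments arrive in registers):
 *	create_hpte(vsid=r3, va=r4, pte=r5, old_pte=r6[, pte_hi=r10])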
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
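/*
 * Worked example for the values above: with Hash_bits = 12 the table
 * has 2^12 = 4096 PTEGs of 64 bytes each, i.e. 256kB in total, and
 * Hash_msk = 4095 * 64 = 0x3ffc0, selecting the PTEG-offset bits.
 */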
/* defines for the PTE format for 32-bit PPCs */
#define LG_PTEG_SIZE	6
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31
#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
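/*
 * With the example values above (LG_PTEG_SIZE = 6, Hash_bits = 12)
 * this gives HASH_LEFT = 14 and HASH_RIGHT = 25, so the patched
 * rlwimi/rlwinm below mask bits 14..25 -- exactly the Hash_msk
 * field of a PTEG address.
 */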
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
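	/*
	 * In C terms (a sketch; PP values as per the comment on the
	 * andc above):
	 *
	 *	if (!(pte & _PAGE_USER))
	 *		pp = 0;		// kernel: supervisor read/write
	 *	else if ((pte & _PAGE_RW) && (pte & _PAGE_DIRTY))
	 *		pp = 1;		// user page, writable
	 *	else
	 *		pp = 3;		// user page, read-only
	 */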
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */
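	/*
	 * i.e. (illustrative):
	 *	pte_hi = PTE_V | ((vsid << 7) & 0x7fffff80)
	 *	       | ((va >> 22) & 0x3f);	// API field
	 */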
	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */
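	/*
	 * Equivalently, in C (a sketch; hash_base_phys stands for the
	 * patched Hash_base - PAGE_OFFSET value):
	 *
	 *	hash = (vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1);
	 *	pteg = hash_base_phys + hash * 64;  // 8 HPTEs of 8 bytes
	 */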
 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
 * if it is clear, meaning that the HPTE isn't there already...
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
2:	LDPTEu	r6,HPTE_SIZE(r4)
	xori	r5,r5,PTE_H		/* clear H bit again */
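	/*
	 * The xoris/xori pair above flips exactly the Hash_msk bits of
	 * the PTEG address (for Hash_bits >= 10 the low half of
	 * Hash_msk equals -PTEG_SIZE & 0xffff), so in C terms:
	 *
	 *	secondary_pteg = primary_pteg ^ Hash_msk;  // ~hash & mask
	 */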
	/* Search the primary PTEG for an empty slot */
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
2:	LDPTEu	r6,HPTE_SIZE(r4)
	xori	r5,r5,PTE_H		/* clear H bit again */
 * Choose an arbitrary slot in the primary PTEG to overwrite.
 * Since both the primary and secondary PTEGs are full, and we
 * have no information that the PTEs in the primary PTEG are
 * more important or useful than those in the secondary PTEG,
 * and we know there is a definite (although small) speed
 * advantage to putting the PTE in the primary PTEG, we always
 * put the PTE in the primary PTEG.
 * In addition, we skip any slot that is mapping kernel text in
 * order to avoid a deadlock when not using BAT mappings if
 * trying to hash in the kernel hash code itself after it has
 * already taken the hash table lock.  This works in conjunction
 * with pre-faulting of the kernel text.
 * If the hash table bucket is full of kernel text entries, we'll
 * lock up here, but that shouldn't happen.
1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
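	/*
	 * i.e. next_slot = (next_slot + HPTE_SIZE) % (8 * HPTE_SIZE):
	 * a simple round-robin over the 8 slots of the PTEG.  (The @l
	 * of next_slot equals that of next_slot - PAGE_OFFSET, since
	 * PAGE_OFFSET has zero low 16 bits.)
	 */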
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	ori	r6,r6,etext@l		/* get etext */
	cmpl	cr0,r0,r6		/* compare and try again */
	/* Store PTE in PTEG */
	STPTE	r8,HPTE_SIZE/2(r4)
#else /* CONFIG_SMP */
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */
	sync				/* make sure pte updates get to memory */
 * Flush the entry for a particular page from the hash table.
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 * We assume that there is a hash table in use (Hash != 0).
_GLOBAL(flush_hash_pages)
 * We disable interrupts here, even on UP, because we want
 * the _PAGE_HASHPTE bit to be a reliable indication of
 * whether the HPTE exists (or at least whether one did once).
 * We also turn off the MMU for data accesses so that we
 * can't take a hash table miss (assuming the code is
 * covered by a BAT). -- paulus
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
	rlwimi	r5,r4,23,20,28
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	andi.	r0,r0,_PAGE_HASHPTE
	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */
	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
 * already clear, we're done (for this pte).  If not,
 * clear it (atomically) and proceed. -- paulus.
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r8) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */
	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
2:	LDPTEu	r0,HPTE_SIZE(r12)
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */
	STPTE	r0,0(r12)		/* invalidate entry */
	tlbie	r4			/* in hw tlb too */
8:	ble	cr1,9f			/* if all ptes checked */
	lwz	r0,0(r5)		/* check next pte */
	andi.	r0,r0,_PAGE_HASHPTE
	stw	r0,0(r9)		/* clear mmu_hash_lock */
EXPORT_SYMBOL(flush_hash_pages)
 * Flush an entry from the TLB
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
 * Flush the entire TLB. 603/603e only
#if defined(CONFIG_SMP)
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */