/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *		Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

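/*
 * ia64_ctx hands out mm contexts (region IDs): bitmap tracks RIDs in
 * use, flushmap those freed since the last wrap, and next/limit bound
 * the current allocation window; see wrap_mmu_context() below.
 */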
struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on this cpu */
DEFINE_PER_CPU(u8, ia64_tr_used); /* highest TR slot number used by the kernel */
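
/*
 * Per-cpu shadow of the pinned translation registers: the first
 * IA64_TR_ALLOC_MAX entries of each array mirror itrs, the second half
 * dtrs.  The copies feed the overlap checks in ia64_itr_entry() and are
 * kept for the MCA handler.
 */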
struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	if (!ia64_ctx.bitmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
	if (!ia64_ctx.flushmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
}

/*
 * Acquire ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

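	/*
	 * mms freed since the last wrap have their RID bits set in
	 * flushmap; xor-ing those bits out of the in-use bitmap makes
	 * the RIDs available again, and the flush forced below discards
	 * any stale TLB entries that still use them.
	 */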
	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

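/*
 * down_spin() takes a ticket with a fetchadd on ->ticket and waits
 * until ->serve (which up_spin() advances) catches up; time_before()
 * keeps the comparisons wraparound-safe.  The wait loop leans on the
 * ALAT: after ia64_invala() the first ld8.c.nc really loads ->serve and
 * allocates an ALAT entry, and as long as no other cpu stores to
 * ->serve the entry stays valid and the check loads spin without
 * generating memory traffic; a remote up_spin() invalidates the entry
 * and forces a fresh load.
 */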
static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;
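
/*
 * nptcg is the platform limit on simultaneous ptc.g purges;
 * need_ptcg_sem is cleared when the limit can never be exceeded
 * (unlimited purges, or no more cpus than purges).  Once the first
 * global purge has run, toolatetochangeptcgsem forbids lowering the
 * limit, since the spinaphore may already be in use.
 */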

/*
 * Kernel parameter "nptcg=" overrides the maximum number of concurrent
 * global TLB purges, which is otherwise reported by either PAL or the
 * SAL PALO table.
 *
 * There is no sanity checking of the nptcg value.  It is the user's
 * responsibility to supply a value that is valid for the platform;
 * otherwise the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);

/*
 * The maximum number of simultaneous ptc.g purges in the system can be
 * defined by PAL_VM_SUMMARY (in which case we should take the smallest
 * value for any cpu in the system) or by the PAL override table (in
 * which case we should ignore the value from PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges defined in either PAL_VM_SUMMARY or the
 * PAL override table; in that case we ignore both of them.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully setup until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if ((nptcg_from == NPTCG_FROM_PAL) && (max_purges == 0))
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;
	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
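
/*
 * Example: booting an 8-cpu system with "nptcg=4" takes the
 * NPTCG_FROM_KERNEL_PARAMETER path above: nptcg becomes 4 and the
 * spinaphore is reinitialized to 4; later PAL/PALO reports only
 * recompute need_ptcg_sem, which stays set because num_possible_cpus()
 * exceeds nptcg, so at most four cpus issue ptc.g at a time.
 */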

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
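
/*
 * Purge the entire local TLB with ptc.e.  PAL_PTCE_INFO (cached by
 * ia64_tlb_init() below) supplies a base address and two count/stride
 * pairs; architecturally, issuing ptc.e at every address of that grid
 * flushes the whole TLB.
 */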
void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
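
/*
 * Round the range up to a purge page size the cpu supports (bit k set
 * in purge.mask means 2^k-byte purges work), then purge with ptc.l if
 * this cpu is the only user of the mm, or hand off to the global
 * purge path otherwise.
 */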
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif

	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;

		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}

/*
 * ia64_insert_tr in virtual mode.  Allocate a TR slot.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entry to be inserted.
 * log_size : log2 of the range to be covered.
 *
 * Return value: < 0  : error number.
 *		 >= 0 : slot number allocated for the TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries. */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!!\n");
					goto out;
				}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 0x1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 0x2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 0x3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	ia64_srlz_i();

	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
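
/*
 * Usage sketch with hypothetical va/pte values: pin a 16MB (log_size
 * 24) data mapping in a dtr slot and release it again; the caller must
 * keep preemption disabled across the pair:
 *
 *	int slot = ia64_itr_entry(0x2, va, pte, 24);
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x2, slot);
 */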