| 1 | /* |
| 2 | * This program is free software; you can redistribute it and/or |
| 3 | * modify it under the terms of the GNU General Public License |
| 4 | * as published by the Free Software Foundation; either version 2 |
| 5 | * of the License, or (at your option) any later version. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | * |
| 12 | * You should have received a copy of the GNU General Public License |
| 13 | * along with this program; if not, write to the Free Software |
| 14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 15 | * |
| 16 | * Copyright (C) 2004 Mips Technologies, Inc |
| 17 | * Copyright (C) 2008 Kevin D. Kissell |
| 18 | */ |
| 19 | |
| 20 | #include <linux/clockchips.h> |
| 21 | #include <linux/kernel.h> |
| 22 | #include <linux/sched.h> |
| 23 | #include <linux/smp.h> |
| 24 | #include <linux/cpumask.h> |
| 25 | #include <linux/interrupt.h> |
| 26 | #include <linux/kernel_stat.h> |
| 27 | #include <linux/module.h> |
| 28 | #include <linux/ftrace.h> |
| 29 | |
| 30 | #include <asm/cpu.h> |
| 31 | #include <asm/processor.h> |
| 32 | #include <asm/atomic.h> |
| 33 | #include <asm/system.h> |
| 34 | #include <asm/hardirq.h> |
| 35 | #include <asm/hazards.h> |
| 36 | #include <asm/irq.h> |
| 37 | #include <asm/mmu_context.h> |
| 38 | #include <asm/mipsregs.h> |
| 39 | #include <asm/cacheflush.h> |
| 40 | #include <asm/time.h> |
| 41 | #include <asm/addrspace.h> |
| 42 | #include <asm/smtc.h> |
| 43 | #include <asm/smtc_proc.h> |
| 44 | |
| 45 | /* |
| 46 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask |
| 47 | * in do_IRQ. These are passed in setup_irq_smtc() and stored |
| 48 | * in this table. |
| 49 | */ |
| 50 | unsigned long irq_hwmask[NR_IRQS]; |
| 51 | |
| 52 | #define LOCK_MT_PRA() \ |
| 53 | local_irq_save(flags); \ |
| 54 | mtflags = dmt() |
| 55 | |
| 56 | #define UNLOCK_MT_PRA() \ |
| 57 | emt(mtflags); \ |
| 58 | local_irq_restore(flags) |
| 59 | |
| 60 | #define LOCK_CORE_PRA() \ |
| 61 | local_irq_save(flags); \ |
| 62 | mtflags = dvpe() |
| 63 | |
| 64 | #define UNLOCK_CORE_PRA() \ |
| 65 | evpe(mtflags); \ |
| 66 | local_irq_restore(flags) |
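
/*
 * Usage sketch: callers are expected to declare the locals these
 * macros expand against, e.g.
 *
 *	unsigned long flags;
 *	int mtflags;
 *
 *	LOCK_MT_PRA();
 *	settc(...); ... MFTR/MTTR accesses to another TC ...
 *	UNLOCK_MT_PRA();
 *
 * as done in smtc_boot_secondary() below.
 */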
| 67 | |
| 68 | /* |
| 69 | * Data structures purely associated with SMTC parallelism |
| 70 | */ |
| 71 | |
| 72 | |
| 73 | /* |
| 74 | * Table for tracking ASIDs whose lifetime is prolonged. |
| 75 | */ |
| 76 | |
| 77 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
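
/*
 * Each entry above is a bit vector, indexed by CPU number, of TCs
 * last observed running with that ASID; bits are set in
 * smtc_get_new_mmu_context(), whose allocation loop skips any ASID
 * with a nonzero entry.
 */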
| 78 | |
| 79 | /* |
| 80 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
| 81 | */ |
| 82 | |
| 83 | #define IPIBUF_PER_CPU 4 |
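
/*
 * The free pool set up in smtc_prepare_cpus() holds NR_CPUS *
 * IPIBUF_PER_CPU messages unless overridden with ipibufs= on the
 * kernel command line.
 */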
| 84 | |
| 85 | struct smtc_ipi_q IPIQ[NR_CPUS]; |
| 86 | static struct smtc_ipi_q freeIPIq; |
| 87 | |
| 88 | |
| 89 | /* Forward declarations */ |
| 90 | |
| 91 | void ipi_decode(struct smtc_ipi *); |
| 92 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); |
| 93 | static void setup_cross_vpe_interrupts(unsigned int nvpe); |
| 94 | void init_smtc_stats(void); |
| 95 | |
| 96 | /* Global SMTC Status */ |
| 97 | |
| 98 | unsigned int smtc_status; |
| 99 | |
| 100 | /* Boot command line configuration overrides */ |
| 101 | |
| 102 | static int vpe0limit; |
| 103 | static int ipibuffers; |
| 104 | static int nostlb; |
| 105 | static int asidmask; |
| 106 | unsigned long smtc_asid_mask = 0xff; |
| 107 | |
| 108 | static int __init vpe0tcs(char *str) |
| 109 | { |
| 110 | get_option(&str, &vpe0limit); |
| 111 | |
| 112 | return 1; |
| 113 | } |
| 114 | |
| 115 | static int __init ipibufs(char *str) |
| 116 | { |
| 117 | get_option(&str, &ipibuffers); |
| 118 | return 1; |
| 119 | } |
| 120 | |
| 121 | static int __init stlb_disable(char *s) |
| 122 | { |
| 123 | nostlb = 1; |
| 124 | return 1; |
| 125 | } |
| 126 | |
| 127 | static int __init asidmask_set(char *str) |
| 128 | { |
| 129 | get_option(&str, &asidmask); |
| 130 | switch (asidmask) { |
| 131 | case 0x1: |
| 132 | case 0x3: |
| 133 | case 0x7: |
| 134 | case 0xf: |
| 135 | case 0x1f: |
| 136 | case 0x3f: |
| 137 | case 0x7f: |
| 138 | case 0xff: |
| 139 | smtc_asid_mask = (unsigned long)asidmask; |
| 140 | break; |
| 141 | default: |
| 142 | printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); |
| 143 | } |
| 144 | return 1; |
| 145 | } |
| 146 | |
| 147 | __setup("vpe0tcs=", vpe0tcs); |
| 148 | __setup("ipibufs=", ipibufs); |
| 149 | __setup("nostlb", stlb_disable); |
| 150 | __setup("asidmask=", asidmask_set); |
| 151 | |
| 152 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG |
| 153 | |
| 154 | static int hang_trig; |
| 155 | |
| 156 | static int __init hangtrig_enable(char *s) |
| 157 | { |
| 158 | hang_trig = 1; |
| 159 | return 1; |
| 160 | } |
| 161 | |
| 162 | |
| 163 | __setup("hangtrig", hangtrig_enable); |
| 164 | |
| 165 | #define DEFAULT_BLOCKED_IPI_LIMIT 32 |
| 166 | |
| 167 | static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; |
| 168 | |
| 169 | static int __init tintq(char *str) |
| 170 | { |
| 171 | get_option(&str, &timerq_limit); |
| 172 | return 1; |
| 173 | } |
| 174 | |
| 175 | __setup("tintq=", tintq); |
| 176 | |
| 177 | static int imstuckcount[2][8]; |
| 178 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ |
| 179 | static int vpemask[2][8] = { |
| 180 | {0, 0, 1, 0, 0, 0, 0, 1}, |
| 181 | {0, 0, 0, 0, 0, 0, 0, 1} |
| 182 | }; |
| 183 | int tcnoprog[NR_CPUS]; |
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
| 185 | static int clock_hang_reported[NR_CPUS]; |
| 186 | |
| 187 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
| 188 | |
| 189 | /* |
| 190 | * Configure shared TLB - VPC configuration bit must be set by caller |
| 191 | */ |
| 192 | |
| 193 | static void smtc_configure_tlb(void) |
| 194 | { |
| 195 | int i, tlbsiz, vpes; |
| 196 | unsigned long mvpconf0; |
| 197 | unsigned long config1val; |
| 198 | |
| 199 | /* Set up ASID preservation table */ |
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
| 202 | smtc_live_asid[vpes][i] = 0; |
| 203 | } |
| 204 | } |
| 205 | mvpconf0 = read_c0_mvpconf0(); |
| 206 | |
| 207 | if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) |
| 208 | >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { |
| 209 | /* If we have multiple VPEs, try to share the TLB */ |
| 210 | if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { |
| 211 | /* |
| 212 | * If TLB sizing is programmable, shared TLB |
| 213 | * size is the total available complement. |
| 214 | * Otherwise, we have to take the sum of all |
| 215 | * static VPE TLB entries. |
| 216 | */ |
| 217 | if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) |
| 218 | >> MVPCONF0_PTLBE_SHIFT)) == 0) { |
| 219 | /* |
| 220 | * If there's more than one VPE, there had better |
| 221 | * be more than one TC, because we need one to bind |
| 222 | * to each VPE in turn to be able to read |
| 223 | * its configuration state! |
| 224 | */ |
| 225 | settc(1); |
| 226 | /* Stop the TC from doing anything foolish */ |
| 227 | write_tc_c0_tchalt(TCHALT_H); |
| 228 | mips_ihb(); |
| 229 | /* No need to un-Halt - that happens later anyway */ |
| 230 | for (i=0; i < vpes; i++) { |
| 231 | write_tc_c0_tcbind(i); |
| 232 | /* |
| 233 | * To be 100% sure we're really getting the right |
| 234 | * information, we exit the configuration state |
| 235 | * and do an IHB after each rebinding. |
| 236 | */ |
| 237 | write_c0_mvpcontrol( |
| 238 | read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); |
| 239 | mips_ihb(); |
| 240 | /* |
| 241 | * Only count if the MMU Type indicated is TLB |
| 242 | */ |
| 243 | if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { |
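					/*
					 * Config1 bits 30:25 (MMUSize)
					 * hold the number of TLB entries
					 * minus one.
					 */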
| 244 | config1val = read_vpe_c0_config1(); |
| 245 | tlbsiz += ((config1val >> 25) & 0x3f) + 1; |
| 246 | } |
| 247 | |
| 248 | /* Put core back in configuration state */ |
| 249 | write_c0_mvpcontrol( |
					read_c0_mvpcontrol() | MVPCONTROL_VPC);
| 251 | mips_ihb(); |
| 252 | } |
| 253 | } |
| 254 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); |
| 255 | ehb(); |
| 256 | |
| 257 | /* |
			 * Set up kernel data structures to use the software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
| 261 | * of their initialization in smtc_cpu_setup(). |
| 262 | */ |
| 263 | |
| 264 | /* MIPS32 limits TLB indices to 64 */ |
| 265 | if (tlbsiz > 64) |
| 266 | tlbsiz = 64; |
| 267 | cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; |
| 268 | smtc_status |= SMTC_TLB_SHARED; |
| 269 | local_flush_tlb_all(); |
| 270 | |
| 271 | printk("TLB of %d entry pairs shared by %d VPEs\n", |
| 272 | tlbsiz, vpes); |
| 273 | } else { |
| 274 | printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); |
| 275 | } |
| 276 | } |
| 277 | } |
| 278 | |
| 279 | |
| 280 | /* |
| 281 | * Incrementally build the CPU map out of constituent MIPS MT cores, |
 * using the specified available VPEs and TCs. Platform code needs
| 283 | * to ensure that each MIPS MT core invokes this routine on reset, |
| 284 | * one at a time(!). |
| 285 | * |
| 286 | * This version of the build_cpu_map and prepare_cpus routines assumes |
| 287 | * that *all* TCs of a MIPS MT core will be used for Linux, and that |
| 288 | * they will be spread across *all* available VPEs (to minimise the |
| 289 | * loss of efficiency due to exception service serialization). |
| 290 | * An improved version would pick up configuration information and |
| 291 | * possibly leave some TCs/VPEs as "slave" processors. |
| 292 | * |
| 293 | * Use c0_MVPConf0 to find out how many TCs are available, setting up |
| 294 | * cpu_possible_map and the logical/physical mappings. |
| 295 | */ |
| 296 | |
| 297 | int __init smtc_build_cpu_map(int start_cpu_slot) |
| 298 | { |
| 299 | int i, ntcs; |
| 300 | |
| 301 | /* |
| 302 | * The CPU map isn't actually used for anything at this point, |
| 303 | * so it's not clear what else we should do apart from set |
| 304 | * everything up so that "logical" = "physical". |
| 305 | */ |
| 306 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; |
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
| 308 | set_cpu_possible(i, true); |
| 309 | __cpu_number_map[i] = i; |
| 310 | __cpu_logical_map[i] = i; |
| 311 | } |
| 312 | #ifdef CONFIG_MIPS_MT_FPAFF |
| 313 | /* Initialize map of CPUs with FPUs */ |
| 314 | cpus_clear(mt_fpu_cpumask); |
| 315 | #endif |
| 316 | |
	/* One of those TCs is the one booting, and not a secondary... */
| 318 | printk("%i available secondary CPU TC(s)\n", i - 1); |
| 319 | |
| 320 | return i; |
| 321 | } |
| 322 | |
| 323 | /* |
| 324 | * Common setup before any secondaries are started |
 * Make sure all CPUs are in a sensible state before we boot any of the
| 326 | * secondaries. |
| 327 | * |
| 328 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly |
| 329 | * as possible across the available VPEs. |
| 330 | */ |
| 331 | |
| 332 | static void smtc_tc_setup(int vpe, int tc, int cpu) |
| 333 | { |
| 334 | settc(tc); |
| 335 | write_tc_c0_tchalt(TCHALT_H); |
| 336 | mips_ihb(); |
| 337 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
| 338 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
| 339 | | TCSTATUS_A); |
| 340 | /* |
| 341 | * TCContext gets an offset from the base of the IPIQ array |
| 342 | * to be used in low-level code to detect the presence of |
| 343 | * an active IPI queue |
| 344 | */ |
| 345 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); |
| 346 | /* Bind tc to vpe */ |
| 347 | write_tc_c0_tcbind(vpe); |
| 348 | /* In general, all TCs should have the same cpu_data indications */ |
| 349 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); |
| 350 | /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ |
| 351 | if (cpu_data[0].cputype == CPU_34K || |
| 352 | cpu_data[0].cputype == CPU_1004K) |
| 353 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
| 354 | cpu_data[cpu].vpe_id = vpe; |
| 355 | cpu_data[cpu].tc_id = tc; |
| 356 | /* Multi-core SMTC hasn't been tested, but be prepared */ |
| 357 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; |
| 358 | } |
| 359 | |
| 360 | /* |
 * Tweak to get Count registers in as close a sync as possible.
| 362 | * Value seems good for 34K-class cores. |
| 363 | */ |
| 364 | |
| 365 | #define CP0_SKEW 8 |
| 366 | |
| 367 | void smtc_prepare_cpus(int cpus) |
| 368 | { |
| 369 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
| 370 | unsigned long flags; |
| 371 | unsigned long val; |
| 372 | int nipi; |
| 373 | struct smtc_ipi *pipi; |
| 374 | |
| 375 | /* disable interrupts so we can disable MT */ |
| 376 | local_irq_save(flags); |
| 377 | /* disable MT so we can configure */ |
| 378 | dvpe(); |
| 379 | dmt(); |
| 380 | |
| 381 | spin_lock_init(&freeIPIq.lock); |
| 382 | |
| 383 | /* |
| 384 | * We probably don't have as many VPEs as we do SMP "CPUs", |
| 385 | * but it's possible - and in any case we'll never use more! |
| 386 | */ |
| 387 | for (i=0; i<NR_CPUS; i++) { |
| 388 | IPIQ[i].head = IPIQ[i].tail = NULL; |
| 389 | spin_lock_init(&IPIQ[i].lock); |
| 390 | IPIQ[i].depth = 0; |
| 391 | IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ |
| 392 | } |
| 393 | |
| 394 | /* cpu_data index starts at zero */ |
| 395 | cpu = 0; |
| 396 | cpu_data[cpu].vpe_id = 0; |
| 397 | cpu_data[cpu].tc_id = 0; |
| 398 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; |
| 399 | cpu++; |
| 400 | |
| 401 | /* Report on boot-time options */ |
| 402 | mips_mt_set_cpuoptions(); |
| 403 | if (vpelimit > 0) |
| 404 | printk("Limit of %d VPEs set\n", vpelimit); |
| 405 | if (tclimit > 0) |
| 406 | printk("Limit of %d TCs set\n", tclimit); |
| 407 | if (nostlb) { |
| 408 | printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); |
| 409 | } |
| 410 | if (asidmask) |
| 411 | printk("ASID mask value override to 0x%x\n", asidmask); |
| 412 | |
| 413 | /* Temporary */ |
| 414 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG |
| 415 | if (hang_trig) |
| 416 | printk("Logic Analyser Trigger on suspected TC hang\n"); |
| 417 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
| 418 | |
| 419 | /* Put MVPE's into 'configuration state' */ |
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);
| 421 | |
| 422 | val = read_c0_mvpconf0(); |
| 423 | nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; |
| 424 | if (vpelimit > 0 && nvpe > vpelimit) |
| 425 | nvpe = vpelimit; |
| 426 | ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; |
| 427 | if (ntc > NR_CPUS) |
| 428 | ntc = NR_CPUS; |
| 429 | if (tclimit > 0 && ntc > tclimit) |
| 430 | ntc = tclimit; |
| 431 | slop = ntc % nvpe; |
| 432 | for (i = 0; i < nvpe; i++) { |
| 433 | tcpervpe[i] = ntc / nvpe; |
		if (slop) {
			if ((slop - i) > 0)
				tcpervpe[i]++;
		}
| 437 | } |
| 438 | /* Handle command line override for VPE0 */ |
| 439 | if (vpe0limit > ntc) vpe0limit = ntc; |
| 440 | if (vpe0limit > 0) { |
| 441 | int slopslop; |
| 442 | if (vpe0limit < tcpervpe[0]) { |
| 443 | /* Reducing TC count - distribute to others */ |
| 444 | slop = tcpervpe[0] - vpe0limit; |
| 445 | slopslop = slop % (nvpe - 1); |
| 446 | tcpervpe[0] = vpe0limit; |
| 447 | for (i = 1; i < nvpe; i++) { |
| 448 | tcpervpe[i] += slop / (nvpe - 1); |
				if (slopslop && (slopslop - (i - 1) > 0))
| 450 | tcpervpe[i]++; |
| 451 | } |
| 452 | } else if (vpe0limit > tcpervpe[0]) { |
| 453 | /* Increasing TC count - steal from others */ |
| 454 | slop = vpe0limit - tcpervpe[0]; |
| 455 | slopslop = slop % (nvpe - 1); |
| 456 | tcpervpe[0] = vpe0limit; |
| 457 | for (i = 1; i < nvpe; i++) { |
| 458 | tcpervpe[i] -= slop / (nvpe - 1); |
				if (slopslop && (slopslop - (i - 1) > 0))
| 460 | tcpervpe[i]--; |
| 461 | } |
| 462 | } |
| 463 | } |
| 464 | |
| 465 | /* Set up shared TLB */ |
| 466 | smtc_configure_tlb(); |
| 467 | |
| 468 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { |
| 469 | if (tcpervpe[vpe] == 0) |
| 470 | continue; |
| 471 | if (vpe != 0) |
| 472 | printk(", "); |
| 473 | printk("VPE %d: TC", vpe); |
| 474 | for (i = 0; i < tcpervpe[vpe]; i++) { |
| 475 | /* |
| 476 | * TC 0 is bound to VPE 0 at reset, |
| 477 | * and is presumably executing this |
| 478 | * code. Leave it alone! |
| 479 | */ |
| 480 | if (tc != 0) { |
| 481 | smtc_tc_setup(vpe, tc, cpu); |
| 482 | cpu++; |
| 483 | } |
| 484 | printk(" %d", tc); |
| 485 | tc++; |
| 486 | } |
| 487 | if (vpe != 0) { |
| 488 | /* |
| 489 | * Allow this VPE to control others. |
| 490 | */ |
| 491 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | |
| 492 | VPECONF0_MVP); |
| 493 | |
| 494 | /* |
| 495 | * Clear any stale software interrupts from VPE's Cause |
| 496 | */ |
| 497 | write_vpe_c0_cause(0); |
| 498 | |
| 499 | /* |
| 500 | * Clear ERL/EXL of VPEs other than 0 |
| 501 | * and set restricted interrupt enable/mask. |
| 502 | */ |
| 503 | write_vpe_c0_status((read_vpe_c0_status() |
| 504 | & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) |
| 505 | | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 |
| 506 | | ST0_IE)); |
| 507 | /* |
| 508 | * set config to be the same as vpe0, |
| 509 | * particularly kseg0 coherency alg |
| 510 | */ |
| 511 | write_vpe_c0_config(read_c0_config()); |
| 512 | /* Clear any pending timer interrupt */ |
| 513 | write_vpe_c0_compare(0); |
| 514 | /* Propagate Config7 */ |
| 515 | write_vpe_c0_config7(read_c0_config7()); |
| 516 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
| 517 | ehb(); |
| 518 | } |
| 519 | /* enable multi-threading within VPE */ |
| 520 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
| 521 | /* enable the VPE */ |
| 522 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); |
| 523 | } |
| 524 | |
| 525 | /* |
| 526 | * Pull any physically present but unused TCs out of circulation. |
| 527 | */ |
| 528 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { |
| 529 | set_cpu_possible(tc, false); |
| 530 | set_cpu_present(tc, false); |
| 531 | tc++; |
| 532 | } |
| 533 | |
| 534 | /* release config state */ |
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
| 536 | |
| 537 | printk("\n"); |
| 538 | |
| 539 | /* Set up coprocessor affinity CPU mask(s) */ |
| 540 | |
| 541 | #ifdef CONFIG_MIPS_MT_FPAFF |
| 542 | for (tc = 0; tc < ntc; tc++) { |
| 543 | if (cpu_data[tc].options & MIPS_CPU_FPU) |
| 544 | cpu_set(tc, mt_fpu_cpumask); |
| 545 | } |
| 546 | #endif |
| 547 | |
| 548 | /* set up ipi interrupts... */ |
| 549 | |
| 550 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ |
| 551 | |
| 552 | setup_cross_vpe_interrupts(nvpe); |
| 553 | |
| 554 | /* Set up queue of free IPI "messages". */ |
| 555 | nipi = NR_CPUS * IPIBUF_PER_CPU; |
| 556 | if (ipibuffers > 0) |
| 557 | nipi = ipibuffers; |
| 558 | |
	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
| 560 | if (pipi == NULL) |
| 561 | panic("kmalloc of IPI message buffers failed\n"); |
| 562 | else |
| 563 | printk("IPI buffer pool of %d buffers\n", nipi); |
| 564 | for (i = 0; i < nipi; i++) { |
| 565 | smtc_ipi_nq(&freeIPIq, pipi); |
| 566 | pipi++; |
| 567 | } |
| 568 | |
| 569 | /* Arm multithreading and enable other VPEs - but all TCs are Halted */ |
| 570 | emt(EMT_ENABLE); |
| 571 | evpe(EVPE_ENABLE); |
| 572 | local_irq_restore(flags); |
| 573 | /* Initialize SMTC /proc statistics/diagnostics */ |
| 574 | init_smtc_stats(); |
| 575 | } |
| 576 | |
| 577 | |
| 578 | /* |
| 579 | * Setup the PC, SP, and GP of a secondary processor and start it |
| 580 | * running! |
| 581 | * smp_bootstrap is the place to resume from |
| 582 | * __KSTK_TOS(idle) is apparently the stack pointer |
| 583 | * (unsigned long)idle->thread_info the gp |
| 584 | * |
| 585 | */ |
| 586 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) |
| 587 | { |
| 588 | extern u32 kernelsp[NR_CPUS]; |
| 589 | unsigned long flags; |
| 590 | int mtflags; |
| 591 | |
| 592 | LOCK_MT_PRA(); |
| 593 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
| 594 | dvpe(); |
| 595 | } |
| 596 | settc(cpu_data[cpu].tc_id); |
| 597 | |
| 598 | /* pc */ |
| 599 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); |
| 600 | |
| 601 | /* stack pointer */ |
| 602 | kernelsp[cpu] = __KSTK_TOS(idle); |
| 603 | write_tc_gpr_sp(__KSTK_TOS(idle)); |
| 604 | |
| 605 | /* global pointer */ |
| 606 | write_tc_gpr_gp((unsigned long)task_thread_info(idle)); |
| 607 | |
| 608 | smtc_status |= SMTC_MTC_ACTIVE; |
| 609 | write_tc_c0_tchalt(0); |
| 610 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
| 611 | evpe(EVPE_ENABLE); |
| 612 | } |
| 613 | UNLOCK_MT_PRA(); |
| 614 | } |
| 615 | |
| 616 | void smtc_init_secondary(void) |
| 617 | { |
| 618 | local_irq_enable(); |
| 619 | } |
| 620 | |
| 621 | void smtc_smp_finish(void) |
| 622 | { |
| 623 | int cpu = smp_processor_id(); |
| 624 | |
| 625 | /* |
| 626 | * Lowest-numbered CPU per VPE starts a clock tick. |
	 * Like the per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
| 629 | * in ascending order across available VPEs. |
| 630 | */ |
| 631 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) |
| 632 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); |
| 633 | |
| 634 | printk("TC %d going on-line as CPU %d\n", |
| 635 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
| 636 | } |
| 637 | |
| 638 | void smtc_cpus_done(void) |
| 639 | { |
| 640 | } |
| 641 | |
| 642 | /* |
| 643 | * Support for SMTC-optimized driver IRQ registration |
| 644 | */ |
| 645 | |
| 646 | /* |
| 647 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask |
| 648 | * in do_IRQ. These are passed in setup_irq_smtc() and stored |
| 649 | * in this table. |
| 650 | */ |
| 651 | |
| 652 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, |
| 653 | unsigned long hwmask) |
| 654 | { |
| 655 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG |
| 656 | unsigned int vpe = current_cpu_data.vpe_id; |
| 657 | |
| 658 | vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; |
| 659 | #endif |
| 660 | irq_hwmask[irq] = hwmask; |
| 661 | |
| 662 | return setup_irq(irq, new); |
| 663 | } |
| 664 | |
| 665 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
| 666 | /* |
| 667 | * Support for IRQ affinity to TCs |
| 668 | */ |
| 669 | |
| 670 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) |
| 671 | { |
| 672 | /* |
| 673 | * If a "fast path" cache of quickly decodable affinity state |
| 674 | * is maintained, this is where it gets done, on a call up |
| 675 | * from the platform affinity code. |
| 676 | */ |
| 677 | } |
| 678 | |
| 679 | void smtc_forward_irq(unsigned int irq) |
| 680 | { |
| 681 | int target; |
| 682 | |
| 683 | /* |
| 684 | * OK wise guy, now figure out how to get the IRQ |
| 685 | * to be serviced on an authorized "CPU". |
| 686 | * |
| 687 | * Ideally, to handle the situation where an IRQ has multiple |
| 688 | * eligible CPUS, we would maintain state per IRQ that would |
| 689 | * allow a fair distribution of service requests. Since the |
| 690 | * expected use model is any-or-only-one, for simplicity |
| 691 | * and efficiency, we just pick the easiest one to find. |
| 692 | */ |
| 693 | |
| 694 | target = cpumask_first(irq_desc[irq].affinity); |
| 695 | |
| 696 | /* |
| 697 | * We depend on the platform code to have correctly processed |
| 698 | * IRQ affinity change requests to ensure that the IRQ affinity |
| 699 | * mask has been purged of bits corresponding to nonexistent and |
| 700 | * offline "CPUs", and to TCs bound to VPEs other than the VPE |
| 701 | * connected to the physical interrupt input for the interrupt |
| 702 | * in question. Otherwise we have a nasty problem with interrupt |
| 703 | * mask management. This is best handled in non-performance-critical |
| 704 | * platform IRQ affinity setting code, to minimize interrupt-time |
| 705 | * checks. |
| 706 | */ |
| 707 | |
| 708 | /* If no one is eligible, service locally */ |
| 709 | if (target >= NR_CPUS) { |
| 710 | do_IRQ_no_affinity(irq); |
| 711 | return; |
| 712 | } |
| 713 | |
| 714 | smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); |
| 715 | } |
| 716 | |
| 717 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |
| 718 | |
| 719 | /* |
| 720 | * IPI model for SMTC is tricky, because interrupts aren't TC-specific. |
| 721 | * Within a VPE one TC can interrupt another by different approaches. |
| 722 | * The easiest to get right would probably be to make all TCs except |
| 723 | * the target IXMT and set a software interrupt, but an IXMT-based |
| 724 | * scheme requires that a handler must run before a new IPI could |
| 725 | * be sent, which would break the "broadcast" loops in MIPS MT. |
| 726 | * A more gonzo approach within a VPE is to halt the TC, extract |
| 727 | * its Restart, Status, and a couple of GPRs, and program the Restart |
| 728 | * address to emulate an interrupt. |
| 729 | * |
| 730 | * Within a VPE, one can be confident that the target TC isn't in |
| 731 | * a critical EXL state when halted, since the write to the Halt |
| 732 | * register could not have issued on the writing thread if the |
| 733 | * halting thread had EXL set. So k0 and k1 of the target TC |
| 734 | * can be used by the injection code. Across VPEs, one can't |
| 735 | * be certain that the target TC isn't in a critical exception |
| 736 | * state. So we try a two-step process of sending a software |
| 737 | * interrupt to the target VPE, which either handles the event |
| 738 | * itself (if it was the target) or injects the event within |
| 739 | * the VPE. |
| 740 | */ |
| 741 | |
| 742 | static void smtc_ipi_qdump(void) |
| 743 | { |
| 744 | int i; |
| 745 | struct smtc_ipi *temp; |
| 746 | |
| 747 | for (i = 0; i < NR_CPUS ;i++) { |
| 748 | pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", |
| 749 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, |
| 750 | IPIQ[i].depth); |
| 751 | temp = IPIQ[i].head; |
| 752 | |
| 753 | while (temp != IPIQ[i].tail) { |
| 754 | pr_debug("%d %d %d: ", temp->type, temp->dest, |
| 755 | (int)temp->arg); |
| 756 | #ifdef SMTC_IPI_DEBUG |
| 757 | pr_debug("%u %lu\n", temp->sender, temp->stamp); |
| 758 | #else |
| 759 | pr_debug("\n"); |
| 760 | #endif |
| 761 | temp = temp->flink; |
| 762 | } |
| 763 | } |
| 764 | } |
| 765 | |
| 766 | /* |
| 767 | * The standard atomic.h primitives don't quite do what we want |
| 768 | * here: We need an atomic add-and-return-previous-value (which |
| 769 | * could be done with atomic_add_return and a decrement) and an |
| 770 | * atomic set/zero-and-return-previous-value (which can't really |
| 771 | * be done with the atomic.h primitives). And since this is |
| 772 | * MIPS MT, we can assume that we have LL/SC. |
| 773 | */ |
| 774 | static inline int atomic_postincrement(atomic_t *v) |
| 775 | { |
| 776 | unsigned long result; |
| 777 | |
| 778 | unsigned long temp; |
| 779 | |
| 780 | __asm__ __volatile__( |
| 781 | "1: ll %0, %2 \n" |
| 782 | " addu %1, %0, 1 \n" |
| 783 | " sc %1, %2 \n" |
| 784 | " beqz %1, 1b \n" |
| 785 | __WEAK_LLSC_MB |
| 786 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 787 | : "m" (v->counter) |
| 788 | : "memory"); |
| 789 | |
| 790 | return result; |
| 791 | } |
| 792 | |
| 793 | void smtc_send_ipi(int cpu, int type, unsigned int action) |
| 794 | { |
| 795 | int tcstatus; |
| 796 | struct smtc_ipi *pipi; |
| 797 | unsigned long flags; |
| 798 | int mtflags; |
| 799 | unsigned long tcrestart; |
| 800 | extern void r4k_wait_irqoff(void), __pastwait(void); |
| 801 | int set_resched_flag = (type == LINUX_SMP_IPI && |
| 802 | action == SMP_RESCHEDULE_YOURSELF); |
| 803 | |
| 804 | if (cpu == smp_processor_id()) { |
| 805 | printk("Cannot Send IPI to self!\n"); |
| 806 | return; |
| 807 | } |
| 808 | if (set_resched_flag && IPIQ[cpu].resched_flag != 0) |
| 809 | return; /* There is a reschedule queued already */ |
| 810 | |
| 811 | /* Set up a descriptor, to be delivered either promptly or queued */ |
| 812 | pipi = smtc_ipi_dq(&freeIPIq); |
| 813 | if (pipi == NULL) { |
| 814 | bust_spinlocks(1); |
| 815 | mips_mt_regdump(dvpe()); |
| 816 | panic("IPI Msg. Buffers Depleted\n"); |
| 817 | } |
| 818 | pipi->type = type; |
| 819 | pipi->arg = (void *)action; |
| 820 | pipi->dest = cpu; |
| 821 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
| 822 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
| 823 | IPIQ[cpu].resched_flag |= set_resched_flag; |
| 824 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
| 825 | LOCK_CORE_PRA(); |
| 826 | settc(cpu_data[cpu].tc_id); |
| 827 | write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); |
| 828 | UNLOCK_CORE_PRA(); |
| 829 | } else { |
| 830 | /* |
| 831 | * Not sufficient to do a LOCK_MT_PRA (dmt) here, |
| 832 | * since ASID shootdown on the other VPE may |
| 833 | * collide with this operation. |
| 834 | */ |
| 835 | LOCK_CORE_PRA(); |
| 836 | settc(cpu_data[cpu].tc_id); |
| 837 | /* Halt the targeted TC */ |
| 838 | write_tc_c0_tchalt(TCHALT_H); |
| 839 | mips_ihb(); |
| 840 | |
| 841 | /* |
| 842 | * Inspect TCStatus - if IXMT is set, we have to queue |
| 843 | * a message. Otherwise, we set up the "interrupt" |
| 844 | * of the other TC |
| 845 | */ |
| 846 | tcstatus = read_tc_c0_tcstatus(); |
| 847 | |
| 848 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
| 849 | /* |
			 * If we're in the irq-off version of the wait
| 851 | * loop, we need to force exit from the wait and |
| 852 | * do a direct post of the IPI. |
| 853 | */ |
| 854 | if (cpu_wait == r4k_wait_irqoff) { |
| 855 | tcrestart = read_tc_c0_tcrestart(); |
| 856 | if (tcrestart >= (unsigned long)r4k_wait_irqoff |
| 857 | && tcrestart < (unsigned long)__pastwait) { |
| 858 | write_tc_c0_tcrestart(__pastwait); |
| 859 | tcstatus &= ~TCSTATUS_IXMT; |
| 860 | write_tc_c0_tcstatus(tcstatus); |
| 861 | goto postdirect; |
| 862 | } |
| 863 | } |
| 864 | /* |
| 865 | * Otherwise we queue the message for the target TC |
			 * to pick up when it does a local_irq_restore()
| 867 | */ |
| 868 | write_tc_c0_tchalt(0); |
| 869 | UNLOCK_CORE_PRA(); |
| 870 | IPIQ[cpu].resched_flag |= set_resched_flag; |
| 871 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
| 872 | } else { |
| 873 | postdirect: |
| 874 | post_direct_ipi(cpu, pipi); |
| 875 | write_tc_c0_tchalt(0); |
| 876 | UNLOCK_CORE_PRA(); |
| 877 | } |
| 878 | } |
| 879 | } |
| 880 | |
| 881 | /* |
| 882 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set |
| 883 | */ |
| 884 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) |
| 885 | { |
| 886 | struct pt_regs *kstack; |
| 887 | unsigned long tcstatus; |
| 888 | unsigned long tcrestart; |
| 889 | extern u32 kernelsp[NR_CPUS]; |
| 890 | extern void __smtc_ipi_vector(void); |
| 891 | //printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); |
| 892 | |
| 893 | /* Extract Status, EPC from halted TC */ |
| 894 | tcstatus = read_tc_c0_tcstatus(); |
| 895 | tcrestart = read_tc_c0_tcrestart(); |
| 896 | /* If TCRestart indicates a WAIT instruction, advance the PC */ |
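	/*
	 * The mask 0xfe00003f keeps the opcode/CO bits and the function
	 * field; 0x42000020 is WAIT (COP0 with CO = 1, function 0x20).
	 * The high-bit test ensures tcrestart is a kernel-segment
	 * address before it is dereferenced.
	 */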
| 897 | if ((tcrestart & 0x80000000) |
| 898 | && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { |
| 899 | tcrestart += 4; |
| 900 | } |
| 901 | /* |
| 902 | * Save on TC's future kernel stack |
| 903 | * |
| 904 | * CU bit of Status is indicator that TC was |
| 905 | * already running on a kernel stack... |
| 906 | */ |
| 907 | if (tcstatus & ST0_CU0) { |
| 908 | /* Note that this "- 1" is pointer arithmetic */ |
| 909 | kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; |
| 910 | } else { |
| 911 | kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; |
| 912 | } |
| 913 | |
| 914 | kstack->cp0_epc = (long)tcrestart; |
| 915 | /* Save TCStatus */ |
| 916 | kstack->cp0_tcstatus = tcstatus; |
| 917 | /* Pass token of operation to be performed kernel stack pad area */ |
| 918 | kstack->pad0[4] = (unsigned long)pipi; |
| 919 | /* Pass address of function to be called likewise */ |
| 920 | kstack->pad0[5] = (unsigned long)&ipi_decode; |
| 921 | /* Set interrupt exempt and kernel mode */ |
| 922 | tcstatus |= TCSTATUS_IXMT; |
| 923 | tcstatus &= ~TCSTATUS_TKSU; |
| 924 | write_tc_c0_tcstatus(tcstatus); |
| 925 | ehb(); |
| 926 | /* Set TC Restart address to be SMTC IPI vector */ |
| 927 | write_tc_c0_tcrestart(__smtc_ipi_vector); |
| 928 | } |
| 929 | |
| 930 | static void ipi_resched_interrupt(void) |
| 931 | { |
| 932 | /* Return from interrupt should be enough to cause scheduler check */ |
| 933 | } |
| 934 | |
| 935 | static void ipi_call_interrupt(void) |
| 936 | { |
| 937 | /* Invoke generic function invocation code in smp.c */ |
| 938 | smp_call_function_interrupt(); |
| 939 | } |
| 940 | |
| 941 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
| 942 | |
| 943 | static void __irq_entry smtc_clock_tick_interrupt(void) |
| 944 | { |
| 945 | unsigned int cpu = smp_processor_id(); |
| 946 | struct clock_event_device *cd; |
| 947 | int irq = MIPS_CPU_IRQ_BASE + 1; |
| 948 | |
| 949 | irq_enter(); |
| 950 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); |
| 951 | cd = &per_cpu(mips_clockevent_device, cpu); |
| 952 | cd->event_handler(cd); |
| 953 | irq_exit(); |
| 954 | } |
| 955 | |
| 956 | void ipi_decode(struct smtc_ipi *pipi) |
| 957 | { |
| 958 | void *arg_copy = pipi->arg; |
| 959 | int type_copy = pipi->type; |
| 960 | |
| 961 | smtc_ipi_nq(&freeIPIq, pipi); |
| 962 | |
| 963 | switch (type_copy) { |
| 964 | case SMTC_CLOCK_TICK: |
| 965 | smtc_clock_tick_interrupt(); |
| 966 | break; |
| 967 | |
| 968 | case LINUX_SMP_IPI: |
| 969 | switch ((int)arg_copy) { |
| 970 | case SMP_RESCHEDULE_YOURSELF: |
| 971 | ipi_resched_interrupt(); |
| 972 | break; |
| 973 | case SMP_CALL_FUNCTION: |
| 974 | ipi_call_interrupt(); |
| 975 | break; |
| 976 | default: |
| 977 | printk("Impossible SMTC IPI Argument 0x%x\n", |
| 978 | (int)arg_copy); |
| 979 | break; |
| 980 | } |
| 981 | break; |
| 982 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
| 983 | case IRQ_AFFINITY_IPI: |
| 984 | /* |
| 985 | * Accept a "forwarded" interrupt that was initially |
| 986 | * taken by a TC who doesn't have affinity for the IRQ. |
| 987 | */ |
| 988 | do_IRQ_no_affinity((int)arg_copy); |
| 989 | break; |
| 990 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |
| 991 | default: |
| 992 | printk("Impossible SMTC IPI Type 0x%x\n", type_copy); |
| 993 | break; |
| 994 | } |
| 995 | } |
| 996 | |
| 997 | /* |
| 998 | * Similar to smtc_ipi_replay(), but invoked from context restore, |
| 999 | * so it reuses the current exception frame rather than set up a |
| 1000 | * new one with self_ipi. |
| 1001 | */ |
| 1002 | |
| 1003 | void deferred_smtc_ipi(void) |
| 1004 | { |
| 1005 | int cpu = smp_processor_id(); |
| 1006 | |
| 1007 | /* |
| 1008 | * Test is not atomic, but much faster than a dequeue, |
| 1009 | * and the vast majority of invocations will have a null queue. |
	 * If IRQs were disabled when this was called, then any IPIs queued
	 * after our last test will be taken on the next irq_enable/restore.
| 1012 | * If interrupts were enabled, then any IPIs added after the |
| 1013 | * last test will be taken directly. |
| 1014 | */ |
| 1015 | |
| 1016 | while (IPIQ[cpu].head != NULL) { |
| 1017 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
| 1018 | struct smtc_ipi *pipi; |
| 1019 | unsigned long flags; |
| 1020 | |
| 1021 | /* |
| 1022 | * It may be possible we'll come in with interrupts |
| 1023 | * already enabled. |
| 1024 | */ |
| 1025 | local_irq_save(flags); |
| 1026 | spin_lock(&q->lock); |
| 1027 | pipi = __smtc_ipi_dq(q); |
| 1028 | spin_unlock(&q->lock); |
| 1029 | if (pipi != NULL) { |
| 1030 | if (pipi->type == LINUX_SMP_IPI && |
| 1031 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) |
| 1032 | IPIQ[cpu].resched_flag = 0; |
| 1033 | ipi_decode(pipi); |
| 1034 | } |
| 1035 | /* |
| 1036 | * The use of the __raw_local restore isn't |
| 1037 | * as obviously necessary here as in smtc_ipi_replay(), |
| 1038 | * but it's more efficient, given that we're already |
| 1039 | * running down the IPI queue. |
| 1040 | */ |
| 1041 | __raw_local_irq_restore(flags); |
| 1042 | } |
| 1043 | } |
| 1044 | |
| 1045 | /* |
| 1046 | * Cross-VPE interrupts in the SMTC prototype use "software interrupts" |
| 1047 | * set via cross-VPE MTTR manipulation of the Cause register. It would be |
| 1048 | * in some regards preferable to have external logic for "doorbell" hardware |
| 1049 | * interrupts. |
| 1050 | */ |
| 1051 | |
| 1052 | static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; |
| 1053 | |
| 1054 | static irqreturn_t ipi_interrupt(int irq, void *dev_idm) |
| 1055 | { |
| 1056 | int my_vpe = cpu_data[smp_processor_id()].vpe_id; |
| 1057 | int my_tc = cpu_data[smp_processor_id()].tc_id; |
| 1058 | int cpu; |
| 1059 | struct smtc_ipi *pipi; |
| 1060 | unsigned long tcstatus; |
| 1061 | int sent; |
| 1062 | unsigned long flags; |
| 1063 | unsigned int mtflags; |
| 1064 | unsigned int vpflags; |
| 1065 | |
| 1066 | /* |
| 1067 | * So long as cross-VPE interrupts are done via |
| 1068 | * MFTR/MTTR read-modify-writes of Cause, we need |
| 1069 | * to stop other VPEs whenever the local VPE does |
| 1070 | * anything similar. |
| 1071 | */ |
| 1072 | local_irq_save(flags); |
| 1073 | vpflags = dvpe(); |
| 1074 | clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); |
| 1075 | set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); |
| 1076 | irq_enable_hazard(); |
| 1077 | evpe(vpflags); |
| 1078 | local_irq_restore(flags); |
| 1079 | |
| 1080 | /* |
| 1081 | * Cross-VPE Interrupt handler: Try to directly deliver IPIs |
| 1082 | * queued for TCs on this VPE other than the current one. |
| 1083 | * Return-from-interrupt should cause us to drain the queue |
| 1084 | * for the current TC, so we ought not to have to do it explicitly here. |
| 1085 | */ |
| 1086 | |
| 1087 | for_each_online_cpu(cpu) { |
| 1088 | if (cpu_data[cpu].vpe_id != my_vpe) |
| 1089 | continue; |
| 1090 | |
| 1091 | pipi = smtc_ipi_dq(&IPIQ[cpu]); |
| 1092 | if (pipi != NULL) { |
| 1093 | if (cpu_data[cpu].tc_id != my_tc) { |
| 1094 | sent = 0; |
| 1095 | LOCK_MT_PRA(); |
| 1096 | settc(cpu_data[cpu].tc_id); |
| 1097 | write_tc_c0_tchalt(TCHALT_H); |
| 1098 | mips_ihb(); |
| 1099 | tcstatus = read_tc_c0_tcstatus(); |
| 1100 | if ((tcstatus & TCSTATUS_IXMT) == 0) { |
| 1101 | post_direct_ipi(cpu, pipi); |
| 1102 | sent = 1; |
| 1103 | } |
| 1104 | write_tc_c0_tchalt(0); |
| 1105 | UNLOCK_MT_PRA(); |
| 1106 | if (!sent) { |
| 1107 | smtc_ipi_req(&IPIQ[cpu], pipi); |
| 1108 | } |
| 1109 | } else { |
| 1110 | /* |
| 1111 | * ipi_decode() should be called |
| 1112 | * with interrupts off |
| 1113 | */ |
| 1114 | local_irq_save(flags); |
| 1115 | if (pipi->type == LINUX_SMP_IPI && |
| 1116 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) |
| 1117 | IPIQ[cpu].resched_flag = 0; |
| 1118 | ipi_decode(pipi); |
| 1119 | local_irq_restore(flags); |
| 1120 | } |
| 1121 | } |
| 1122 | } |
| 1123 | |
| 1124 | return IRQ_HANDLED; |
| 1125 | } |
| 1126 | |
| 1127 | static void ipi_irq_dispatch(void) |
| 1128 | { |
| 1129 | do_IRQ(cpu_ipi_irq); |
| 1130 | } |
| 1131 | |
| 1132 | static struct irqaction irq_ipi = { |
| 1133 | .handler = ipi_interrupt, |
| 1134 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
| 1135 | .name = "SMTC_IPI" |
| 1136 | }; |
| 1137 | |
| 1138 | static void setup_cross_vpe_interrupts(unsigned int nvpe) |
| 1139 | { |
| 1140 | if (nvpe < 1) |
| 1141 | return; |
| 1142 | |
| 1143 | if (!cpu_has_vint) |
| 1144 | panic("SMTC Kernel requires Vectored Interrupt support"); |
| 1145 | |
| 1146 | set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); |
| 1147 | |
| 1148 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); |
| 1149 | |
| 1150 | set_irq_handler(cpu_ipi_irq, handle_percpu_irq); |
| 1151 | } |
| 1152 | |
| 1153 | /* |
| 1154 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
| 1155 | */ |
| 1156 | |
| 1157 | /* |
| 1158 | * smtc_ipi_replay is called from raw_local_irq_restore |
| 1159 | */ |
| 1160 | |
| 1161 | void smtc_ipi_replay(void) |
| 1162 | { |
| 1163 | unsigned int cpu = smp_processor_id(); |
| 1164 | |
| 1165 | /* |
| 1166 | * To the extent that we've ever turned interrupts off, |
	 * we may have accumulated deferred IPIs. This is subtle, but
	 * we should be OK: if we pick up something and dispatch
| 1169 | * it here, that's great. If we see nothing, but concurrent |
| 1170 | * with this operation, another TC sends us an IPI, IXMT |
| 1171 | * is clear, and we'll handle it as a real pseudo-interrupt |
| 1172 | * and not a pseudo-pseudo interrupt. The important thing |
| 1173 | * is to do the last check for queued message *after* the |
| 1174 | * re-enabling of interrupts. |
| 1175 | */ |
| 1176 | while (IPIQ[cpu].head != NULL) { |
| 1177 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
| 1178 | struct smtc_ipi *pipi; |
| 1179 | unsigned long flags; |
| 1180 | |
| 1181 | /* |
| 1182 | * It's just possible we'll come in with interrupts |
| 1183 | * already enabled. |
| 1184 | */ |
| 1185 | local_irq_save(flags); |
| 1186 | |
| 1187 | spin_lock(&q->lock); |
| 1188 | pipi = __smtc_ipi_dq(q); |
| 1189 | spin_unlock(&q->lock); |
| 1190 | /* |
		 * But use a raw restore here to avoid recursion.
| 1192 | */ |
| 1193 | __raw_local_irq_restore(flags); |
| 1194 | |
| 1195 | if (pipi) { |
| 1196 | self_ipi(pipi); |
| 1197 | smtc_cpu_stats[cpu].selfipis++; |
| 1198 | } |
| 1199 | } |
| 1200 | } |
| 1201 | |
| 1202 | EXPORT_SYMBOL(smtc_ipi_replay); |
| 1203 | |
| 1204 | void smtc_idle_loop_hook(void) |
| 1205 | { |
| 1206 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG |
| 1207 | int im; |
| 1208 | int flags; |
| 1209 | int mtflags; |
| 1210 | int bit; |
| 1211 | int vpe; |
| 1212 | int tc; |
| 1213 | int hook_ntcs; |
| 1214 | /* |
| 1215 | * printk within DMT-protected regions can deadlock, |
| 1216 | * so buffer diagnostic messages for later output. |
| 1217 | */ |
| 1218 | char *pdb_msg; |
| 1219 | char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ |
| 1220 | |
| 1221 | if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ |
| 1222 | if (atomic_add_return(1, &idle_hook_initialized) == 1) { |
| 1223 | int mvpconf0; |
| 1224 | /* Tedious stuff to just do once */ |
| 1225 | mvpconf0 = read_c0_mvpconf0(); |
| 1226 | hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; |
| 1227 | if (hook_ntcs > NR_CPUS) |
| 1228 | hook_ntcs = NR_CPUS; |
| 1229 | for (tc = 0; tc < hook_ntcs; tc++) { |
| 1230 | tcnoprog[tc] = 0; |
| 1231 | clock_hang_reported[tc] = 0; |
| 1232 | } |
| 1233 | for (vpe = 0; vpe < 2; vpe++) |
| 1234 | for (im = 0; im < 8; im++) |
| 1235 | imstuckcount[vpe][im] = 0; |
| 1236 | printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); |
| 1237 | atomic_set(&idle_hook_initialized, 1000); |
| 1238 | } else { |
| 1239 | /* Someone else is initializing in parallel - let 'em finish */ |
| 1240 | while (atomic_read(&idle_hook_initialized) < 1000) |
| 1241 | ; |
| 1242 | } |
| 1243 | } |
| 1244 | |
| 1245 | /* Have we stupidly left IXMT set somewhere? */ |
| 1246 | if (read_c0_tcstatus() & 0x400) { |
| 1247 | write_c0_tcstatus(read_c0_tcstatus() & ~0x400); |
| 1248 | ehb(); |
| 1249 | printk("Dangling IXMT in cpu_idle()\n"); |
| 1250 | } |
| 1251 | |
| 1252 | /* Have we stupidly left an IM bit turned off? */ |
| 1253 | #define IM_LIMIT 2000 |
| 1254 | local_irq_save(flags); |
| 1255 | mtflags = dmt(); |
| 1256 | pdb_msg = &id_ho_db_msg[0]; |
| 1257 | im = read_c0_status(); |
| 1258 | vpe = current_cpu_data.vpe_id; |
| 1259 | for (bit = 0; bit < 8; bit++) { |
| 1260 | /* |
| 1261 | * In current prototype, I/O interrupts |
| 1262 | * are masked for VPE > 0 |
| 1263 | */ |
| 1264 | if (vpemask[vpe][bit]) { |
| 1265 | if (!(im & (0x100 << bit))) |
| 1266 | imstuckcount[vpe][bit]++; |
| 1267 | else |
| 1268 | imstuckcount[vpe][bit] = 0; |
| 1269 | if (imstuckcount[vpe][bit] > IM_LIMIT) { |
| 1270 | set_c0_status(0x100 << bit); |
| 1271 | ehb(); |
| 1272 | imstuckcount[vpe][bit] = 0; |
| 1273 | pdb_msg += sprintf(pdb_msg, |
| 1274 | "Dangling IM %d fixed for VPE %d\n", bit, |
| 1275 | vpe); |
| 1276 | } |
| 1277 | } |
| 1278 | } |
| 1279 | |
| 1280 | emt(mtflags); |
| 1281 | local_irq_restore(flags); |
| 1282 | if (pdb_msg != &id_ho_db_msg[0]) |
| 1283 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
| 1284 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
| 1285 | |
| 1286 | smtc_ipi_replay(); |
| 1287 | } |
| 1288 | |
| 1289 | void smtc_soft_dump(void) |
| 1290 | { |
| 1291 | int i; |
| 1292 | |
| 1293 | printk("Counter Interrupts taken per CPU (TC)\n"); |
| 1294 | for (i=0; i < NR_CPUS; i++) { |
| 1295 | printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); |
| 1296 | } |
| 1297 | printk("Self-IPI invocations:\n"); |
| 1298 | for (i=0; i < NR_CPUS; i++) { |
| 1299 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
| 1300 | } |
| 1301 | smtc_ipi_qdump(); |
| 1302 | printk("%d Recoveries of \"stolen\" FPU\n", |
| 1303 | atomic_read(&smtc_fpu_recoveries)); |
| 1304 | } |
| 1305 | |
| 1306 | |
| 1307 | /* |
| 1308 | * TLB management routines special to SMTC |
| 1309 | */ |
| 1310 | |
| 1311 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) |
| 1312 | { |
| 1313 | unsigned long flags, mtflags, tcstat, prevhalt, asid; |
| 1314 | int tlb, i; |
| 1315 | |
| 1316 | /* |
| 1317 | * It would be nice to be able to use a spinlock here, |
| 1318 | * but this is invoked from within TLB flush routines |
| 1319 | * that protect themselves with DVPE, so if a lock is |
| 1320 | * held by another TC, it'll never be freed. |
| 1321 | * |
| 1322 | * DVPE/DMT must not be done with interrupts enabled, |
| 1323 | * so even so most callers will already have disabled |
| 1324 | * them, let's be really careful... |
| 1325 | */ |
| 1326 | |
| 1327 | local_irq_save(flags); |
| 1328 | if (smtc_status & SMTC_TLB_SHARED) { |
| 1329 | mtflags = dvpe(); |
| 1330 | tlb = 0; |
| 1331 | } else { |
| 1332 | mtflags = dmt(); |
| 1333 | tlb = cpu_data[cpu].vpe_id; |
| 1334 | } |
| 1335 | asid = asid_cache(cpu); |
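	/*
	 * asid_cache() carries a generation count in the bits above
	 * ASID_MASK; when the low bits wrap to zero below, a new
	 * generation starts and the whole TLB is flushed.
	 */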
| 1336 | |
| 1337 | do { |
		if (!((asid += ASID_INC) & ASID_MASK)) {
| 1339 | if (cpu_has_vtag_icache) |
| 1340 | flush_icache_all(); |
| 1341 | /* Traverse all online CPUs (hack requires contiguous range) */ |
| 1342 | for_each_online_cpu(i) { |
| 1343 | /* |
| 1344 | * We don't need to worry about our own CPU, nor those of |
| 1345 | * CPUs who don't share our TLB. |
| 1346 | */ |
| 1347 | if ((i != smp_processor_id()) && |
| 1348 | ((smtc_status & SMTC_TLB_SHARED) || |
| 1349 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { |
| 1350 | settc(cpu_data[i].tc_id); |
| 1351 | prevhalt = read_tc_c0_tchalt() & TCHALT_H; |
| 1352 | if (!prevhalt) { |
| 1353 | write_tc_c0_tchalt(TCHALT_H); |
| 1354 | mips_ihb(); |
| 1355 | } |
| 1356 | tcstat = read_tc_c0_tcstatus(); |
| 1357 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); |
| 1358 | if (!prevhalt) |
| 1359 | write_tc_c0_tchalt(0); |
| 1360 | } |
| 1361 | } |
| 1362 | if (!asid) /* fix version if needed */ |
| 1363 | asid = ASID_FIRST_VERSION; |
| 1364 | local_flush_tlb_all(); /* start new asid cycle */ |
| 1365 | } |
| 1366 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); |
| 1367 | |
| 1368 | /* |
| 1369 | * SMTC shares the TLB within VPEs and possibly across all VPEs. |
| 1370 | */ |
| 1371 | for_each_online_cpu(i) { |
| 1372 | if ((smtc_status & SMTC_TLB_SHARED) || |
| 1373 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) |
| 1374 | cpu_context(i, mm) = asid_cache(i) = asid; |
| 1375 | } |
| 1376 | |
| 1377 | if (smtc_status & SMTC_TLB_SHARED) |
| 1378 | evpe(mtflags); |
| 1379 | else |
| 1380 | emt(mtflags); |
| 1381 | local_irq_restore(flags); |
| 1382 | } |
| 1383 | |
| 1384 | /* |
| 1385 | * Invoked from macros defined in mmu_context.h |
| 1386 | * which must already have disabled interrupts |
| 1387 | * and done a DVPE or DMT as appropriate. |
| 1388 | */ |
| 1389 | |
| 1390 | void smtc_flush_tlb_asid(unsigned long asid) |
| 1391 | { |
| 1392 | int entry; |
| 1393 | unsigned long ehi; |
| 1394 | |
| 1395 | entry = read_c0_wired(); |
| 1396 | |
| 1397 | /* Traverse all non-wired entries */ |
| 1398 | while (entry < current_cpu_data.tlbsize) { |
| 1399 | write_c0_index(entry); |
| 1400 | ehb(); |
| 1401 | tlb_read(); |
| 1402 | ehb(); |
| 1403 | ehi = read_c0_entryhi(); |
| 1404 | if ((ehi & ASID_MASK) == asid) { |
| 1405 | /* |
| 1406 | * Invalidate only entries with specified ASID, |
			 * making sure all entries differ.
| 1408 | */ |
| 1409 | write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); |
| 1410 | write_c0_entrylo0(0); |
| 1411 | write_c0_entrylo1(0); |
| 1412 | mtc0_tlbw_hazard(); |
| 1413 | tlb_write_indexed(); |
| 1414 | } |
| 1415 | entry++; |
| 1416 | } |
| 1417 | write_c0_index(PARKED_INDEX); |
| 1418 | tlbw_use_hazard(); |
| 1419 | } |
| 1420 | |
| 1421 | /* |
| 1422 | * Support for single-threading cache flush operations. |
| 1423 | */ |
| 1424 | |
| 1425 | static int halt_state_save[NR_CPUS]; |
| 1426 | |
| 1427 | /* |
| 1428 | * To really, really be sure that nothing is being done |
| 1429 | * by other TCs, halt them all. This code assumes that |
| 1430 | * a DVPE has already been done, so while their Halted |
| 1431 | * state is theoretically architecturally unstable, in |
| 1432 | * practice, it's not going to change while we're looking |
| 1433 | * at it. |
| 1434 | */ |
| 1435 | |
| 1436 | void smtc_cflush_lockdown(void) |
| 1437 | { |
| 1438 | int cpu; |
| 1439 | |
| 1440 | for_each_online_cpu(cpu) { |
| 1441 | if (cpu != smp_processor_id()) { |
| 1442 | settc(cpu_data[cpu].tc_id); |
| 1443 | halt_state_save[cpu] = read_tc_c0_tchalt(); |
| 1444 | write_tc_c0_tchalt(TCHALT_H); |
| 1445 | } |
| 1446 | } |
| 1447 | mips_ihb(); |
| 1448 | } |
| 1449 | |
| 1450 | /* It would be cheating to change the cpu_online states during a flush! */ |
| 1451 | |
| 1452 | void smtc_cflush_release(void) |
| 1453 | { |
| 1454 | int cpu; |
| 1455 | |
| 1456 | /* |
| 1457 | * Start with a hazard barrier to ensure |
| 1458 | * that all CACHE ops have played through. |
| 1459 | */ |
| 1460 | mips_ihb(); |
| 1461 | |
| 1462 | for_each_online_cpu(cpu) { |
| 1463 | if (cpu != smp_processor_id()) { |
| 1464 | settc(cpu_data[cpu].tc_id); |
| 1465 | write_tc_c0_tchalt(halt_state_save[cpu]); |
| 1466 | } |
| 1467 | } |
| 1468 | mips_ihb(); |
| 1469 | } |