powerpc: Move default security feature flags
arch/powerpc/platforms/pseries/setup.c
/*
 * 64-bit pSeries and RS/6000 setup code.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Adapted from 'alpha' version by Gary Thomas
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/isa-bridge.h>
#include <asm/security_features.h>

#include "pseries.h"

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);

int fwnmi_active;	/* TRUE if an FWNMI handler is present */

static void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
	if (radix_enabled())
		seq_printf(m, "MMU\t\t: Radix\n");
	else
		seq_printf(m, "MMU\t\t: Hash\n");
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
			   machine_check_addr))
		fwnmi_active = 1;
}

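/*
 * Chained handler for the legacy i8259 PIC cascaded off the primary
 * (XICS/XIVE) interrupt controller: fetch the pending i8259 vector,
 * handle it, then EOI the cascade interrupt on the parent chip.
 */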
static void pseries_8259_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

static void __init pseries_setup_i8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	unsigned int cascade;
	const u32 *addrp;
	unsigned long intack = 0;
	int naddr;

	for_each_node_by_type(np, "interrupt-controller") {
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	}

	if (found == NULL) {
		printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
		return;
	}

	cascade = irq_of_parse_and_map(found, 0);
	if (!cascade) {
		printk(KERN_ERR "pic: failed to map cascade interrupt");
		return;
	}
	pr_debug("pic: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	irq_set_chained_handler(cascade, pseries_8259_cascade);
}

static void __init pseries_init_irq(void)
{
	/* Try using a XIVE if available, otherwise use a XICS */
	if (!xive_spapr_init()) {
		xics_init();
		pseries_setup_i8259_cascade();
	}
}

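/*
 * Ask the hypervisor to enable the performance monitor facility for
 * this partition: H_PERFMON with the enable bit set and nothing reset.
 */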
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);
}

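/*
 * of_reconfig notifier: keep the pci_dn data structures in sync as PCI
 * device nodes are attached to or detached from the device tree.
 */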
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	struct device_node *parent, *np = rd->dn;
	struct pci_dn *pdn;
	int err = NOTIFY_OK;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		parent = of_get_parent(np);
		pdn = parent ? PCI_DN(parent) : NULL;
		if (pdn)
			pci_add_device_node_info(pdn->phb, np);

		of_node_put(parent);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pdn = PCI_DN(np);
		if (pdn)
			list_del(&pdn->list);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

struct kmem_cache *dtl_cache;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor. This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
	int cpu, ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (!dtl_cache)
		return 0;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;
	}

	/* Register the DTL for the current (boot) cpu */
	dtl = get_paca()->dispatch_log;
	get_paca()->dtl_ridx = 0;
	get_paca()->dtl_curr = dtl;
	get_paca()->lppaca_ptr->dtl_idx = 0;

	/* hypervisor reads buffer length from this field */
	dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
	ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
	if (ret)
		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
		       "with %d\n", smp_processor_id(),
		       hard_smp_processor_id(), ret);
	get_paca()->lppaca_ptr->dtl_enable_mask = 2;

	return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
	return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int alloc_dispatch_log_kmem_cache(void)
{
	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
				      DISPATCH_LOG_BYTES, 0, NULL);
	if (!dtl_cache) {
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
		return 0;
	}

	return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

static void pseries_lpar_idle(void)
{
	/*
	 * Default handler to go into low thread priority and possibly
	 * low power mode by ceding processor to hypervisor
	 */

	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

/*
 * Enable relocation on during exceptions. This has partition wide scope and
 * may take a while to complete, if it takes longer than one second we will
 * just give up rather than wasting any more time on this - if that turns out
 * to ever be a problem in practice we can move this into a kernel thread to
 * finish off the process later in boot.
 */
void pseries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc)) {
			if (rc == H_P2) {
				pr_info("Relocation on exceptions not"
					" supported\n");
			} else if (rc != H_SUCCESS) {
				pr_warn("Unable to enable relocation"
					" on exceptions: %ld\n", rc);
			}
			break;
		}

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {
			pr_warn("Warning: Giving up waiting to enable "
				"relocation on exceptions (%u msec)!\n",
				total_delay);
			return;
		}

		mdelay(delay);
	}
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);

void pseries_disable_reloc_on_exc(void)
{
	long rc;

	while (1) {
		rc = disable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS)
		pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
			rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);

#ifdef CONFIG_KEXEC_CORE
static void pSeries_machine_kexec(struct kimage *image)
{
	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		pseries_disable_reloc_on_exc();

	default_machine_kexec(image);
}
#endif

#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_big_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}

	/*
	 * At this point it is unlikely panic() will get anything
	 * out to the user, since this is called very late in kexec
	 * but at least this will stop us from continuing on further
	 * and creating an even more difficult to debug situation.
	 *
	 * There is a known problem when kdump'ing, if cpus are offline
	 * the above call will fail. Rather than panicking again, keep
	 * going and hope the kdump kernel is also little endian, which
	 * it usually is.
	 */
	if (rc && !kdump_in_progress())
		panic("Could not enable big endian exceptions");
}

void pseries_little_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_little_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc) {
		ppc_md.progress("H_SET_MODE LE exception fail", 0);
		panic("Could not enable little endian exceptions");
	}
}
#endif

static void __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");

	for_each_child_of_node(root, node) {
		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
					   strcmp(node->type, "pciex") != 0))
			continue;

		phb = pcibios_alloc_controller(node);
		if (!phb)
			continue;
		rtas_setup_phb(phb);
		pci_process_bridge_OF_ranges(phb, node, 0);
		isa_bridge_find_early(phb);
		phb->controller_ops = pseries_pci_controller_ops;
	}

	of_node_put(root);

	/*
	 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
	 * in chosen.
	 */
	of_pci_check_probe_only();
}

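/*
 * Translate the CPU characteristics returned by the hypervisor
 * (H_GET_CPU_CHARACTERISTICS) into the kernel's security feature flags.
 */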
static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
{
	if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

	if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);

	if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);

	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);

	/*
	 * The features below are enabled by default, so we instead look to see
	 * if firmware has *disabled* them, and clear them if so.
	 */
	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);

	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}

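/*
 * Configure the L1 data cache flush performed on kernel exit, based on
 * the security feature flags populated from the firmware-reported CPU
 * characteristics above.
 */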
void pseries_setup_rfi_flush(void)
{
	struct h_cpu_char_result result;
	enum l1d_flush_type types;
	bool enable;
	long rc;

	rc = plpar_get_cpu_characteristics(&result);
	if (rc == H_SUCCESS)
		init_cpu_char_feature_flags(&result);

	/*
	 * We're the guest so this doesn't apply to us, clear it to simplify
	 * handling of it elsewhere.
	 */
	security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

	types = L1D_FLUSH_FALLBACK;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
		types |= L1D_FLUSH_MTTRIG;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
		types |= L1D_FLUSH_ORI;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);

	setup_rfi_flush(types, enable);
}

#ifdef CONFIG_PCI_IOV
enum rtas_iov_fw_value_map {
	NUM_RES_PROPERTY  = 0, /* Number of Resources */
	LOW_INT           = 1, /* Lowest 32 bits of Address */
	START_OF_ENTRIES  = 2, /* Always start of entry */
	APERTURE_PROPERTY = 2, /* Start of entry+ to Aperture Size */
	WDW_SIZE_PROPERTY = 4, /* Start of entry+ to Window Size */
	NEXT_ENTRY        = 7  /* Go to next entry on array */
};

enum get_iov_fw_value_index {
	BAR_ADDRS     = 1, /* Get Bar Address */
	APERTURE_SIZE = 2, /* Get Aperture Size */
	WDW_SIZE      = 3  /* Get Window Size */
};

resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
					 enum get_iov_fw_value_index value)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(dev);
	int i, num_res, ret = 0;

	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return 0;

	/*
	 * First element in the array is the number of Bars
	 * returned. Search through the list to find the matching
	 * bar
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	if (resno >= num_res)
		return 0; /* or an error */

	i = START_OF_ENTRIES + NEXT_ENTRY * resno;
	switch (value) {
	case BAR_ADDRS:
		ret = of_read_number(&indexes[i], 2);
		break;
	case APERTURE_SIZE:
		ret = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		break;
	case WDW_SIZE:
		ret = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		break;
	}

	return ret;
}

void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
	struct resource *res;
	resource_size_t base, size;
	int i, r, num_res;

	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	num_res = min_t(int, num_res, PCI_SRIOV_NUM_BARS);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		res->flags = pci_parse_of_flags(of_read_number
						(&indexes[i + LOW_INT], 1), 0);
		res->flags |= (IORESOURCE_MEM_64 | IORESOURCE_PCI_FIXED);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
	}
}

void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
	struct resource *res, *root, *conflict;
	resource_size_t base, size;
	int i, r, num_res;

	/*
	 * First element in the array is the number of Bars
	 * returned. Search through the list to find the matching
	 * bars assign them from firmware into resources structure.
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
		root = &iomem_resource;
		dev_dbg(&dev->dev,
			"pSeries IOV BAR %d: trying firmware assignment %pR\n",
			r + PCI_IOV_RESOURCES, res);
		conflict = request_resource_conflict(root, res);
		if (conflict) {
			dev_info(&dev->dev,
				 "BAR %d: %pR conflicts with %s %pR\n",
				 r + PCI_IOV_RESOURCES, res,
				 conflict->name, conflict);
			res->flags |= IORESOURCE_UNSET;
		}
	}
}

static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/* Firmware must support open sriov, otherwise don't configure */
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree */
	of_pci_set_vf_bar_size(pdev, indexes);
}

static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	if (!pdev->is_physfn || pdev->is_added)
		return;
	/* Firmware must support open sriov, otherwise don't configure */
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree */
	of_pci_parse_iov_addrs(pdev, indexes);
}

static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
							   int resno)
{
	const __be32 *reg;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/* Firmware must support open sriov, otherwise report regular alignment */
	reg = of_get_property(dn, "ibm,is-open-sriov-pf", NULL);
	if (!reg)
		return pci_iov_resource_size(pdev, resno);

	if (!pdev->is_physfn)
		return 0;
	return pseries_get_iov_fw_value(pdev,
					resno - PCI_IOV_RESOURCES,
					APERTURE_SIZE);
}
#endif

static void __init pSeries_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	/* Discover PIC type and setup ppc_md accordingly */
	smp_init_pseries();

	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	fwnmi_init();

	pseries_setup_rfi_flush();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	of_reconfig_notifier_register(&pci_dn_reconfig_nb);

	pSeries_nvram_init();

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		vpa_init(boot_cpuid);
		ppc_md.power_save = pseries_lpar_idle;
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
		ppc_md.pcibios_fixup_resources =
			pseries_pci_fixup_resources;
		ppc_md.pcibios_fixup_sriov =
			pseries_pci_fixup_iov_resources;
		ppc_md.pcibios_iov_resource_alignment =
			pseries_pci_iov_resource_alignment;
#endif
	} else {
		/* No special idle routine */
		ppc_md.enable_pmcs = power4_enable_pmcs;
	}

	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
}

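/*
 * ppc_md.panic hook: flush any buffered kernel messages, then notify
 * firmware of the OS termination via rtas_os_term().
 */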
static void pseries_panic(char *str)
{
	panic_flush_kmsg_end();
	rtas_os_term(str);
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
	ppc_md.progress("Linux ppc64\n", 0);
#else
	ppc_md.progress("Linux ppc64le\n", 0);
#endif
	ppc_md.progress(init_utsname()->version, 0);

	return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);

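/*
 * Hypervisor-call backed implementations of the machdep hooks for
 * setting the data address breakpoint (DABR/XDABR) and data address
 * watchpoint (DAWR) registers; wired up in pseries_init() below.
 */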
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
	/* Have to set at least one bit in the DABRX according to PAPR */
	if (dabrx == 0 && dabr == 0)
		dabrx = DABRX_USER;
	/* PAPR says we can only set kernel and user bits */
	dabrx &= DABRX_KERNEL | DABRX_USER;

	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}

static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says we can't set HYP */
	dawrx &= ~DAWRX_HYP;

	return plpar_set_watchpoint0(dawr, dawrx);
}

#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

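/*
 * Check whether the hypervisor supports extended CMO (page coalescing)
 * by probing h_get_mpp_x(), and update the firmware feature flags.
 */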
void pSeries_coalesce_init(void)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
		powerpc_firmware_features |= FW_FEATURE_XCMO;
	else
		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

/**
 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
 * handle that here. (Stolen from parse_system_parameter_string)
 */
static void pSeries_cmo_feature_init(void)
{
	char *ptr, *key, *value, *end;
	int call_status;
	int page_order = IOMMU_PAGE_SHIFT_4K;

	pr_debug(" -> fw_cmo_feature_init()\n");
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				CMO_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		spin_unlock(&rtas_data_buf_lock);
		pr_debug("CMO not available\n");
		pr_debug(" <- fw_cmo_feature_init()\n");
		return;
	}

	end = rtas_data_buf + CMO_MAXLENGTH - 2;
	ptr = rtas_data_buf + 2;	/* step over strlen value */
	key = value = ptr;

	while (*ptr && (ptr <= end)) {
		/* Separate the key and value by replacing '=' with '\0' and
		 * point the value at the string after the '='
		 */
		if (ptr[0] == '=') {
			ptr[0] = '\0';
			value = ptr + 1;
		} else if (ptr[0] == '\0' || ptr[0] == ',') {
			/* Terminate the string containing the key/value pair */
			ptr[0] = '\0';

			if (key == value) {
				pr_debug("Malformed key/value pair\n");
				/* Never found a '=', end processing */
				break;
			}

			if (0 == strcmp(key, "CMOPageSize"))
				page_order = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "PrPSP"))
				CMO_PrPSP = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "SecPSP"))
				CMO_SecPSP = simple_strtol(value, NULL, 10);
			value = key = ptr + 1;
		}
		ptr++;
	}

	/* Page size is returned as the power of 2 of the page size,
	 * convert to the page size in bytes before returning
	 */
	CMO_PageSize = 1 << page_order;
	pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

	if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
		pr_info("CMO enabled\n");
		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
		powerpc_firmware_features |= FW_FEATURE_CMO;
		pSeries_coalesce_init();
	} else
		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
			 CMO_SecPSP);
	spin_unlock(&rtas_data_buf_lock);
	pr_debug(" <- fw_cmo_feature_init()\n");
}

/*
 * Early initialization. Relocation is on but do not reference unbolted pages
 */
static void __init pseries_init(void)
{
	pr_debug(" -> pseries_init()\n");

#ifdef CONFIG_HVC_CONSOLE
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hvc_vio_init_early();
#endif
	if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;
	else if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;

	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		ppc_md.set_dawr = pseries_set_dawr;

	pSeries_cmo_feature_init();
	iommu_init_early_pSeries();

	pr_debug(" <- pseries_init()\n");
}

/**
 * pseries_power_off - tell firmware about how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If power-off token is used, power on will only be
 * possible with power button press. If ibm,power-off-ups token is used
 * it will allow auto poweron after power is restored.
 */
static void pseries_power_off(void)
{
	int rc;
	int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	if (rtas_poweron_auto == 0 ||
	    rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
		rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
		printk(KERN_INFO "RTAS power-off returned %d\n", rc);
	} else {
		rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
		printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
	}
	for (;;);
}

static int __init pSeries_probe(void)
{
	const char *dtype = of_get_property(of_root, "device_type", NULL);

	if (dtype == NULL)
		return 0;
	if (strcmp(dtype, "chrp"))
		return 0;

	/* Cell blades firmware claims to be chrp while it's not. Until this
	 * is fixed, we need to avoid those here.
	 */
	if (of_machine_is_compatible("IBM,CPBW-1.0") ||
	    of_machine_is_compatible("IBM,CBEA"))
		return 0;

	pm_power_off = pseries_power_off;

	pr_debug("Machine is%s LPAR !\n",
		 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	pseries_init();

	return 1;
}

static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

struct pci_controller_ops pseries_pci_controller_ops = {
	.probe_mode		= pSeries_pci_probe_mode,
};

define_machine(pseries) {
	.name			= "pSeries",
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_IRQ		= pseries_init_irq,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.restart		= rtas_restart,
	.halt			= rtas_halt,
	.panic			= pseries_panic,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC_CORE
	.machine_kexec		= pSeries_machine_kexec,
	.kexec_cpu_down		= pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size	= pseries_memory_block_size,
#endif
};