Commit | Line | Data |
---|---|---|
243e2511 BH |
1 | /* |
2 | * Copyright 2016,2017 IBM Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | */ | |
9 | #ifndef _ASM_POWERPC_XIVE_H | |
10 | #define _ASM_POWERPC_XIVE_H | |
11 | ||
12 | #define XIVE_INVALID_VP 0xffffffff | |
13 | ||
14 | #ifdef CONFIG_PPC_XIVE | |
15 | ||
16 | /* | |
17 | * Thread Interrupt Management Area (TIMA) | |
18 | * | |
19 | * This is a global MMIO region divided in 4 pages of varying access | |
20 | * permissions, providing access to per-cpu interrupt management | |
21 | * functions. It always identifies the CPU doing the access based | |
22 | * on the PowerBus initiator ID, thus we always access via the | |
23 | * same offset regardless of where the code is executing | |
24 | */ | |
25 | extern void __iomem *xive_tima; | |
26 | ||
27 | /* | |
28 | * Offset in the TM area of our current execution level (provided by | |
29 | * the backend) | |
30 | */ | |
31 | extern u32 xive_tima_offset; | |
32 | ||
33 | /* | |
34 | * Per-irq data (irq_get_handler_data for normal IRQs), IPIs | |
35 | * have it stored in the xive_cpu structure. We also cache | |
36 | * for normal interrupts the current target CPU. | |
37 | * | |
38 | * This structure is setup by the backend for each interrupt. | |
39 | */ | |
struct xive_irq_data {
	u64 flags;		/* XIVE_IRQ_FLAG_* bits */
	u64 eoi_page;		/* EOI page address (presumably physical — verify against backend) */
	void __iomem *eoi_mmio;	/* MMIO mapping used for EOI accesses */
	u64 trig_page;		/* trigger page address */
	void __iomem *trig_mmio;	/* MMIO mapping used to trigger the interrupt */
	u32 esb_shift;		/* size shift of the ESB pages (1 << esb_shift bytes) */
	int src_chip;		/* source chip ID, or XIVE_INVALID_CHIP_ID */
	u32 hw_irq;		/* hardware interrupt number */

	/* Set up / used by the frontend */
	int target;		/* cached target CPU for normal interrupts */
	bool saved_p;		/* NOTE(review): presumably the saved ESB "P" bit across masking — confirm */
};
54 | #define XIVE_IRQ_FLAG_STORE_EOI 0x01 | |
55 | #define XIVE_IRQ_FLAG_LSI 0x02 | |
56 | #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 | |
57 | #define XIVE_IRQ_FLAG_MASK_FW 0x08 | |
58 | #define XIVE_IRQ_FLAG_EOI_FW 0x10 | |
bed81ee1 | 59 | #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 |
243e2511 | 60 | |
7f1c410d BH |
61 | /* Special flag set by KVM for escalation interrupts */ |
62 | #define XIVE_IRQ_NO_EOI 0x80 | |
63 | ||
243e2511 BH |
64 | #define XIVE_INVALID_CHIP_ID -1 |
65 | ||
66 | /* A queue tracking structure in a CPU */ | |
struct xive_q {
	__be32			*qpage;		/* event queue page (big-endian entries) */
	u32			msk;		/* index mask applied when advancing idx */
	u32			idx;		/* current read index into qpage */
	u32			toggle;		/* generation/valid bit toggled on wrap */
	u64			eoi_phys;	/* physical address used for queue EOI */
	u32			esc_irq;	/* escalation interrupt number, if any */
	atomic_t		count;		/* NOTE(review): usage counts maintained by callers — confirm semantics */
	atomic_t		pending_count;
};
77 | ||
243e2511 BH |
78 | /* Global enable flags for the XIVE support */ |
79 | extern bool __xive_enabled; | |
80 | ||
81 | static inline bool xive_enabled(void) { return __xive_enabled; } | |
82 | ||
eac1e731 | 83 | extern bool xive_spapr_init(void); |
243e2511 BH |
84 | extern bool xive_native_init(void); |
85 | extern void xive_smp_probe(void); | |
86 | extern int xive_smp_prepare_cpu(unsigned int cpu); | |
87 | extern void xive_smp_setup_cpu(void); | |
88 | extern void xive_smp_disable_cpu(void); | |
eac1e731 | 89 | extern void xive_teardown_cpu(void); |
243e2511 BH |
90 | extern void xive_shutdown(void); |
91 | extern void xive_flush_interrupt(void); | |
92 | ||
93 | /* xmon hook */ | |
94 | extern void xmon_xive_do_dump(int cpu); | |
95 | ||
96 | /* APIs used by KVM */ | |
97 | extern u32 xive_native_default_eq_shift(void); | |
98 | extern u32 xive_native_alloc_vp_block(u32 max_vcpus); | |
99 | extern void xive_native_free_vp_block(u32 vp_base); | |
100 | extern int xive_native_populate_irq_data(u32 hw_irq, | |
101 | struct xive_irq_data *data); | |
102 | extern void xive_cleanup_irq_data(struct xive_irq_data *xd); | |
103 | extern u32 xive_native_alloc_irq(void); | |
104 | extern void xive_native_free_irq(u32 irq); | |
105 | extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq); | |
106 | ||
107 | extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, | |
108 | __be32 *qpage, u32 order, bool can_escalate); | |
109 | extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio); | |
110 | ||
5af50993 | 111 | extern void xive_native_sync_source(u32 hw_irq); |
243e2511 | 112 | extern bool is_xive_irq(struct irq_chip *chip); |
bf4159da | 113 | extern int xive_native_enable_vp(u32 vp_id, bool single_escalation); |
5af50993 BH |
114 | extern int xive_native_disable_vp(u32 vp_id); |
115 | extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id); | |
bf4159da | 116 | extern bool xive_native_has_single_escalation(void); |
243e2511 BH |
117 | |
118 | #else | |
119 | ||
120 | static inline bool xive_enabled(void) { return false; } | |
121 | ||
eac1e731 | 122 | static inline bool xive_spapr_init(void) { return false; } |
243e2511 BH |
123 | static inline bool xive_native_init(void) { return false; } |
124 | static inline void xive_smp_probe(void) { } | |
38833faa | 125 | static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; } |
243e2511 BH |
126 | static inline void xive_smp_setup_cpu(void) { } |
127 | static inline void xive_smp_disable_cpu(void) { } | |
128 | static inline void xive_kexec_teardown_cpu(int secondary) { } | |
129 | static inline void xive_shutdown(void) { } | |
130 | static inline void xive_flush_interrupt(void) { } | |
131 | ||
132 | static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; } | |
133 | static inline void xive_native_free_vp_block(u32 vp_base) { } | |
134 | ||
135 | #endif | |
136 | ||
137 | #endif /* _ASM_POWERPC_XIVE_H */ |