Commit | Line | Data |
---|---|---|
e8c48efd YL |
1 | #ifndef __ASM_SUMMIT_APIC_H |
2 | #define __ASM_SUMMIT_APIC_H | |
1da177e4 | 3 | |
1da177e4 | 4 | #include <asm/smp.h> |
4d9f9431 | 5 | #include <linux/gfp.h> |
1da177e4 LT |
6 | |
7 | #define esr_disable (1) | |
8 | #define NO_BALANCE_IRQ (0) | |
9 | ||
1da177e4 LT |
10 | /* In clustered mode, the high nibble of APIC ID is a cluster number. |
11 | * The low nibble is a 4-bit bitmap. */ | |
12 | #define XAPIC_DEST_CPUS_SHIFT 4 | |
13 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) | |
14 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) | |
15 | ||
16 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | |
17 | ||
e7986739 | 18 | static inline const cpumask_t *target_cpus(void) |
1da177e4 LT |
19 | { |
20 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | |
21 | * dest_LowestPrio mode logical clustered apic interrupt routing | |
22 | * Just start on cpu 0. IRQ balancing will spread load | |
23 | */ | |
e7986739 | 24 | return &cpumask_of_cpu(0); |
e8c48efd | 25 | } |
1da177e4 | 26 | |
f8987a10 IM |
27 | #define IRQ_DELIVERY_MODE (dest_LowestPrio) |
28 | #define IRQ_DEST_MODE 1 /* logical delivery broadcast to all procs */ | |
1da177e4 LT |
29 | |
30 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | |
31 | { | |
32 | return 0; | |
e8c48efd | 33 | } |
1da177e4 LT |
34 | |
/* we don't use the phys_cpu_present_map to indicate apicid presence */
static inline unsigned long check_apicid_present(int bit)
{
	/* Every APIC ID is unconditionally treated as present. */
	return 1;
}
40 | ||
41 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | |
42 | ||
1da177e4 LT |
43 | extern u8 cpu_2_logical_apicid[]; |
44 | ||
45 | static inline void init_apic_ldr(void) | |
46 | { | |
47 | unsigned long val, id; | |
874c4fe3 | 48 | int count = 0; |
1da177e4 LT |
49 | u8 my_id = (u8)hard_smp_processor_id(); |
50 | u8 my_cluster = (u8)apicid_cluster(my_id); | |
874c4fe3 AK |
51 | #ifdef CONFIG_SMP |
52 | u8 lid; | |
53 | int i; | |
1da177e4 LT |
54 | |
55 | /* Create logical APIC IDs by counting CPUs already in cluster. */ | |
9628937d | 56 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { |
1da177e4 LT |
57 | lid = cpu_2_logical_apicid[i]; |
58 | if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) | |
59 | ++count; | |
60 | } | |
874c4fe3 | 61 | #endif |
1da177e4 LT |
62 | /* We only have a 4 wide bitmap in cluster mode. If a deranged |
63 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ | |
64 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); | |
65 | id = my_cluster | (1UL << count); | |
593f4a78 | 66 | apic_write(APIC_DFR, APIC_DFR_VALUE); |
1da177e4 LT |
67 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
68 | val |= SET_APIC_LOGICAL_ID(id); | |
593f4a78 | 69 | apic_write(APIC_LDR, val); |
1da177e4 LT |
70 | } |
71 | ||
/*
 * Summit has no multiply-routed timer pins; report "not a multi
 * timer" for every (apic, irq) pair.
 */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
76 | ||
/* On Summit the boot CPU's APIC is always considered registered. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
81 | ||
3c43f039 | 82 | static inline void setup_apic_routing(void) |
1da177e4 LT |
83 | { |
84 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", | |
85 | nr_ioapics); | |
86 | } | |
87 | ||
/*
 * Map a logical APIC ID to its NUMA node.  On SMP kernels the lookup
 * keys on the current CPU's hardware APIC ID (not @logical_apicid);
 * UP kernels only have node 0.
 */
static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
96 | ||
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range CPU numbers have no logical APIC ID. */
	return (cpu < nr_cpu_ids) ? (int)cpu_2_logical_apicid[cpu]
				  : BAD_APICID;
#else
	return logical_smp_processor_id();
#endif
}
108 | ||
109 | static inline int cpu_present_to_apicid(int mps_cpu) | |
110 | { | |
9628937d | 111 | if (mps_cpu < nr_cpu_ids) |
cbe879fc | 112 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
1da177e4 LT |
113 | else |
114 | return BAD_APICID; | |
115 | } | |
116 | ||
117 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | |
118 | { | |
119 | /* For clustered we don't have a good way to do this yet - hack */ | |
120 | return physids_promote(0x0F); | |
121 | } | |
122 | ||
123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | |
124 | { | |
e8c48efd | 125 | return physid_mask_of_physid(0); |
1da177e4 LT |
126 | } |
127 | ||
1da177e4 LT |
/* No port-I/O remapping is required on Summit; intentionally empty. */
static inline void setup_portio_remap(void)
{
}
131 | ||
/*
 * Summit trusts the MP tables: every boot-CPU physical APIC ID is
 * reported as present.
 */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
136 | ||
/* Nothing to do to enable APIC mode on Summit; intentionally empty. */
static inline void enable_apic_mode(void)
{
}
140 | ||
e7986739 | 141 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
1da177e4 LT |
142 | { |
143 | int num_bits_set; | |
144 | int cpus_found = 0; | |
145 | int cpu; | |
e8c48efd | 146 | int apicid; |
1da177e4 | 147 | |
e7986739 | 148 | num_bits_set = cpus_weight(*cpumask); |
1da177e4 | 149 | /* Return id to all */ |
9628937d | 150 | if (num_bits_set >= nr_cpu_ids) |
1da177e4 | 151 | return (int) 0xFF; |
e8c48efd YL |
152 | /* |
153 | * The cpus in the mask must all be on the apic cluster. If are not | |
154 | * on the same apicid cluster return default value of TARGET_CPUS. | |
1da177e4 | 155 | */ |
e7986739 | 156 | cpu = first_cpu(*cpumask); |
1da177e4 LT |
157 | apicid = cpu_to_logical_apicid(cpu); |
158 | while (cpus_found < num_bits_set) { | |
e7986739 | 159 | if (cpu_isset(cpu, *cpumask)) { |
1da177e4 | 160 | int new_apicid = cpu_to_logical_apicid(cpu); |
e8c48efd | 161 | if (apicid_cluster(apicid) != |
1da177e4 | 162 | apicid_cluster(new_apicid)){ |
d5c003b4 | 163 | printk ("%s: Not a valid mask!\n", __func__); |
1da177e4 LT |
164 | return 0xFF; |
165 | } | |
166 | apicid = apicid | new_apicid; | |
167 | cpus_found++; | |
168 | } | |
169 | cpu++; | |
170 | } | |
171 | return apicid; | |
172 | } | |
173 | ||
a775a38b | 174 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, |
6eeb7c5a | 175 | const struct cpumask *andmask) |
95d313cf | 176 | { |
9628937d | 177 | int apicid = cpu_to_logical_apicid(0); |
a775a38b MT |
178 | cpumask_var_t cpumask; |
179 | ||
180 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | |
9628937d | 181 | return apicid; |
a775a38b MT |
182 | |
183 | cpumask_and(cpumask, inmask, andmask); | |
184 | cpumask_and(cpumask, cpumask, cpu_online_mask); | |
9628937d | 185 | apicid = cpu_mask_to_apicid(cpumask); |
95d313cf | 186 | |
a775a38b | 187 | free_cpumask_var(cpumask); |
95d313cf MT |
188 | return apicid; |
189 | } | |
190 | ||
1da177e4 LT |
191 | /* cpuid returns the value latched in the HW at reset, not the APIC ID |
192 | * register's value. For any box whose BIOS changes APIC IDs, like | |
193 | * clustered APIC systems, we must use hard_smp_processor_id. | |
194 | * | |
195 | * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. | |
196 | */ | |
197 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |
198 | { | |
199 | return hard_smp_processor_id() >> index_msb; | |
200 | } | |
201 | ||
e8c48efd | 202 | #endif /* __ASM_SUMMIT_APIC_H */ |