Commit | Line | Data |
---|---|---|
e8c48efd YL |
1 | #ifndef __ASM_SUMMIT_APIC_H |
2 | #define __ASM_SUMMIT_APIC_H | |
1da177e4 | 3 | |
1da177e4 | 4 | #include <asm/smp.h> |
4d9f9431 | 5 | #include <linux/gfp.h> |
1da177e4 LT |
6 | |
/* NOTE(review): consumed by generic sub-arch APIC code — ESR handling is
 * disabled (1) and kernel IRQ balancing is off (0) for Summit; confirm
 * against the mach-generic glue. */
#define esr_disable (1)
#define NO_BALANCE_IRQ (0)

/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

/* Destination format register value: Summit uses cluster addressing. */
#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
17 | ||
0a9cc20b | 18 | static inline const cpumask_t *summit_target_cpus(void) |
1da177e4 LT |
19 | { |
20 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | |
21 | * dest_LowestPrio mode logical clustered apic interrupt routing | |
22 | * Just start on cpu 0. IRQ balancing will spread load | |
23 | */ | |
e7986739 | 24 | return &cpumask_of_cpu(0); |
e8c48efd | 25 | } |
1da177e4 | 26 | |
1da177e4 LT |
27 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
28 | { | |
29 | return 0; | |
e8c48efd | 30 | } |
1da177e4 LT |
31 | |
/*
 * We don't use the phys_cpu_present_map to indicate apicid presence,
 * so every queried bit is unconditionally reported present.
 */
static inline unsigned long check_apicid_present(int bit)
{
	return 1;
}
37 | ||
/* Extract the cluster number (high nibble) from a clustered APIC ID. */
#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)

1da177e4 LT |
40 | extern u8 cpu_2_logical_apicid[]; |
41 | ||
/*
 * Program this CPU's logical APIC ID (LDR) for clustered mode.
 *
 * The logical ID keeps the hardware cluster number in the high nibble
 * and dedicates one low-nibble bit per CPU of that cluster.  The bit
 * position is chosen by counting how many CPUs of this cluster have
 * already recorded a logical APIC ID in cpu_2_logical_apicid[].
 */
static inline void init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = (u8)apicid_cluster(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	/* Switch to cluster destination format before setting the LDR. */
	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
68 | ||
/* No IRQ requires special multi-timer handling on Summit; always 0. */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
73 | ||
/* The boot APIC ID is unconditionally considered registered on Summit. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
78 | ||
3c43f039 | 79 | static inline void setup_apic_routing(void) |
1da177e4 LT |
80 | { |
81 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", | |
82 | nr_ioapics); | |
83 | } | |
84 | ||
/*
 * Map a logical APIC ID to its NUMA node.
 *
 * NOTE(review): the SMP lookup keys on hard_smp_processor_id() rather
 * than the logical_apicid argument — presumably callers only ever ask
 * about the current CPU; verify against call sites.  UP builds have a
 * single node 0.
 */
static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
93 | ||
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range CPU numbers cannot have a logical APIC ID. */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	/* UP: read the single CPU's logical ID straight from the APIC. */
	return logical_smp_processor_id();
#endif
}
105 | ||
106 | static inline int cpu_present_to_apicid(int mps_cpu) | |
107 | { | |
9628937d | 108 | if (mps_cpu < nr_cpu_ids) |
cbe879fc | 109 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
1da177e4 LT |
110 | else |
111 | return BAD_APICID; | |
112 | } | |
113 | ||
114 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | |
115 | { | |
116 | /* For clustered we don't have a good way to do this yet - hack */ | |
117 | return physids_promote(0x0F); | |
118 | } | |
119 | ||
120 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | |
121 | { | |
e8c48efd | 122 | return physid_mask_of_physid(0); |
1da177e4 LT |
123 | } |
124 | ||
1da177e4 LT |
/* Summit needs no port-I/O remapping; intentionally a no-op. */
static inline void setup_portio_remap(void)
{
}
128 | ||
/* The boot CPU's physical APIC ID is always treated as present. */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
133 | ||
/* Nothing to do to enable APIC mode on Summit; intentionally empty. */
static inline void enable_apic_mode(void)
{
}
137 | ||
/*
 * Compute the logical destination APIC ID covering a set of CPUs.
 *
 * Returns 0xFF (all CPUs) when the mask spans every CPU.  All CPUs in
 * the mask must share one APIC cluster — a clustered logical ID can
 * only address CPUs inside a single cluster — otherwise 0xFF is
 * returned as well.  On success the per-CPU logical IDs are OR-ed
 * together: the common cluster nibble plus the union of per-CPU bits.
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set >= nr_cpu_ids)
		return (int) 0xFF;
	/*
	 * The cpus in the mask must all be on the same apic cluster.  If
	 * they are not on the same apicid cluster return the default
	 * value of TARGET_CPUS.
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Walk set bits until all num_bits_set CPUs have been visited. */
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = apicid | new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
170 | ||
/*
 * Like cpu_mask_to_apicid(), but restricted to the intersection of
 * @inmask, @andmask and the online CPUs.
 *
 * Falls back to CPU 0's logical APIC ID when the temporary cpumask
 * cannot be allocated (GFP_ATOMIC — callable from atomic context).
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	/* cpumask = inmask & andmask & cpu_online_mask */
	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}
187 | ||
1da177e4 LT |
188 | /* cpuid returns the value latched in the HW at reset, not the APIC ID |
189 | * register's value. For any box whose BIOS changes APIC IDs, like | |
190 | * clustered APIC systems, we must use hard_smp_processor_id. | |
191 | * | |
192 | * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. | |
193 | */ | |
194 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |
195 | { | |
196 | return hard_smp_processor_id() >> index_msb; | |
197 | } | |
198 | ||
e8c48efd | 199 | #endif /* __ASM_SUMMIT_APIC_H */ |