Commit | Line | Data |
---|---|---|
e8c48efd YL |
1 | #ifndef __ASM_SUMMIT_APIC_H |
2 | #define __ASM_SUMMIT_APIC_H | |
1da177e4 | 3 | |
1da177e4 | 4 | #include <asm/smp.h> |
4d9f9431 | 5 | #include <linux/gfp.h> |
1da177e4 | 6 | |
1da177e4 LT |
/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

/* Summit uses the cluster-addressable logical destination format. */
#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
/* Default IPI destination set: just CPU 0 (see comment below). */
static inline const cpumask_t *summit_target_cpus(void)
{
	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
	return &cpumask_of_cpu(0);
}
1da177e4 | 23 | |
/* On Summit, APIC IDs are never tracked in the physid bitmap, so an
 * apicid is never considered "already used". */
static inline unsigned long
summit_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
1da177e4 LT |
29 | |
30 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | |
d1d7cae8 | 31 | static inline unsigned long summit_check_apicid_present(int bit) |
1da177e4 LT |
32 | { |
33 | return 1; | |
34 | } | |
/* Extract the cluster number (high nibble) from an APIC ID. */
#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)

/* Per-CPU logical APIC IDs, filled in by summit_init_apic_ldr(). */
extern u8 cpu_2_logical_apicid[];
/*
 * Program this CPU's logical APIC ID (LDR) for clustered mode.
 *
 * The logical ID is my_cluster with one bit of the low 4-bit bitmap
 * set, chosen by counting how many CPUs in this cluster have already
 * registered a logical apicid.  Must run on the CPU being initialized
 * (uses hard_smp_processor_id()).
 */
static inline void summit_init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = (u8)apicid_cluster(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	/* Select cluster destination format before writing the LDR. */
	apic_write(APIC_DFR, APIC_DFR_VALUE);
	/* Preserve reserved LDR bits, replace only the logical ID field. */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
66 | ||
/* Summit BIOS registers all APIC IDs; always report success. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
71 | ||
72ce0165 | 72 | static inline void summit_setup_apic_routing(void) |
1da177e4 LT |
73 | { |
74 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", | |
75 | nr_ioapics); | |
76 | } | |
77 | ||
/* Map a logical apicid to its NUMA node.
 * NOTE(review): under CONFIG_SMP this indexes apicid_2_node by the
 * *current* CPU's hardware apicid and ignores logical_apicid — looks
 * intentional for Summit, but verify against callers. */
static inline int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
86 | ||
87 | /* Mapping from cpu number to logical apicid */ | |
5257c511 | 88 | static inline int summit_cpu_to_logical_apicid(int cpu) |
1da177e4 | 89 | { |
874c4fe3 | 90 | #ifdef CONFIG_SMP |
9628937d MT |
91 | if (cpu >= nr_cpu_ids) |
92 | return BAD_APICID; | |
1da177e4 | 93 | return (int)cpu_2_logical_apicid[cpu]; |
874c4fe3 AK |
94 | #else |
95 | return logical_smp_processor_id(); | |
96 | #endif | |
1da177e4 LT |
97 | } |
98 | ||
a21769a4 | 99 | static inline int summit_cpu_present_to_apicid(int mps_cpu) |
1da177e4 | 100 | { |
9628937d | 101 | if (mps_cpu < nr_cpu_ids) |
cbe879fc | 102 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
1da177e4 LT |
103 | else |
104 | return BAD_APICID; | |
105 | } | |
106 | ||
/* Build the physid map the I/O APIC setup code should use. */
static inline physid_mask_t
summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0x0F);
}
113 | ||
/* Present-map contribution for one apicid.
 * NOTE(review): always reports physid 0 regardless of apicid —
 * presumably another clustered-mode shortcut; confirm with callers. */
static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
	return physid_mask_of_physid(0);
}
118 | ||
/* No port-I/O remapping needed on Summit. */
static inline void setup_portio_remap(void)
{
}
122 | ||
/* The boot CPU's physical apicid is always considered present. */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
127 | ||
/* Nothing extra to do to enable APIC mode on Summit. */
static inline void enable_apic_mode(void)
{
}
131 | ||
/*
 * OR together the logical apicids of every CPU in *cpumask.
 *
 * Valid only when all CPUs in the mask share one APIC cluster: the
 * result is cluster-nibble | union-of-cpu-bits.  Returns 0xFF (all
 * CPUs, broadcast) when the mask covers every CPU or when the mask
 * spans more than one cluster (with a warning in the latter case).
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set >= nr_cpu_ids)
		return (int) 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = summit_cpu_to_logical_apicid(cpu);
	/* Walk CPUs upward until every set bit has been visited. */
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = summit_cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			/* Same cluster: accumulate this CPU's bitmap bit. */
			apicid = apicid | new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
164 | ||
/*
 * Like cpu_mask_to_apicid(), but for the online CPUs in the
 * intersection of two masks.  Needs a scratch cpumask; falls back to
 * CPU 0's logical apicid if the GFP_ATOMIC allocation fails (this may
 * run with interrupts off, so atomic allocation is required).
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = summit_cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}
181 | ||
/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value. For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	/* Package ID = apicid with the low index_msb sub-ID bits shifted off. */
	return hard_smp_processor_id() >> index_msb;
}
192 | ||
e8c48efd | 193 | #endif /* __ASM_SUMMIT_APIC_H */ |