x86/mpparse: Remove the physid_t bitmap wrapper
author		Thomas Gleixner <tglx@linutronix.de>
		Tue, 13 Feb 2024 21:05:17 +0000 (22:05 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
		Thu, 15 Feb 2024 21:07:41 +0000 (22:07 +0100)
physid_t is just a wrapper around a bitmap. Remove the onion layer and use
the bitmap functionality directly.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Link: https://lore.kernel.org/r/20240212154639.994904510@linutronix.de
arch/x86/include/asm/mpspec.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_common.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/local.h
arch/x86/kernel/smpboot.c
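
The conversion is mechanical: the struct-plus-macro wrapper disappears and the
callers operate on the underlying bitmap directly. As a rough, hypothetical
userspace sketch (not part of the patch; the helpers below are simplified,
non-atomic stand-ins for the kernel's bitmap/bitops primitives, and the array
size is only illustrative), the old and new access patterns compare as follows:

/*
 * Hypothetical userspace sketch, not kernel code: the macros below are
 * simplified stand-ins for the kernel's DECLARE_BITMAP()/set_bit()/test_bit()
 * helpers. It shows that physid_mask_t was only an "onion layer" whose
 * accessors forward to the same bit operations on an embedded array.
 */
#include <stdio.h>
#include <string.h>

#define MAX_LOCAL_APIC		32768	/* illustrative size only */
#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

static inline void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline int test_bit(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Old scheme: a struct wrapping the bitmap, accessed through physid_* macros */
struct physid_mask {
	unsigned long mask[BITS_TO_LONGS(MAX_LOCAL_APIC)];
};
#define physid_set(physid, map)		set_bit(physid, (map).mask)
#define physid_isset(physid, map)	test_bit(physid, (map).mask)

/* New scheme: the bitmap itself, no wrapper */
static DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC);

int main(void)
{
	struct physid_mask old_map;

	memset(&old_map, 0, sizeof(old_map));

	physid_set(5, old_map);			/* old: through the wrapper macro */
	set_bit(5, phys_cpu_present_map);	/* new: directly on the bitmap    */

	/* Both report the bit as set: prints "old: 1, new: 1" */
	printf("old: %d, new: %d\n",
	       physid_isset(5, old_map), test_bit(5, phys_cpu_present_map));
	return 0;
}

Compiled as plain C, the sketch prints "old: 1, new: 1"; the wrapper adds
nothing beyond a struct around the array, which is exactly what the patch
removes.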

diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index b423d11e002d13a32a78e39ef89b744737e1ca0e..1b79d0ee95dfabbcfbdc9d661b84c2ec4014cddf 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_MPSPEC_H
 #define _ASM_X86_MPSPEC_H
 
+#include <linux/types.h>
 
 #include <asm/mpspec_def.h>
 #include <asm/x86_init.h>
@@ -62,32 +63,17 @@ static inline void e820__memblock_alloc_reserved_mpc_new(void) { }
 
 int generic_processor_info(int apicid);
 
-#define PHYSID_ARRAY_SIZE      BITS_TO_LONGS(MAX_LOCAL_APIC)
+extern DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC);
 
-struct physid_mask {
-       unsigned long mask[PHYSID_ARRAY_SIZE];
-};
-
-typedef struct physid_mask physid_mask_t;
-
-#define physid_set(physid, map)                        set_bit(physid, (map).mask)
-#define physid_isset(physid, map)              test_bit(physid, (map).mask)
-
-#define physids_clear(map)                                     \
-       bitmap_zero((map).mask, MAX_LOCAL_APIC)
-
-#define physids_empty(map)                                     \
-       bitmap_empty((map).mask, MAX_LOCAL_APIC)
-
-static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
+static inline void reset_phys_cpu_present_map(u32 apicid)
 {
-       physids_clear(*map);
-       physid_set(physid, *map);
+       bitmap_zero(phys_cpu_present_map, MAX_LOCAL_APIC);
+       set_bit(apicid, phys_cpu_present_map);
 }
 
-#define PHYSID_MASK_ALL                { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
-#define PHYSID_MASK_NONE       { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
-
-extern physid_mask_t phys_cpu_present_map;
+static inline void copy_phys_cpu_present_map(unsigned long *dst)
+{
+       bitmap_copy(dst, phys_cpu_present_map, MAX_LOCAL_APIC);
+}
 
 #endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 814e15c8b8cd5e5a8936bcbe61e9af3f082e1bed..ce6d31bfc6e3f0f0a934b4706d21d8ae6921f135 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -19,6 +19,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
+#include <linux/bitmap.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/memblock.h>
@@ -77,10 +78,8 @@ EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
 
 u8 boot_cpu_apic_version __ro_after_init;
 
-/*
- * Bitmask of physically existing CPUs:
- */
-physid_mask_t phys_cpu_present_map;
+/* Bitmap of physically present CPUs. */
+DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC);
 
 /*
  * Processor to be disabled specified by kernel parameter
@@ -2387,7 +2386,7 @@ static void cpu_update_apic(int cpu, u32 apicid)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 #endif
        set_cpu_possible(cpu, true);
-       physid_set(apicid, phys_cpu_present_map);
+       set_bit(apicid, phys_cpu_present_map);
        set_cpu_present(cpu, true);
        num_processors++;
 
@@ -2489,7 +2488,7 @@ static void __init apic_bsp_up_setup(void)
 #ifdef CONFIG_X86_64
        apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
 #endif
-       physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
+       reset_phys_cpu_present_map(boot_cpu_physical_apicid);
 }
 
 /**
diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c
index 8a00141073ea81cfc72b6154e5d21821fcd01211..d4dfa437081a92a77cab4af97735326766ce4a3e 100644
--- a/arch/x86/kernel/apic/apic_common.c
+++ b/arch/x86/kernel/apic/apic_common.c
@@ -18,16 +18,6 @@ u32 apic_flat_calc_apicid(unsigned int cpu)
        return 1U << cpu;
 }
 
-bool default_check_apicid_used(physid_mask_t *map, u32 apicid)
-{
-       return physid_isset(apicid, *map);
-}
-
-void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
-{
-       *retmap = *phys_map;
-}
-
 u32 default_cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -39,7 +29,7 @@ EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid);
 
 bool default_apic_id_registered(void)
 {
-       return physid_isset(read_apic_id(), phys_cpu_present_map);
+       return test_bit(read_apic_id(), phys_cpu_present_map);
 }
 
 /*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6285d880a64cc42bcb526d72049444740051a1e4..9f4fb3f2ddc403a768b3160f06fcae441992da7c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1460,7 +1460,7 @@ void restore_boot_irq_mode(void)
  */
 static void __init setup_ioapic_ids_from_mpc_nocheck(void)
 {
-       physid_mask_t phys_id_present_map;
+       DECLARE_BITMAP(phys_id_present_map, MAX_LOCAL_APIC);
        const u32 broadcast_id = 0xF;
        union IO_APIC_reg_00 reg_00;
        unsigned char old_id;
@@ -1471,7 +1471,7 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void)
         * This is broken; anything with a real cpu count has to
         * circumvent this idiocy regardless.
         */
-       phys_id_present_map = phys_cpu_present_map;
+       copy_phys_cpu_present_map(phys_id_present_map);
 
        /*
         * Set the IOAPIC ID to the value stored in the MPC table.
@@ -1496,21 +1496,21 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void)
                 * system must have a unique ID or we get lots of nice
                 * 'stuck on smp_invalidate_needed IPI wait' messages.
                 */
-               if (physid_isset(mpc_ioapic_id(ioapic_idx), phys_id_present_map)) {
+               if (test_bit(mpc_ioapic_id(ioapic_idx), phys_id_present_map)) {
                        pr_err(FW_BUG "IO-APIC#%d ID %d is already used!...\n",
                               ioapic_idx, mpc_ioapic_id(ioapic_idx));
                        for (i = 0; i < broadcast_id; i++)
-                               if (!physid_isset(i, phys_id_present_map))
+                               if (!test_bit(i, phys_id_present_map))
                                        break;
                        if (i >= broadcast_id)
                                panic("Max APIC ID exceeded!\n");
                        pr_err("... fixing up to %d. (tell your hw vendor)\n", i);
-                       physid_set(i, phys_id_present_map);
+                       set_bit(i, phys_id_present_map);
                        ioapics[ioapic_idx].mp_config.apicid = i;
                } else {
                        apic_printk(APIC_VERBOSE, "Setting %d in the phys_id_present_map\n",
                                    mpc_ioapic_id(ioapic_idx));
-                       physid_set(mpc_ioapic_id(ioapic_idx), phys_id_present_map);
+                       set_bit(mpc_ioapic_id(ioapic_idx), phys_id_present_map);
                }
 
                /*
@@ -2491,15 +2491,15 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
 #ifdef CONFIG_X86_32
 static int io_apic_get_unique_id(int ioapic, int apic_id)
 {
-       static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
+       static DECLARE_BITMAP(apic_id_map, MAX_LOCAL_APIC);
        const u32 broadcast_id = 0xF;
        union IO_APIC_reg_00 reg_00;
        unsigned long flags;
        int i = 0;
 
        /* Initialize the ID map */
-       if (physids_empty(apic_id_map))
-               apic_id_map = phys_cpu_present_map;
+       if (bitmap_empty(apic_id_map, MAX_LOCAL_APIC))
+               copy_phys_cpu_present_map(apic_id_map);
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(ioapic, 0);
@@ -2512,9 +2512,9 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
        }
 
        /* Every APIC in a system must have a unique ID */
-       if (physid_isset(apic_id, apic_id_map)) {
+       if (test_bit(apic_id, apic_id_map)) {
                for (i = 0; i < broadcast_id; i++) {
-                       if (!physid_isset(i, apic_id_map))
+                       if (!test_bit(i, apic_id_map))
                                break;
                }
 
@@ -2525,7 +2525,7 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
                apic_id = i;
        }
 
-       physid_set(apic_id, apic_id_map);
+       set_bit(apic_id, apic_id_map);
 
        if (reg_00.bits.ID != apic_id) {
                reg_00.bits.ID = apic_id;
diff --git a/arch/x86/kernel/apic/local.h b/arch/x86/kernel/apic/local.h
index 8fd37c9d1b344d27de0aee740be3834e46bf26c2..a77c23e624593dedd05f2f6b921a88d620edbc1d 100644
--- a/arch/x86/kernel/apic/local.h
+++ b/arch/x86/kernel/apic/local.h
@@ -63,7 +63,6 @@ void default_send_IPI_all(int vector);
 void default_send_IPI_self(int vector);
 
 bool default_apic_id_registered(void);
-bool default_check_apicid_used(physid_mask_t *map, u32 apicid);
 
 #ifdef CONFIG_X86_32
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9cdb056c37b5bd8b34c86b94cb3d5f6b32ca2778..000b856db8cef600a1c810248b00164e5282757d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1072,7 +1072,7 @@ int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 
        pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 
-       if (apicid == BAD_APICID || !physid_isset(apicid, phys_cpu_present_map) ||
+       if (apicid == BAD_APICID || !test_bit(apicid, phys_cpu_present_map) ||
            !apic_id_valid(apicid)) {
                pr_err("%s: bad cpu %d\n", __func__, cpu);
                return -EINVAL;
@@ -1147,10 +1147,8 @@ static __init void disable_smp(void)
        init_cpu_present(cpumask_of(0));
        init_cpu_possible(cpumask_of(0));
 
-       if (smp_found_config)
-               physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
-       else
-               physid_set_mask_of_physid(0, &phys_cpu_present_map);
+       reset_phys_cpu_present_map(smp_found_config ? boot_cpu_physical_apicid : 0);
+
        cpumask_set_cpu(0, topology_sibling_cpumask(0));
        cpumask_set_cpu(0, topology_core_cpumask(0));
        cpumask_set_cpu(0, topology_die_cpumask(0));