Merge tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Apr 2015 17:19:03 +0000 (10:19 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Apr 2015 17:19:03 +0000 (10:19 -0700)
Pull final removal of deprecated cpus_* cpumask functions from Rusty Russell:
 "This is the final removal (after several years!) of the obsolete
  cpus_* functions, prompted by their mis-use in staging.

  With these functions removed, all cpu functions should only iterate to
  nr_cpu_ids, so we finally only allocate that many bits when cpumasks
  are allocated offstack"

* tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (25 commits)
  cpumask: remove __first_cpu / __next_cpu
  cpumask: resurrect CPU_MASK_CPU0
  linux/cpumask.h: add typechecking to cpumask_test_cpu
  cpumask: only allocate nr_cpumask_bits.
  Fix weird uses of num_online_cpus().
  cpumask: remove deprecated functions.
  mips: fix obsolete cpumask_of_cpu usage.
  x86: fix more deprecated cpu function usage.
  ia64: remove deprecated cpus_ usage.
  powerpc: fix deprecated CPU_MASK_CPU0 usage.
  CPU_MASK_ALL/CPU_MASK_NONE: remove from deprecated region.
  staging/lustre/o2iblnd: Don't use cpus_weight
  staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
  staging/lustre/ptlrpc: Do not use deprecated cpus_* functions
  blackfin: fix up obsolete cpu function usage.
  parisc: fix up obsolete cpu function usage.
  tile: fix up obsolete cpu function usage.
  arm64: fix up obsolete cpu function usage.
  mips: fix up obsolete cpu function usage.
  x86: fix up obsolete cpu function usage.
  ...

51 files changed:
Documentation/cpu-hotplug.txt
arch/arm64/kernel/smp.c
arch/blackfin/mach-bf561/smp.c
arch/ia64/include/asm/acpi.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/numa.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/ia64/kernel/topology.c
arch/m32r/kernel/smpboot.c
arch/mips/bcm63xx/irq.c
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/smp.h
arch/mips/kernel/crash.c
arch/mips/kernel/mips-mt-fpaff.c
arch/mips/kernel/process.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/loongson/loongson-3/numa.c
arch/mips/loongson/loongson-3/smp.c
arch/mips/paravirt/paravirt-smp.c
arch/mips/sgi-ip27/ip27-init.c
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sgi-ip27/ip27-memory.c
arch/parisc/kernel/irq.c
arch/powerpc/include/asm/cputhreads.h
arch/sh/include/asm/mmu_context.h
arch/sh/kernel/smp.c
arch/sparc/kernel/time_32.c
arch/tile/kernel/setup.c
arch/x86/kernel/apic/x2apic_cluster.c
drivers/clocksource/dw_apb_timer.c
drivers/cpuidle/coupled.c
drivers/crypto/n2_core.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/net/ethernet/tile/tilegx.c
drivers/scsi/hpsa.c
include/linux/cpumask.h
lib/Kconfig
lib/cpumask.c

diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a0b005d2bd95..f9ad5e048b11 100644
@@ -108,7 +108,7 @@ Never use anything other than cpumask_t to represent bitmap of CPUs.
        for_each_possible_cpu     - Iterate over cpu_possible_mask
        for_each_online_cpu       - Iterate over cpu_online_mask
        for_each_present_cpu      - Iterate over cpu_present_mask
-       for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
+       for_each_cpu(x,mask)      - Iterate over some random collection of cpu mask.
 
        #include <linux/cpu.h>
        get_online_cpus() and put_online_cpus():
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffe8e1b814e0..714411f62391 100644
@@ -636,7 +636,7 @@ void smp_send_stop(void)
                cpumask_t mask;
 
                cpumask_copy(&mask, cpu_online_mask);
-               cpu_clear(smp_processor_id(), mask);
+               cpumask_clear_cpu(smp_processor_id(), &mask);
 
                smp_cross_call(&mask, IPI_CPU_STOP);
        }
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 11789beca75a..8c0c80fd1a45 100644
@@ -124,7 +124,7 @@ void platform_send_ipi(cpumask_t callmap, int irq)
        unsigned int cpu;
        int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 
-       for_each_cpu_mask(cpu, callmap) {
+       for_each_cpu(cpu, &callmap) {
                BUG_ON(cpu >= 2);
                SSYNC();
                bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a1d91ab4c5ef..aa0fdf125aba 100644
@@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \
-       for_each_cpu_mask((cpu), early_cpu_possible_map)
+       for_each_cpu((cpu), &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
@@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
        int cpu;
        int next_nid = 0;
 
-       low_cpu = cpus_weight(early_cpu_possible_map);
+       low_cpu = cpumask_weight(&early_cpu_possible_map);
 
        high_cpu = max(low_cpu, min_cpus);
        high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
 
        for (cpu = low_cpu; cpu < high_cpu; cpu++) {
-               cpu_set(cpu, early_cpu_possible_map);
+               cpumask_set_cpu(cpu, &early_cpu_possible_map);
                if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
                        node_cpuid[cpu].nid = next_nid;
                        next_nid++;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2c4498919d3c..35bf22cc71b7 100644
@@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
            (pa->apic_id << 8) | (pa->local_sapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pxm;
-       cpu_set(srat_num_cpus, early_cpu_possible_map);
+       cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
        srat_num_cpus++;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cd44a57c73be..bc9501e36e77 100644
@@ -690,7 +690,7 @@ skip_numa_setup:
        do {
                if (++cpu >= nr_cpu_ids)
                        cpu = 0;
-       } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
+       } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
 
        return cpu_physical_id(cpu);
 #else  /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 698d8fefde6c..eaa3199f98c8 100644
@@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain)
        int pos, vector;
 
        cpumask_and(&mask, &domain, cpu_online_mask);
-       if (cpus_empty(mask))
+       if (cpumask_empty(&mask))
                return -EINVAL;
 
        for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
                vector = IA64_FIRST_DEVICE_VECTOR + pos;
-               cpus_and(mask, domain, vector_table[vector]);
-               if (!cpus_empty(mask))
+               cpumask_and(&mask, &domain, &vector_table[vector]);
+               if (!cpumask_empty(&mask))
                        continue;
                return vector;
        }
@@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
        BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
        cpumask_and(&mask, &domain, cpu_online_mask);
-       if (cpus_empty(mask))
+       if (cpumask_empty(&mask))
                return -EINVAL;
-       if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+       if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
                return 0;
        if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
                return -EBUSY;
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu(cpu, &mask)
                per_cpu(vector_irq, cpu)[vector] = irq;
        cfg->vector = vector;
        cfg->domain = domain;
        irq_status[irq] = IRQ_USED;
-       cpus_or(vector_table[vector], vector_table[vector], domain);
+       cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
        return 0;
 }
 
@@ -161,7 +161,6 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 static void __clear_irq_vector(int irq)
 {
        int vector, cpu;
-       cpumask_t mask;
        cpumask_t domain;
        struct irq_cfg *cfg = &irq_cfg[irq];
 
@@ -169,13 +168,12 @@ static void __clear_irq_vector(int irq)
        BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
        vector = cfg->vector;
        domain = cfg->domain;
-       cpumask_and(&mask, &cfg->domain, cpu_online_mask);
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = -1;
        cfg->vector = IRQ_VECTOR_UNASSIGNED;
        cfg->domain = CPU_MASK_NONE;
        irq_status[irq] = IRQ_UNUSED;
-       cpus_andnot(vector_table[vector], vector_table[vector], domain);
+       cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -244,7 +242,7 @@ void __setup_vector_irq(int cpu)
                per_cpu(vector_irq, cpu)[vector] = -1;
        /* Mark the inuse vectors */
        for (irq = 0; irq < NR_IRQS; ++irq) {
-               if (!cpu_isset(cpu, irq_cfg[irq].domain))
+               if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
                        continue;
                vector = irq_to_vector(irq);
                per_cpu(vector_irq, cpu)[vector] = irq;
@@ -261,7 +259,7 @@ static enum vector_domain_type {
 static cpumask_t vector_allocation_domain(int cpu)
 {
        if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
-               return cpumask_of_cpu(cpu);
+               return *cpumask_of(cpu);
        return CPU_MASK_ALL;
 }
 
@@ -275,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu)
                return -EBUSY;
        if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
                return -EINVAL;
-       if (cpu_isset(cpu, cfg->domain))
+       if (cpumask_test_cpu(cpu, &cfg->domain))
                return 0;
        domain = vector_allocation_domain(cpu);
        vector = find_unassigned_vector(domain);
@@ -309,12 +307,12 @@ void irq_complete_move(unsigned irq)
        if (likely(!cfg->move_in_progress))
                return;
 
-       if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+       if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
                return;
 
        cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
-       cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-       for_each_cpu_mask(i, cleanup_mask)
+       cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
+       for_each_cpu(i, &cleanup_mask)
                platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
        cfg->move_in_progress = 0;
 }
@@ -340,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
                if (!cfg->move_cleanup_count)
                        goto unlock;
 
-               if (!cpu_isset(me, cfg->old_domain))
+               if (!cpumask_test_cpu(me, &cfg->old_domain))
                        goto unlock;
 
                spin_lock_irqsave(&vector_lock, flags);
                __this_cpu_write(vector_irq[vector], -1);
-               cpu_clear(me, vector_table[vector]);
+               cpumask_clear_cpu(me, &vector_table[vector]);
                spin_unlock_irqrestore(&vector_lock, flags);
                cfg->move_cleanup_count--;
        unlock:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8bfd36af46f8..dd5801eb4c69 100644
@@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                monarch_cpu = cpu;
                sos->monarch = 1;
        } else {
-               cpu_set(cpu, mca_cpu);
+               cpumask_set_cpu(cpu, &mca_cpu);
                sos->monarch = 0;
        }
        mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 */
                ia64_mca_wakeup_all();
        } else {
-               while (cpu_isset(cpu, mca_cpu))
+               while (cpumask_test_cpu(cpu, &mca_cpu))
                        cpu_relax();    /* spin until monarch wakes us */
        }
 
@@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 * and put this cpu in the rendez loop.
                 */
                for_each_online_cpu(i) {
-                       if (cpu_isset(i, mca_cpu)) {
+                       if (cpumask_test_cpu(i, &mca_cpu)) {
                                monarch_cpu = i;
-                               cpu_clear(i, mca_cpu);  /* wake next cpu */
+                               cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */
                                while (monarch_cpu != -1)
                                        cpu_relax();    /* spin until last cpu leaves */
                                set_curr_task(cpu, previous_current);
@@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
        ti->cpu = cpu;
        p->stack = ti;
        p->state = TASK_UNINTERRUPTIBLE;
-       cpu_set(cpu, p->cpus_allowed);
+       cpumask_set_cpu(cpu, &p->cpus_allowed);
        INIT_LIST_HEAD(&p->tasks);
        p->parent = p->real_parent = p->group_leader = p;
        INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 8ae36ea177d3..9dd7464f8c17 100644
@@ -47,15 +47,14 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
        struct msi_msg  msg;
        unsigned long   dest_phys_id;
        int     irq, vector;
-       cpumask_t mask;
 
        irq = create_irq();
        if (irq < 0)
                return irq;
 
        irq_set_msi_desc(irq, desc);
-       cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
-       dest_phys_id = cpu_physical_id(first_cpu(mask));
+       dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
+                                                      cpu_online_mask));
        vector = irq_to_vector(irq);
 
        msg.address_hi = 0;
@@ -171,10 +170,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned dest;
-       cpumask_t mask;
 
-       cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
-       dest = cpu_physical_id(first_cpu(mask));
+       dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
+                                                cpu_online_mask));
 
        msg->address_hi = 0;
        msg->address_lo =
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index d288cde93606..92c376279c6d 100644
@@ -39,7 +39,7 @@ void map_cpu_to_node(int cpu, int nid)
        }
        /* sanity check first */
        oldnid = cpu_to_node_map[cpu];
-       if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
+       if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
                return; /* nothing to do */
        }
        /* we don't have cpu-driven node hot add yet...
@@ -47,16 +47,16 @@ void map_cpu_to_node(int cpu, int nid)
        if (!node_online(nid))
                nid = first_online_node;
        cpu_to_node_map[cpu] = nid;
-       cpu_set(cpu, node_to_cpu_mask[nid]);
+       cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
        return;
 }
 
 void unmap_cpu_from_node(int cpu, int nid)
 {
-       WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
+       WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
        WARN_ON(cpu_to_node_map[cpu] != nid);
        cpu_to_node_map[cpu] = 0;
-       cpu_clear(cpu, node_to_cpu_mask[nid]);
+       cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
 }
 
 
@@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void)
        int cpu, i, node;
 
        for(node=0; node < MAX_NUMNODES; node++)
-               cpus_clear(node_to_cpu_mask[node]);
+               cpumask_clear(&node_to_cpu_mask[node]);
 
        for_each_possible_early_cpu(cpu) {
                node = -1;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ee9719eebb1e..1eeffb7fbb16 100644
@@ -256,7 +256,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
                        data_saved->buffer = buffer;
                }
        }
-       cpu_set(smp_processor_id(), data->cpu_event);
+       cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
        if (irqsafe) {
                salinfo_work_to_do(data);
                spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -274,7 +274,7 @@ salinfo_timeout_check(struct salinfo_data *data)
        unsigned long flags;
        if (!data->open)
                return;
-       if (!cpus_empty(data->cpu_event)) {
+       if (!cpumask_empty(&data->cpu_event)) {
                spin_lock_irqsave(&data_saved_lock, flags);
                salinfo_work_to_do(data);
                spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -308,7 +308,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
        int i, n, cpu = -1;
 
 retry:
-       if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
+       if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                if (down_interruptible(&data->mutex))
@@ -317,9 +317,9 @@ retry:
 
        n = data->cpu_check;
        for (i = 0; i < nr_cpu_ids; i++) {
-               if (cpu_isset(n, data->cpu_event)) {
+               if (cpumask_test_cpu(n, &data->cpu_event)) {
                        if (!cpu_online(n)) {
-                               cpu_clear(n, data->cpu_event);
+                               cpumask_clear_cpu(n, &data->cpu_event);
                                continue;
                        }
                        cpu = n;
@@ -451,7 +451,7 @@ retry:
                call_on_cpu(cpu, salinfo_log_read_cpu, data);
        if (!data->log_size) {
                data->state = STATE_NO_DATA;
-               cpu_clear(cpu, data->cpu_event);
+               cpumask_clear_cpu(cpu, &data->cpu_event);
        } else {
                data->state = STATE_LOG_RECORD;
        }
@@ -491,11 +491,11 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
        unsigned long flags;
        spin_lock_irqsave(&data_saved_lock, flags);
        data->state = STATE_NO_DATA;
-       if (!cpu_isset(cpu, data->cpu_event)) {
+       if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
                spin_unlock_irqrestore(&data_saved_lock, flags);
                return 0;
        }
-       cpu_clear(cpu, data->cpu_event);
+       cpumask_clear_cpu(cpu, &data->cpu_event);
        if (data->saved_num) {
                shift1_data_saved(data, data->saved_num - 1);
                data->saved_num = 0;
@@ -509,7 +509,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
        salinfo_log_new_read(cpu, data);
        if (data->state == STATE_LOG_RECORD) {
                spin_lock_irqsave(&data_saved_lock, flags);
-               cpu_set(cpu, data->cpu_event);
+               cpumask_set_cpu(cpu, &data->cpu_event);
                salinfo_work_to_do(data);
                spin_unlock_irqrestore(&data_saved_lock, flags);
        }
@@ -581,7 +581,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
                for (i = 0, data = salinfo_data;
                     i < ARRAY_SIZE(salinfo_data);
                     ++i, ++data) {
-                       cpu_set(cpu, data->cpu_event);
+                       cpumask_set_cpu(cpu, &data->cpu_event);
                        salinfo_work_to_do(data);
                }
                spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -601,7 +601,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
                                        shift1_data_saved(data, j);
                                }
                        }
-                       cpu_clear(cpu, data->cpu_event);
+                       cpumask_clear_cpu(cpu, &data->cpu_event);
                }
                spin_unlock_irqrestore(&data_saved_lock, flags);
                break;
@@ -659,7 +659,7 @@ salinfo_init(void)
 
                /* we missed any events before now */
                for_each_online_cpu(j)
-                       cpu_set(j, data->cpu_event);
+                       cpumask_set_cpu(j, &data->cpu_event);
 
                *sdir++ = dir;
        }
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d86669bcdfb2..b9761389cb8d 100644
@@ -562,8 +562,8 @@ setup_arch (char **cmdline_p)
 #  ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
 #  endif
-       per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-               32 : cpus_weight(early_cpu_possible_map)),
+       per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+               32 : cpumask_weight(&early_cpu_possible_map)),
                additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #endif /* CONFIG_APCI_BOOT */
@@ -702,7 +702,8 @@ show_cpuinfo (struct seq_file *m, void *v)
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
-       seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
+       seq_printf(m, "siblings   : %u\n",
+                  cpumask_weight(&cpu_core_map[cpunum]));
        if (c->socket_id != -1)
                seq_printf(m, "physical id: %u\n", c->socket_id);
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
@@ -933,8 +934,8 @@ cpu_init (void)
         * (must be done after per_cpu area is setup)
         */
        if (smp_processor_id() == 0) {
-               cpu_set(0, per_cpu(cpu_sibling_map, 0));
-               cpu_set(0, cpu_core_map[0]);
+               cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
+               cpumask_set_cpu(0, &cpu_core_map[0]);
        } else {
                /*
                 * Set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9fcd4e63048f..7f706d4f84f7 100644
@@ -262,11 +262,11 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
        preempt_disable();
        mycpu = smp_processor_id();
 
-       for_each_cpu_mask(cpu, cpumask)
+       for_each_cpu(cpu, &cpumask)
                counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
 
        mb();
-       for_each_cpu_mask(cpu, cpumask) {
+       for_each_cpu(cpu, &cpumask) {
                if (cpu == mycpu)
                        flush_mycpu = 1;
                else
@@ -276,7 +276,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
        if (flush_mycpu)
                smp_local_flush_tlb();
 
-       for_each_cpu_mask(cpu, cpumask)
+       for_each_cpu(cpu, &cpumask)
                while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
                        udelay(FLUSH_DELAY);
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 547a48d78bd7..15051e9c2c6f 100644
@@ -434,7 +434,7 @@ smp_callin (void)
        /*
         * Allow the master to continue.
         */
-       cpu_set(cpuid, cpu_callin_map);
+       cpumask_set_cpu(cpuid, &cpu_callin_map);
        Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
         */
        Dprintk("Waiting on callin_map ...");
        for (timeout = 0; timeout < 100000; timeout++) {
-               if (cpu_isset(cpu, cpu_callin_map))
+               if (cpumask_test_cpu(cpu, &cpu_callin_map))
                        break;  /* It has booted */
                udelay(100);
        }
        Dprintk("\n");
 
-       if (!cpu_isset(cpu, cpu_callin_map)) {
+       if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                ia64_cpu_to_sapicid[cpu] = -1;
                set_cpu_online(cpu, false);  /* was set in smp_callin() */
@@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
        smp_setup_percpu_timer();
 
-       cpu_set(0, cpu_callin_map);
+       cpumask_set_cpu(0, &cpu_callin_map);
 
        local_cpu_data->loops_per_jiffy = loops_per_jiffy;
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 void smp_prepare_boot_cpu(void)
 {
        set_cpu_online(smp_processor_id(), true);
-       cpu_set(smp_processor_id(), cpu_callin_map);
+       cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
        set_numa_node(cpu_to_node_map[smp_processor_id()]);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        paravirt_post_smp_prepare_boot_cpu();
@@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu)
 {
        int i;
 
-       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-       for_each_cpu_mask(i, cpu_core_map[cpu])
-               cpu_clear(cpu, cpu_core_map[i]);
+       for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+       for_each_cpu(i, &cpu_core_map[cpu])
+               cpumask_clear_cpu(cpu, &cpu_core_map[i]);
 
        per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
@@ -592,12 +592,12 @@ remove_siblinginfo(int cpu)
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_clear(cpu, cpu_core_map[cpu]);
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+               cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
                return;
        }
 
-       last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+       last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
 
        /* remove it from all sibling map's */
        clear_cpu_sibling_map(cpu);
@@ -673,7 +673,7 @@ int __cpu_disable(void)
        remove_siblinginfo(cpu);
        fixup_irqs();
        local_flush_tlb_all();
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
        return 0;
 }
 
@@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu)
 
        for_each_online_cpu(i) {
                if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
                        if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpumask_set_cpu(i,
+                                               &per_cpu(cpu_sibling_map, cpu));
+                               cpumask_set_cpu(cpu,
+                                               &per_cpu(cpu_sibling_map, i));
                        }
                }
        }
@@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
         * Already booted cpu? not valid anymore since we dont
         * do idle loop tightspin anymore.
         */
-       if (cpu_isset(cpu, cpu_callin_map))
+       if (cpumask_test_cpu(cpu, &cpu_callin_map))
                return -EINVAL;
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-               cpu_set(cpu, cpu_core_map[cpu]);
+               cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
+               cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
                return 0;
        }
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 965ab42fabb0..c01fe8991244 100644
@@ -148,7 +148,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 
        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
-               cpu_set(cpu, this_leaf->shared_cpu_map);
+               cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                return;
        }
 
@@ -164,7 +164,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
-                               cpu_set(j, this_leaf->shared_cpu_map);
+                               cpumask_set_cpu(j, &this_leaf->shared_cpu_map);
 
                i++;
        } while (i < num_shared &&
@@ -177,7 +177,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info * this_leaf)
 {
-       cpu_set(cpu, this_leaf->shared_cpu_map);
+       cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
        return;
 }
 #endif
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index bb21f4f63170..a468467542f4 100644
@@ -376,7 +376,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
        if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
                BUG();
 
-       for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
+       for_each_online_cpu(cpu_id)
                show_cpu_info(cpu_id);
 
        /*
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index b94bf44d8d8e..e3e808a6c542 100644
@@ -58,9 +58,9 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
 
 #ifdef CONFIG_SMP
        if (m)
-               enable &= cpu_isset(cpu, *m);
+               enable &= cpumask_test_cpu(cpu, m);
        else if (irqd_affinity_was_set(d))
-               enable &= cpu_isset(cpu, *d->affinity);
+               enable &= cpumask_test_cpu(cpu, d->affinity);
 #endif
        return enable;
 }
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 8b1eeffa12ed..56f5d080ef9d 100644
@@ -72,7 +72,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 {
        unsigned int i;
 
-       for_each_cpu_mask(i, *mask)
+       for_each_cpu(i, mask)
                octeon_send_ipi_single(i, action);
 }
 
@@ -239,7 +239,7 @@ static int octeon_cpu_disable(void)
                return -ENOTSUPP;
 
        set_cpu_online(cpu, false);
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
        octeon_fixup_irqs();
 
        flush_cache_all();
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eacf865d21c2..bb02fac9b4fa 100644
@@ -88,7 +88,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 {
        extern struct plat_smp_ops *mp_ops;     /* private */
 
-       mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
+       mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
 }
 
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d21264681e97..d434d5d5ae6e 100644
@@ -25,9 +25,9 @@ static void crash_shutdown_secondary(void *ignore)
                return;
 
        local_irq_disable();
-       if (!cpu_isset(cpu, cpus_in_crash))
+       if (!cpumask_test_cpu(cpu, &cpus_in_crash))
                crash_save_cpu(regs, cpu);
-       cpu_set(cpu, cpus_in_crash);
+       cpumask_set_cpu(cpu, &cpus_in_crash);
 
        while (!atomic_read(&kexec_ready_to_reboot))
                cpu_relax();
@@ -50,7 +50,7 @@ static void crash_kexec_prepare_cpus(void)
         */
        pr_emerg("Sending IPI to other cpus...\n");
        msecs = 10000;
-       while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+       while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
                cpu_relax();
                mdelay(1);
        }
@@ -66,5 +66,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
        crashing_cpu = smp_processor_id();
        crash_save_cpu(regs, crashing_cpu);
        crash_kexec_prepare_cpus();
-       cpu_set(crashing_cpu, cpus_in_crash);
+       cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 }
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 362bb3707e62..3e4491aa6d6b 100644
@@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
        /* Compute new global allowed CPU set if necessary */
        ti = task_thread_info(p);
        if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-           cpus_intersects(*new_mask, mt_fpu_cpumask)) {
-               cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+           cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+               cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
                retval = set_cpus_allowed_ptr(p, effective_mask);
        } else {
                cpumask_copy(effective_mask, new_mask);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d295bd1e4996..f2975d4d1e44 100644
@@ -49,7 +49,7 @@
 void arch_cpu_idle_dead(void)
 {
        /* What the heck is this check doing ? */
-       if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+       if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
                play_dead();
 }
 #endif
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index b8bd9340c9c7..fd528d7ea278 100644
@@ -362,7 +362,7 @@ static int bmips_cpu_disable(void)
        pr_info("SMP: CPU%d is offline\n", cpu);
 
        set_cpu_online(cpu, false);
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
        clear_c0_status(IE_IRQ5);
 
        local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index e36a859af666..d5e0f949dc48 100644
@@ -66,7 +66,7 @@ static void cmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+               cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
        local_irq_enable();
@@ -110,7 +110,7 @@ void __init cmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(0, mt_fpu_cpumask);
+               cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
        for (i = 1; i < NR_CPUS; i++) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index d5589bedd0a4..7e011f95bb8e 100644
@@ -290,7 +290,7 @@ static void cps_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+               cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
        local_irq_enable();
@@ -313,7 +313,7 @@ static int cps_cpu_disable(void)
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
        smp_mb__after_atomic();
        set_cpu_online(cpu, false);
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
 
        return 0;
 }
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 17ea705f6c40..86311a164ef1 100644
@@ -178,7 +178,7 @@ static void vsmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+               cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
        local_irq_enable();
@@ -239,7 +239,7 @@ static void __init vsmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(0, mt_fpu_cpumask);
+               cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
        if (!cpu_has_mipsmt)
                return;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 5b020bda3e05..193ace7955fb 100644
@@ -75,30 +75,30 @@ static inline void set_cpu_sibling_map(int cpu)
 {
        int i;
 
-       cpu_set(cpu, cpu_sibling_setup_map);
+       cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 
        if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
                                    cpu_data[cpu].core == cpu_data[i].core) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+                               cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
 }
 
 static inline void set_cpu_core_map(int cpu)
 {
        int i;
 
-       cpu_set(cpu, cpu_core_setup_map);
+       cpumask_set_cpu(cpu, &cpu_core_setup_map);
 
-       for_each_cpu_mask(i, cpu_core_setup_map) {
+       for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
 }
@@ -138,7 +138,7 @@ asmlinkage void start_secondary(void)
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-       cpu_set(cpu, cpu_coherent_mask);
+       cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
        set_cpu_online(cpu, true);
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);
 
-       cpu_set(cpu, cpu_callin_map);
+       cpumask_set_cpu(cpu, &cpu_callin_map);
 
        synchronise_count_slave(cpu);
 
@@ -208,7 +208,7 @@ void smp_prepare_boot_cpu(void)
 {
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
-       cpu_set(0, cpu_callin_map);
+       cpumask_set_cpu(0, &cpu_callin_map);
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -218,7 +218,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        /*
         * Trust is futile.  We should really have timeouts ...
         */
-       while (!cpu_isset(cpu, cpu_callin_map))
+       while (!cpumask_test_cpu(cpu, &cpu_callin_map))
                udelay(100);
 
        synchronise_count_master(cpu);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e334c641a81b..ba32e48d4697 100644
@@ -1153,13 +1153,13 @@ static void mt_ase_fp_affinity(void)
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
-               if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+               if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
                        cpumask_t tmask;
 
                        current->thread.user_cpus_allowed
                                = current->cpus_allowed;
-                       cpus_and(tmask, current->cpus_allowed,
-                               mt_fpu_cpumask);
+                       cpumask_and(&tmask, &current->cpus_allowed,
+                                   &mt_fpu_cpumask);
                        set_cpus_allowed_ptr(current, &tmask);
                        set_thread_flag(TIF_FPUBOUND);
                }
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 6cae0e75de27..12d14ed48778 100644
@@ -233,7 +233,7 @@ static __init void prom_meminit(void)
                if (node_online(node)) {
                        szmem(node);
                        node_mem_init(node);
-                       cpus_clear(__node_data[(node)]->cpumask);
+                       cpumask_clear(&__node_data[(node)]->cpumask);
                }
        }
        for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
@@ -244,7 +244,7 @@ static __init void prom_meminit(void)
                if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
                        continue;
 
-               cpu_set(active_cpu, __node_data[(node)]->cpumask);
+               cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask);
                pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
 
                active_cpu++;
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index e2eb688b5434..e3c68b5da18d 100644
@@ -408,7 +408,7 @@ static int loongson3_cpu_disable(void)
                return -EBUSY;
 
        set_cpu_online(cpu, false);
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
        local_irq_save(flags);
        fixup_irqs();
        local_irq_restore(flags);
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 0164b0c48352..42181c7105df 100644
@@ -75,7 +75,7 @@ static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int acti
 {
        unsigned int cpu;
 
-       for_each_cpu_mask(cpu, *mask)
+       for_each_cpu(cpu, mask)
                paravirt_send_ipi_single(cpu, action);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index ee736bd103f8..570098bfdf87 100644
@@ -60,7 +60,7 @@ static void per_hub_init(cnodeid_t cnode)
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
        int i;
 
-       cpu_set(smp_processor_id(), hub->h_cpus);
+       cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
 
        if (test_and_set_bit(cnode, hub_init_mask))
                return;
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index ecbb62f339c5..bda90cf87e8c 100644
@@ -29,8 +29,8 @@ static cpumask_t ktext_repmask;
 void __init setup_replication_mask(void)
 {
        /* Set only the master cnode's bit.  The master cnode is always 0. */
-       cpus_clear(ktext_repmask);
-       cpu_set(0, ktext_repmask);
+       cpumask_clear(&ktext_repmask);
+       cpumask_set_cpu(0, &ktext_repmask);
 
 #ifdef CONFIG_REPLICATE_KTEXT
 #ifndef CONFIG_MAPPED_KERNEL
@@ -43,7 +43,7 @@ void __init setup_replication_mask(void)
                        if (cnode == 0)
                                continue;
                        /* Advertise that we have a copy of the kernel */
-                       cpu_set(cnode, ktext_repmask);
+                       cpumask_set_cpu(cnode, &ktext_repmask);
                }
        }
 #endif
@@ -99,7 +99,7 @@ void __init replicate_kernel_text()
                client_nasid = COMPACT_TO_NASID_NODEID(cnode);
 
                /* Check if this node should get a copy of the kernel */
-               if (cpu_isset(cnode, ktext_repmask)) {
+               if (cpumask_test_cpu(cnode, &ktext_repmask)) {
                        server_nasid = client_nasid;
                        copy_kernel(server_nasid);
                }
@@ -124,7 +124,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode)
        loadbase += 16777216;
 #endif
        offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
-       if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
+       if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask)))
                return TO_NODE(nasid, offset) >> PAGE_SHIFT;
        else
                return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 0b68469e063f..8d0eb2643248 100644
@@ -404,7 +404,7 @@ static void __init node_mem_init(cnodeid_t node)
        NODE_DATA(node)->node_start_pfn = start_pfn;
        NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
-       cpus_clear(hub_data(node)->h_cpus);
+       cpumask_clear(&hub_data(node)->h_cpus);
 
        slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
                               sizeof(struct hub_data));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index cfe056fe7f5c..f3191db6e2e9 100644
@@ -525,8 +525,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        desc = irq_to_desc(irq);
        cpumask_copy(&dest, desc->irq_data.affinity);
        if (irqd_is_per_cpu(&desc->irq_data) &&
-           !cpu_isset(smp_processor_id(), dest)) {
-               int cpu = first_cpu(dest);
+           !cpumask_test_cpu(smp_processor_id(), &dest)) {
+               int cpu = cpumask_first(&dest);
 
                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 4c8ad592ae33..5be6c4753667 100644
@@ -25,7 +25,7 @@ extern cpumask_t threads_core_mask;
 #define threads_per_core       1
 #define threads_per_subcore    1
 #define threads_shift          0
-#define threads_core_mask      (CPU_MASK_CPU0)
+#define threads_core_mask      (*get_cpu_mask(0))
 #endif
 
 /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index b9d9489a5012..9f417feaf6e8 100644
@@ -99,7 +99,7 @@ static inline int init_new_context(struct task_struct *tsk,
 {
        int i;
 
-       for (i = 0; i < num_online_cpus(); i++)
+       for_each_online_cpu(i)
                cpu_context(i, mm) = NO_CONTEXT;
 
        return 0;
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index fc5acfc93c92..de6be008fc01 100644
@@ -363,7 +363,7 @@ void flush_tlb_mm(struct mm_struct *mm)
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
-               for (i = 0; i < num_online_cpus(); i++)
+               for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
@@ -400,7 +400,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
-               for (i = 0; i < num_online_cpus(); i++)
+               for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
@@ -443,7 +443,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
-               for (i = 0; i < num_online_cpus(); i++)
+               for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 18147a5523d9..8caf45ee81d9 100644
@@ -194,7 +194,7 @@ static __init int setup_timer_cs(void)
 static void percpu_ce_setup(enum clock_event_mode mode,
                        struct clock_event_device *evt)
 {
-       int cpu = __first_cpu(evt->cpumask);
+       int cpu = cpumask_first(evt->cpumask);
 
        switch (mode) {
                case CLOCK_EVT_MODE_PERIODIC:
@@ -214,7 +214,7 @@ static void percpu_ce_setup(enum clock_event_mode mode,
 static int percpu_ce_set_next_event(unsigned long delta,
                                    struct clock_event_device *evt)
 {
-       int cpu = __first_cpu(evt->cpumask);
+       int cpu = cpumask_first(evt->cpumask);
        unsigned int next = (unsigned int)delta;
 
        sparc_config.load_profile_irq(cpu, next);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 7833b2ccdfbc..6873f006f7d0 100644
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void)
                 * though, there'll be no lowmem, so we just alloc_bootmem
                 * the memmap.  There will be no percpu memory either.
                 */
-               if (i != 0 && cpu_isset(i, isolnodes)) {
+               if (i != 0 && cpumask_test_cpu(i, &isolnodes)) {
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(0, memmap_size, 0);
                        BUG_ON(node_percpu[i] != 0);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index d9d0bd2faaf4..ab3219b3fbda 100644
@@ -171,8 +171,8 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
                for_each_online_cpu(cpu) {
                        if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                                continue;
-                       __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
-                       __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
+                       cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+                       cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
                }
                free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                free_cpumask_var(per_cpu(ipi_mask, this_cpu));
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f3656a6b0382..35a88097af3c 100644
@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode,
        unsigned long period;
        struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 
-       pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+       pr_debug("%s CPU %d mode=%d\n", __func__,
+                cpumask_first(evt->cpumask),
                 mode);
 
        switch (mode) {
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 73fe2f8d7f96..7936dce4b878 100644
@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
         */
        smp_rmb();
 
-       for_each_cpu_mask(i, coupled->coupled_cpus)
+       for_each_cpu(i, &coupled->coupled_cpus)
                if (cpu_online(i) && coupled->requested_state[i] < state)
                        state = coupled->requested_state[i];
 
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
 {
        int cpu;
 
-       for_each_cpu_mask(cpu, coupled->coupled_cpus)
+       for_each_cpu(cpu, &coupled->coupled_cpus)
                if (cpu != this_cpu && cpu_online(cpu))
                        cpuidle_coupled_poke(cpu);
 }
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
        if (cpumask_empty(&dev->coupled_cpus))
                return 0;
 
-       for_each_cpu_mask(cpu, dev->coupled_cpus) {
+       for_each_cpu(cpu, &dev->coupled_cpus) {
                other_dev = per_cpu(cpuidle_devices, cpu);
                if (other_dev && other_dev->coupled) {
                        coupled = other_dev->coupled;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index afd136b45f49..10a9aeff1666 100644
@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
                                dev->dev.of_node->full_name);
                        return -EINVAL;
                }
-               cpu_set(*id, p->sharing);
+               cpumask_set_cpu(*id, &p->sharing);
                table[*id] = p;
        }
        return 0;
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
                return -ENOMEM;
        }
 
-       cpus_clear(p->sharing);
+       cpumask_clear(&p->sharing);
        spin_lock_init(&p->lock);
        p->q_type = q_type;
        INIT_LIST_HEAD(&p->jobs);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 4f2fb62e6f37..49875adb6b44 100644
@@ -567,7 +567,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
         */
        smp_wmb();
 
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index bc48b7dc89ec..57f09cb54464 100644
@@ -389,19 +389,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        int             i;
 
        cpumask_and(&tmp, cpumask, cpu_online_mask);
-       if (cpus_empty(tmp))
+       if (cpumask_empty(&tmp))
                return -EINVAL;
 
        /* Assumption : cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);
 
        /* Re-route this IRQ */
-       gic_map_to_vpe(irq, first_cpu(tmp));
+       gic_map_to_vpe(irq, cpumask_first(&tmp));
 
        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
-       set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+       set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index a789a2054388..a3f7610002aa 100644
@@ -1123,7 +1123,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
                        addr + i * sizeof(struct tile_net_comps);
 
        /* If this is a network cpu, create an iqueue. */
-       if (cpu_isset(cpu, network_cpus_map)) {
+       if (cpumask_test_cpu(cpu, &network_cpus_map)) {
                order = get_order(NOTIF_RING_SIZE);
                page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
                if (page == NULL) {
@@ -1299,7 +1299,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
        int first_ring, ring;
        int instance = mpipe_instance(dev);
        struct mpipe_data *md = &mpipe_data[instance];
-       int network_cpus_count = cpus_weight(network_cpus_map);
+       int network_cpus_count = cpumask_weight(&network_cpus_map);
 
        if (!hash_default) {
                netdev_err(dev, "Networking requires hash_default!\n");
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1cfbd3dda47..8eab107b53fb 100644
@@ -6632,14 +6632,12 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h)
 
 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
 {
-       int i, cpu;
+       int cpu;
 
-       cpu = cpumask_first(cpu_online_mask);
-       for (i = 0; i < num_online_cpus(); i++) {
+       for_each_online_cpu(cpu) {
                u32 *lockup_detected;
                lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
                *lockup_detected = value;
-               cpu = cpumask_next(cpu, cpu_online_mask);
        }
        wmb(); /* be sure the per-cpu variables are out to memory */
 }
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 086549a665e2..27e285b92b5f 100644
@@ -11,6 +11,7 @@
 #include <linux/bitmap.h>
 #include <linux/bug.h>
 
+/* Don't assign or return these: may not be this big! */
 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
 /**
@@ -289,11 +290,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
  * @cpumask: the cpumask pointer
  *
  * Returns 1 if @cpu is set in @cpumask, else returns 0
- *
- * No static inline type checking - see Subtlety (1) above.
  */
-#define cpumask_test_cpu(cpu, cpumask) \
-       test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
+static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+{
+       return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
+}
 
 /**
  * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
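
Turning the cpumask_test_cpu() macro into a static inline is what gives the
compiler a chance to reject the misuse that prompted this series: the macro
expanded to test_bit() on whatever ->bits it was handed, with no check that
the argument is really a struct cpumask pointer. A minimal userspace
illustration of the difference (struct mask, struct other, old_test and
new_test are all hypothetical names):

	#include <stdio.h>

	struct mask  { unsigned long bits[1]; };
	struct other { unsigned long bits[1]; };  /* wrong type, same shape */

	/* Macro style: anything with a ->bits member slips through. */
	#define old_test(cpu, m) (((m)->bits[0] >> (cpu)) & 1UL)

	/* Inline style: the prototype pins the argument to one type. */
	static inline int new_test(int cpu, const struct mask *m)
	{
		return (m->bits[0] >> cpu) & 1UL;
	}

	int main(void)
	{
		struct mask  m = { { 0x4UL } };
		struct other o = { { 0x4UL } };

		printf("%lu\n", old_test(2, &m));  /* 1 */
		printf("%lu\n", old_test(2, &o));  /* also compiles: no checking */
		printf("%d\n",  new_test(2, &m));  /* 1, and the type is enforced */
		/* new_test(2, &o) would now fail to compile - the point. */
		return 0;
	}
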
@@ -609,9 +610,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
  */
 static inline size_t cpumask_size(void)
 {
-       /* FIXME: Once all cpumask assignments are eliminated, this
-        * can be nr_cpumask_bits */
-       return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+       return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
 }
 
 /*
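
With every remaining user going through pointers, cpumask_size() can finally
report nr_cpumask_bits instead of NR_CPUS, so offstack allocations shrink to
what the machine actually has (nr_cpumask_bits is nr_cpu_ids with
CONFIG_CPUMASK_OFFSTACK=y, NR_CPUS otherwise). The saving is easy to put a
number on; a runnable back-of-the-envelope with illustrative figures:

	#include <stdio.h>

	#define NR_CPUS 8192                    /* big distro build, illustrative */
	static int nr_cpu_ids = 4;              /* what boot enumeration found */

	#define BITS_PER_LONG (8 * (int)sizeof(long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		size_t before = BITS_TO_LONGS(NR_CPUS) * sizeof(long);
		size_t after  = BITS_TO_LONGS(nr_cpu_ids) * sizeof(long);

		/* On LP64: 1024 bytes vs 8 bytes per offstack cpumask. */
		printf("old cpumask_size(): %zu bytes\n", before);
		printf("new cpumask_size(): %zu bytes\n", after);
		return 0;
	}
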
@@ -768,7 +767,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 #if NR_CPUS <= BITS_PER_LONG
 #define CPU_BITS_ALL                                           \
 {                                                              \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+       [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)     \
 }
 
 #else /* NR_CPUS > BITS_PER_LONG */
@@ -776,7 +775,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 #define CPU_BITS_ALL                                           \
 {                                                              \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
+       [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)     \
 }
 #endif /* NR_CPUS > BITS_PER_LONG */
 
@@ -797,32 +796,18 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
                                      nr_cpu_ids);
 }
 
-/*
- *
- * From here down, all obsolete.  Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
 #if NR_CPUS <= BITS_PER_LONG
-
 #define CPU_MASK_ALL                                                   \
 (cpumask_t) { {                                                                \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+       [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)     \
 } }
-
 #else
-
 #define CPU_MASK_ALL                                                   \
 (cpumask_t) { {                                                                \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
+       [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)     \
 } }
-
-#endif
+#endif /* NR_CPUS > BITS_PER_LONG */
 
 #define CPU_MASK_NONE                                                  \
 (cpumask_t) { {                                                                \
@@ -834,143 +819,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
        [0] =  1UL                                                      \
 } }
 
-#if NR_CPUS == 1
-#define first_cpu(src)         ({ (void)(src); 0; })
-#define next_cpu(n, src)       ({ (void)(src); 1; })
-#define any_online_cpu(mask)   0
-#define for_each_cpu_mask(cpu, mask)   \
-       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#else /* NR_CPUS > 1 */
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-
-#define first_cpu(src)         __first_cpu(&(src))
-#define next_cpu(n, src)       __next_cpu((n), &(src))
-#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
-#define for_each_cpu_mask(cpu, mask)                   \
-       for ((cpu) = -1;                                \
-               (cpu) = next_cpu((cpu), (mask)),        \
-               (cpu) < NR_CPUS; )
-#endif /* SMP */
-
-#if NR_CPUS <= 64
-
-#define for_each_cpu_mask_nr(cpu, mask)        for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define for_each_cpu_mask_nr(cpu, mask)                        \
-       for ((cpu) = -1;                                \
-               (cpu) = __next_cpu_nr((cpu), &(mask)),  \
-               (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
-       set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
-       clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
-{
-       bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
-{
-       bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-       return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, unsigned int nbits)
-{
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
-{
-       return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
-{
-       return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
 #endif /* __LINUX_CPUMASK_H */
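
Every helper deleted above has a one-to-one cpumask_* replacement that takes
pointers and stops at nr_cpu_ids. For converting remaining out-of-tree users
the mapping is mechanical; a crib sheet drawn from this diff (the right-hand
names are the real linux/cpumask.h API):

	/*
	 * cpu_set(cpu, mask)           ->  cpumask_set_cpu(cpu, &mask)
	 * cpu_clear(cpu, mask)         ->  cpumask_clear_cpu(cpu, &mask)
	 * cpu_isset(cpu, mask)         ->  cpumask_test_cpu(cpu, &mask)
	 * cpus_and(dst, a, b)          ->  cpumask_and(&dst, &a, &b)
	 * cpus_or(dst, a, b)           ->  cpumask_or(&dst, &a, &b)
	 * cpus_xor(dst, a, b)          ->  cpumask_xor(&dst, &a, &b)
	 * cpus_empty(mask)             ->  cpumask_empty(&mask)
	 * cpus_equal(a, b)             ->  cpumask_equal(&a, &b)
	 * cpus_weight(mask)            ->  cpumask_weight(&mask)
	 * first_cpu(mask)              ->  cpumask_first(&mask)
	 * next_cpu(n, mask)            ->  cpumask_next(n, &mask)
	 * any_online_cpu(mask)         ->  cpumask_any_and(&mask, cpu_online_mask)
	 * for_each_cpu_mask(cpu, mask) ->  for_each_cpu(cpu, &mask)
	 */
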
index f5440221d9299a919f2db5fe9b59cee7e583039d..601965a948e8d40a3deff542882a772ab873e093 100644 (file)
@@ -396,10 +396,6 @@ config CPUMASK_OFFSTACK
          them on the stack.  This is a bit more expensive, but avoids
          stack overflow.
 
-config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-       bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
-       depends on BROKEN
-
 config CPU_RMAP
        bool
        depends on SMP
index 5ab1553fd07633c3b729a8501ce89238f37174f5..830dd5dec40f1697b2bf2e40a294e05284ded89c 100644 (file)
@@ -5,27 +5,6 @@
 #include <linux/export.h>
 #include <linux/bootmem.h>
 
-int __first_cpu(const cpumask_t *srcp)
-{
-       return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
-}
-EXPORT_SYMBOL(__first_cpu);
-
-int __next_cpu(int n, const cpumask_t *srcp)
-{
-       return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
-}
-EXPORT_SYMBOL(__next_cpu);
-
-#if NR_CPUS > 64
-int __next_cpu_nr(int n, const cpumask_t *srcp)
-{
-       return min_t(int, nr_cpu_ids,
-                               find_next_bit(srcp->bits, nr_cpu_ids, n+1));
-}
-EXPORT_SYMBOL(__next_cpu_nr);
-#endif
-
 /**
  * cpumask_next_and - get the next cpu in *src1p & *src2p
  * @n: the cpu prior to the place to search (ie. return will be > @n)
@@ -90,13 +69,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
                dump_stack();
        }
 #endif
-       /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
-       if (*mask) {
-               unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
-               unsigned int tail;
-               tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
-               memset(ptr + cpumask_size() - tail, 0, tail);
-       }
 
        return *mask != NULL;
 }
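
The memset removed here was a bandaid: offstack masks were allocated at the
full NR_CPUS size, and the tail beyond nr_cpumask_bits had to be zeroed so
the old cpus_* helpers, which scanned all NR_CPUS bits, could never see
garbage. With those helpers gone, every walker stops at nr_cpu_ids, so the
allocation itself shrinks to what cpumask_size() now reports and the
defensive zeroing has nothing left to protect. Typical offstack usage after
this series, sketched in kernel style (frob_online_cpus is an illustrative
name; this assumes kernel context and is not a standalone program):

	static int frob_online_cpus(void)
	{
		cpumask_var_t mask;     /* a pointer with CPUMASK_OFFSTACK=y */
		int cpu;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;         /* nr_cpumask_bits bits */

		cpumask_copy(mask, cpu_online_mask);    /* never assign structs */

		for_each_cpu(cpu, mask)                 /* stops at nr_cpu_ids */
			pr_info("cpu %d\n", cpu);

		free_cpumask_var(mask);
		return 0;
	}
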