Merge branch 'linus' into cpus4096-for-linus
author Ingo Molnar <mingo@elte.hu>
Mon, 21 Jul 2008 15:19:50 +0000 (17:19 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 21 Jul 2008 15:19:50 +0000 (17:19 +0200)
Conflicts:

net/sunrpc/svc.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
50 files changed:
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/genapic_flat_64.c
arch/x86/kernel/genx2apic_uv_x.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/microcode.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
drivers/acpi/processor_throttling.c
drivers/base/cpu.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_userspace.c
drivers/firmware/dcdbas.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/misc/sgi-xp/xpc_main.c
include/asm-x86/ipi.h
include/asm-x86/processor.h
include/linux/cpumask.h
kernel/cpu.c
kernel/rcuclassic.c
kernel/rcupreempt.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/stop_machine.c
kernel/taskstats.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/trace/trace_sysprof.c
kernel/workqueue.c
lib/cpumask.c
lib/smp_processor_id.c
mm/allocpercpu.c
mm/vmstat.c
net/core/dev.c
net/iucv/iucv.c
net/sunrpc/svc.c

diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa8355488a7057602bfdfa71134785a7..9220cf46aa10645a50ceb65e8ca24c69c9841958 100644
@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(new_mask, cpu);
        int retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
        /* Make sure we are running on right CPU */
        saved_mask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, new_mask);
        if (retval)
                return -1;
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa17363b5a3ed5ce35bdec8c52ad25..ff2fff56f0a8f2f1340a98a3305da24cd1dd3da1 100644
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
        cpumask_t saved_mask = current->cpus_allowed;
+       cpumask_of_cpu_ptr_declare(cpu_mask);
        unsigned int i;
 
-       for_each_cpu_mask(i, cmd->mask) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+       for_each_cpu_mask_nr(i, cmd->mask) {
+               cpumask_of_cpu_ptr_next(cpu_mask, i);
+               set_cpus_allowed_ptr(current, cpu_mask);
                do_drv_write(cmd);
        }
 
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
        } aperf_cur, mperf_cur;
 
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        unsigned int perf_percent;
        unsigned int retval;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        }
 
        cached_freq = data->freq_table[data->acpi_data->state].frequency;
-       freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+       freq = extract_freq(get_cur_val(cpu_mask), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
        freqs.old = perf->states[perf->state].core_frequency * 1000;
        freqs.new = data->freq_table[next_state].frequency;
-       for_each_cpu_mask(i, cmd.mask) {
+       for_each_cpu_mask_nr(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                }
        }
 
-       for_each_cpu_mask(i, cmd.mask) {
+       for_each_cpu_mask_nr(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
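
The two file diffs above show the conversion this branch applies throughout: an on-stack cpumask_t built with cpumask_of_cpu() is replaced by a pointer obtained from the cpumask_of_cpu_ptr*() helpers, so pinning a task to one CPU no longer copies a potentially very large mask. A minimal sketch of the resulting pattern (illustration only, not code from this commit; the helper names are the ones used in the hunks above, while the wrapper run_on_one_cpu() is hypothetical):

        /* Sketch: temporarily restrict the current task to one CPU. */
        static int run_on_one_cpu(unsigned int cpu)
        {
                cpumask_t saved_mask = current->cpus_allowed;
                cpumask_of_cpu_ptr(target, cpu); /* pointer to a mask containing only 'cpu' */
                int ret;

                ret = set_cpus_allowed_ptr(current, target);
                if (ret)
                        return ret;

                /* ... work that must execute on 'cpu' ... */

                set_cpus_allowed_ptr(current, &saved_mask);
                return 0;
        }
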
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc1c7cb82cbe417864377cb399d2d8..f1685fb91fbd313058b92af93ee055af160a2447 100644
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
                return 0;
 
        /* notifiers */
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
        /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
         * Developer's Manual, Volume 3
         */
-       for_each_cpu_mask(i, policy->cpus)
+       for_each_cpu_mask_nr(i, policy->cpus)
                cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
        /* notifiers */
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3da04f61f70c2af2eb55d384bd606..53c7b6936973c6da85137145ed0eea8eec18746f 100644
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr(cpu_mask, cpu);
        u32 eax, ebx, ecx, edx;
        unsigned int rc = 0;
 
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
 
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
        freqs.old = find_khz_freq_from_fid(data->currfid);
        freqs.new = find_khz_freq_from_fid(fid);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
        res = transition_fid_vid(data, fid, vid);
        freqs.new = find_khz_freq_from_fid(data->currfid);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
        freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
        res = transition_pstate(data, pstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+       set_cpus_allowed_ptr(current, cpu_mask);
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask;
+       cpumask_of_cpu_ptr_declare(newmask);
        int rc;
 
        if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+       cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+       set_cpus_allowed_ptr(current, newmask);
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
-               pol->cpus = cpumask_of_cpu(pol->cpu);
+               pol->cpus = *newmask;
        else
                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask = current->cpus_allowed;
+       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int khz = 0;
        unsigned int first;
 
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
        if (!data)
                return -EINVAL;
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX
                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67ec3dc29aa5ab8b3dcbae9a12e5e37..ca2ac13b7af20b2221aaa1d883a7fb89821bf4d5 100644
@@ -28,7 +28,8 @@
 #define PFX            "speedstep-centrino: "
 #define MAINTAINER     "cpufreq@lists.linux.org.uk"
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
+#define dprintk(msg...) \
+       cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
 #define INTEL_MSR_RANGE        (0xffff)
 
@@ -66,11 +67,12 @@ struct cpu_model
 
        struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
 };
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+                                 const struct cpu_id *x);
 
 /* Operating points for current CPU */
-static struct cpu_model *centrino_model[NR_CPUS];
-static const struct cpu_id *centrino_cpu[NR_CPUS];
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
 
 static struct cpufreq_driver centrino_driver;
 
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
                return -ENOENT;
        }
 
-       centrino_model[policy->cpu] = model;
+       per_cpu(centrino_model, policy->cpu) = model;
 
        dprintk("found \"%s\": max frequency: %dkHz\n",
               model->model_name, model->max_freq);
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 }
 
 #else
-static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
 
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+                                 const struct cpu_id *x)
 {
        if ((c->x86 == x->x86) &&
            (c->x86_model == x->x86_model) &&
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
         * for centrino, as some DSDTs are buggy.
         * Ideally, this can be done using the acpi_data structure.
         */
-       if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
-           (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
-           (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
+       if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+           (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+           (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
                msr = (msr >> 8) & 0xff;
                return msr * 100000;
        }
 
-       if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
+       if ((!per_cpu(centrino_model, cpu)) ||
+           (!per_cpu(centrino_model, cpu)->op_points))
                return 0;
 
        msr &= 0xffff;
-       for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
-               if (msr == centrino_model[cpu]->op_points[i].index)
-                       return centrino_model[cpu]->op_points[i].frequency;
+       for (i = 0;
+               per_cpu(centrino_model, cpu)->op_points[i].frequency
+                                                       != CPUFREQ_TABLE_END;
+            i++) {
+               if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+                       return per_cpu(centrino_model, cpu)->
+                                                       op_points[i].frequency;
        }
        if (failsafe)
-               return centrino_model[cpu]->op_points[i-1].frequency;
+               return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
        else
                return 0;
 }
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
        unsigned l, h;
        unsigned clock_freq;
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr(new_mask, cpu);
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, new_mask);
        if (smp_processor_id() != cpu)
                return 0;
 
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
        int i;
 
        /* Only Intel makes Enhanced Speedstep-capable CPUs */
-       if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
+       if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+           !cpu_has(cpu, X86_FEATURE_EST))
                return -ENODEV;
 
        if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                        break;
 
        if (i != N_IDS)
-               centrino_cpu[policy->cpu] = &cpu_ids[i];
+               per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
 
-       if (!centrino_cpu[policy->cpu]) {
+       if (!per_cpu(centrino_cpu, policy->cpu)) {
                dprintk("found unsupported CPU with "
                "Enhanced SpeedStep: send /proc/cpuinfo to "
                MAINTAINER "\n");
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                /* check to see if it stuck */
                rdmsr(MSR_IA32_MISC_ENABLE, l, h);
                if (!(l & (1<<16))) {
-                       printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
+                       printk(KERN_INFO PFX
+                               "couldn't enable Enhanced SpeedStep\n");
                        return -ENODEV;
                }
        }
 
        freq = get_cur_freq(policy->cpu);
-
-       policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
+       policy->cpuinfo.transition_latency = 10000;
+                                               /* 10uS transition latency */
        policy->cur = freq;
 
        dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-       ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
+       ret = cpufreq_frequency_table_cpuinfo(policy,
+               per_cpu(centrino_model, policy->cpu)->op_points);
        if (ret)
                return (ret);
 
-       cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
+       cpufreq_frequency_table_get_attr(
+               per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
 
        return 0;
 }
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
 
-       if (!centrino_model[cpu])
+       if (!per_cpu(centrino_model, cpu))
                return -ENODEV;
 
        cpufreq_frequency_table_put_attr(cpu);
 
-       centrino_model[cpu] = NULL;
+       per_cpu(centrino_model, cpu) = NULL;
 
        return 0;
 }
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
  */
 static int centrino_verify (struct cpufreq_policy *policy)
 {
-       return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
+       return cpufreq_frequency_table_verify(policy,
+                       per_cpu(centrino_model, policy->cpu)->op_points);
 }
 
 /**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
  * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @relation: how that frequency relates to achieved frequency
+ *     (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
  *
  * Sets a new CPUFreq policy.
  */
+struct allmasks {
+       cpumask_t               online_policy_cpus;
+       cpumask_t               saved_mask;
+       cpumask_t               set_mask;
+       cpumask_t               covered_cpus;
+};
+
 static int centrino_target (struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy,
        unsigned int    newstate = 0;
        unsigned int    msr, oldmsr = 0, h = 0, cpu = policy->cpu;
        struct cpufreq_freqs    freqs;
-       cpumask_t               online_policy_cpus;
-       cpumask_t               saved_mask;
-       cpumask_t               set_mask;
-       cpumask_t               covered_cpus;
        int                     retval = 0;
        unsigned int            j, k, first_cpu, tmp;
-
-       if (unlikely(centrino_model[cpu] == NULL))
-               return -ENODEV;
+       CPUMASK_ALLOC(allmasks);
+       CPUMASK_PTR(online_policy_cpus, allmasks);
+       CPUMASK_PTR(saved_mask, allmasks);
+       CPUMASK_PTR(set_mask, allmasks);
+       CPUMASK_PTR(covered_cpus, allmasks);
+
+       if (unlikely(allmasks == NULL))
+               return -ENOMEM;
+
+       if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+               retval = -ENODEV;
+               goto out;
+       }
 
        if (unlikely(cpufreq_frequency_table_target(policy,
-                       centrino_model[cpu]->op_points,
+                       per_cpu(centrino_model, cpu)->op_points,
                        target_freq,
                        relation,
                        &newstate))) {
-               return -EINVAL;
+               retval = -EINVAL;
+               goto out;
        }
 
 #ifdef CONFIG_HOTPLUG_CPU
        /* cpufreq holds the hotplug lock, so we are safe from here on */
-       cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+       cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
 #else
-       online_policy_cpus = policy->cpus;
+       *online_policy_cpus = policy->cpus;
 #endif
 
-       saved_mask = current->cpus_allowed;
+       *saved_mask = current->cpus_allowed;
        first_cpu = 1;
-       cpus_clear(covered_cpus);
-       for_each_cpu_mask(j, online_policy_cpus) {
+       cpus_clear(*covered_cpus);
+       for_each_cpu_mask_nr(j, *online_policy_cpus) {
                /*
                 * Support for SMP systems.
                 * Make sure we are running on CPU that wants to change freq
                 */
-               cpus_clear(set_mask);
+               cpus_clear(*set_mask);
                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                       cpus_or(set_mask, set_mask, online_policy_cpus);
+                       cpus_or(*set_mask, *set_mask, *online_policy_cpus);
                else
-                       cpu_set(j, set_mask);
+                       cpu_set(j, *set_mask);
 
-               set_cpus_allowed_ptr(current, &set_mask);
+               set_cpus_allowed_ptr(current, set_mask);
                preempt_disable();
-               if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+               if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
                        dprintk("couldn't limit to CPUs in this domain\n");
                        retval = -EAGAIN;
                        if (first_cpu) {
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                        break;
                }
 
-               msr = centrino_model[cpu]->op_points[newstate].index;
+               msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
 
                if (first_cpu) {
                        rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                        dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                                target_freq, freqs.old, freqs.new, msr);
 
-                       for_each_cpu_mask(k, online_policy_cpus) {
+                       for_each_cpu_mask_nr(k, *online_policy_cpus) {
                                freqs.cpu = k;
                                cpufreq_notify_transition(&freqs,
                                        CPUFREQ_PRECHANGE);
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy,
                        break;
                }
 
-               cpu_set(j, covered_cpus);
+               cpu_set(j, *covered_cpus);
                preempt_enable();
        }
 
-       for_each_cpu_mask(k, online_policy_cpus) {
+       for_each_cpu_mask_nr(k, *online_policy_cpus) {
                freqs.cpu = k;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy,
                 * Best effort undo..
                 */
 
-               if (!cpus_empty(covered_cpus)) {
-                       for_each_cpu_mask(j, covered_cpus) {
-                               set_cpus_allowed_ptr(current,
-                                                    &cpumask_of_cpu(j));
+               if (!cpus_empty(*covered_cpus)) {
+                       cpumask_of_cpu_ptr_declare(new_mask);
+
+                       for_each_cpu_mask_nr(j, *covered_cpus) {
+                               cpumask_of_cpu_ptr_next(new_mask, j);
+                               set_cpus_allowed_ptr(current, new_mask);
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                        }
                }
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy,
                tmp = freqs.new;
                freqs.new = freqs.old;
                freqs.old = tmp;
-               for_each_cpu_mask(j, online_policy_cpus) {
+               for_each_cpu_mask_nr(j, *online_policy_cpus) {
                        freqs.cpu = j;
                        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                }
        }
-       set_cpus_allowed_ptr(current, &saved_mask);
-       return 0;
+       set_cpus_allowed_ptr(current, saved_mask);
+       retval = 0;
+       goto out;
 
 migrate_end:
        preempt_enable();
-       set_cpus_allowed_ptr(current, &saved_mask);
-       return 0;
+       set_cpus_allowed_ptr(current, saved_mask);
+out:
+       CPUMASK_FREE(allmasks);
+       return retval;
 }
 
 static struct freq_attr* centrino_attr[] = {
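
The centrino_target() hunks above also show the other stack-saving idiom this branch introduces: several cpumask_t temporaries are collected into one struct, CPUMASK_ALLOC() obtains it (kmalloc'd when NR_CPUS is large), CPUMASK_PTR() hands out pointers to its fields, and CPUMASK_FREE() releases it. A minimal sketch of the idiom (illustration only, not code from this commit; struct my_masks and walk_cpus() are hypothetical names):

        struct my_masks {
                cpumask_t       saved_mask;
                cpumask_t       covered_cpus;
        };

        static int walk_cpus(void)
        {
                int retval = 0;
                CPUMASK_ALLOC(my_masks);                /* declares struct my_masks *my_masks */
                CPUMASK_PTR(saved_mask, my_masks);      /* cpumask_t *saved_mask = &my_masks->saved_mask */
                CPUMASK_PTR(covered_cpus, my_masks);

                if (unlikely(my_masks == NULL))
                        return -ENOMEM;

                *saved_mask = current->cpus_allowed;
                cpus_clear(*covered_cpus);

                /* ... per-CPU work, recording visited CPUs in *covered_cpus ... */

                set_cpus_allowed_ptr(current, saved_mask);
                CPUMASK_FREE(my_masks);
                return retval;
        }
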
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdfd892e016d015e922fc12130992e2..2f3728dc24f60cc41db81e0066927a4ff15f409f 100644
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       return _speedstep_get(&cpumask_of_cpu(cpu));
+       cpumask_of_cpu_ptr(newmask, cpu);
+       return _speedstep_get(newmask);
 }
 
 /**
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
        cpus_allowed = current->cpus_allowed;
 
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
        /* allow to be run on all CPUs */
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 2c8afafa18e860ff98fafb243f2ba7964496c42d..e4b8d189d7edacf0ac60d2dd636ad54100ce7fdc 100644
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        int sibling;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+       for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index); 
                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
        }
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;
+       cpumask_of_cpu_ptr(newmask, cpu);
 
        if (num_cache_leaves == 0)
                return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, newmask);
        if (retval)
                goto out;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index c4a7ec31394c1b9e4abb9971d7d3cfb62228c1bc..2fe06ab5c547ddfe9a07ec77c519688dfd13345d 100644
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
        char __user *buf = ubuf;
        int i, err;
 
-       cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
+       cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e11939b7e7fa583549d4c9affdde26476..88736cadbaa62d02e5e36671b06cfa512df7e683 100644
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out_free;
 
-       for_each_cpu_mask(i, b->cpus) {
+       for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
        /* remove all sibling symlinks before unregistering */
-       for_each_cpu_mask(i, b->cpus) {
+       for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;
 
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 0d0d9057e7c0b63b36591385d89397ff7322bd99..a26c480b94915a2b460c0963bf38f9693c911641 100644
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 {
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_online_map);
-       if ((*pos) < NR_CPUS && cpu_online(*pos))
+       if ((*pos) < nr_cpu_ids && cpu_online(*pos))
                return &cpu_data(*pos);
        return NULL;
 }
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 1a9c68845ee8c8538e6b6e6f1c04d5d4a19fb9be..786548a62d381b1b65af8651c912d491d2178f2c 100644
@@ -168,7 +168,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
         * May as well be the first.
         */
        cpu = first_cpu(cpumask);
-       if ((unsigned)cpu < NR_CPUS)
+       if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 711f11c30b060dd17bc86be21b439d2068040489..3e5d7b8698f91b70f6de1c63c674288bcf4b3b3c 100644
@@ -94,7 +94,7 @@ static void uv_send_IPI_mask(cpumask_t mask, int vector)
 {
        unsigned int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; ++cpu)
+       for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, mask))
                        uv_send_IPI_one(cpu, vector);
 }
@@ -128,7 +128,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
         * May as well be the first.
         */
        cpu = first_cpu(cpumask);
-       if ((unsigned)cpu < NR_CPUS)
+       if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 6510cde36b3549149eabefa4aaf724c72e5a0959..a85db790754bf56a5b8af635802d652176737dac 100644
@@ -731,7 +731,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
                        return 0;
        }
 
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                cpumask_t domain, new_mask;
                int new_cpu;
                int vector, offset;
@@ -752,7 +752,7 @@ next:
                        continue;
                if (vector == IA32_SYSCALL_VECTOR)
                        goto next;
-               for_each_cpu_mask(new_cpu, new_mask)
+               for_each_cpu_mask_nr(new_cpu, new_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
@@ -762,7 +762,7 @@ next:
                        cfg->move_in_progress = 1;
                        cfg->old_domain = cfg->domain;
                }
-               for_each_cpu_mask(new_cpu, new_mask)
+               for_each_cpu_mask_nr(new_cpu, new_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cfg->domain = domain;
@@ -794,7 +794,7 @@ static void __clear_irq_vector(int irq)
 
        vector = cfg->vector;
        cpus_and(mask, cfg->domain, cpu_online_map);
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu_mask_nr(cpu, mask)
                per_cpu(vector_irq, cpu)[vector] = -1;
 
        cfg->vector = 0;
@@ -1372,12 +1372,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 static int ioapic_retrigger_irq(unsigned int irq)
 {
        struct irq_cfg *cfg = &irq_cfg[irq];
-       cpumask_t mask;
        unsigned long flags;
 
        spin_lock_irqsave(&vector_lock, flags);
-       mask = cpumask_of_cpu(first_cpu(cfg->domain));
-       send_IPI_mask(mask, cfg->vector);
+       send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
        spin_unlock_irqrestore(&vector_lock, flags);
 
        return 1;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a8449571858ae9dae444076d7c6b28daff795ee9..3fee2aa50f3f63d594304611090338d36f3fcc01 100644
@@ -62,12 +62,12 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
        if (reload) {
 #ifdef CONFIG_SMP
-               cpumask_t mask;
+               cpumask_of_cpu_ptr_declare(mask);
 
                preempt_disable();
                load_LDT(pc);
-               mask = cpumask_of_cpu(smp_processor_id());
-               if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+               cpumask_of_cpu_ptr_next(mask, smp_processor_id());
+               if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
 #else
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 56b933119a04f5551094ce08feeb12bf2642aa6b..58520169e35dbbdfd7613a4648100197fbca7140 100644
@@ -388,6 +388,7 @@ static int do_microcode_update (void)
        void *new_mc = NULL;
        int cpu;
        cpumask_t old;
+       cpumask_of_cpu_ptr_declare(newmask);
 
        old = current->cpus_allowed;
 
@@ -404,7 +405,8 @@ static int do_microcode_update (void)
 
                        if (!uci->valid)
                                continue;
-                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+                       cpumask_of_cpu_ptr_next(newmask, cpu);
+                       set_cpus_allowed_ptr(current, newmask);
                        error = get_maching_microcode(new_mc, cpu);
                        if (error < 0)
                                goto out;
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        cpumask_t old;
+       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int val[2];
        int err = 0;
 
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
                return 0;
 
        old = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
 
        /* Check if the microcode we have in memory matches the CPU */
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
        cpumask_t old;
+       cpumask_of_cpu_ptr(newmask, cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
        old = current->cpus_allowed;
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+       set_cpus_allowed_ptr(current, newmask);
        mutex_lock(&microcode_mutex);
        collect_cpu_info(cpu);
        if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
                return -EINVAL;
        if (val == 1) {
                cpumask_t old;
+               cpumask_of_cpu_ptr(newmask, cpu);
 
                old = current->cpus_allowed;
 
                get_online_cpus();
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+               set_cpus_allowed_ptr(current, newmask);
 
                mutex_lock(&microcode_mutex);
                if (uci->valid)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f8a62160e1513221a379a400cfd30dc6e3eaaf0b..214bbdfc851e274bd616fa1a981920ee61f76179 100644
@@ -403,24 +403,28 @@ void native_machine_shutdown(void)
 {
        /* Stop the cpus and apics */
 #ifdef CONFIG_SMP
-       int reboot_cpu_id;
 
        /* The boot cpu is always logical cpu 0 */
-       reboot_cpu_id = 0;
+       int reboot_cpu_id = 0;
+       cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
        /* See if there has been given a command line override */
        if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-               cpu_online(reboot_cpu))
+               cpu_online(reboot_cpu)) {
                reboot_cpu_id = reboot_cpu;
+               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+       }
 #endif
 
        /* Make certain the cpu I'm about to reboot on is online */
-       if (!cpu_online(reboot_cpu_id))
+       if (!cpu_online(reboot_cpu_id)) {
                reboot_cpu_id = smp_processor_id();
+               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+       }
 
        /* Make certain I only run on the appropriate processor */
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+       set_cpus_allowed_ptr(current, newmask);
 
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 687376ab07e82ece4ab1eeb609d738d76245d226..09b98cd6332cd8ef2a742de580eece1b2ba51952 100644
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        cpu_set(cpu, cpu_sibling_setup_map);
 
        if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                return;
        }
 
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
+       for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
@@ -1230,7 +1230,7 @@ static void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*/
                 * last thread sibling in this cpu core going down
@@ -1239,7 +1239,7 @@ static void remove_siblinginfo(int cpu)
                        cpu_data(sibling).booted_cores--;
        }
 
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 233156f39b7f39f605fdb2c0cbd266da60783e28..463adecc5cba4a26270dc0479b8068797dd716f4 100644
@@ -351,7 +351,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 
        cpus_and(mask, mask, cpu_online_map);
 
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu_mask_nr(cpu, mask)
                xen_send_IPI_one(cpu, vector);
 }
 
@@ -362,7 +362,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
        /* Make sure other vcpus get a chance to run if they need to. */
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                        break;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220b26d02ff6b72c49155a67e71213c..a2c3f9cfa5490fe5fef15c4c0e48676ad22f837a 100644
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr_declare(new_mask);
        int ret;
 
        if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
         * Migrate task to the cpu pointed by pr.
         */
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+       cpumask_of_cpu_ptr_next(new_mask, pr->id);
+       set_cpus_allowed_ptr(current, new_mask);
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
        cpumask_t saved_mask;
+       cpumask_of_cpu_ptr_declare(new_mask);
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * affected cpu in order to get one proper T-state.
         * The notifier event is THROTTLING_PRECHANGE.
         */
-       for_each_cpu_mask(i, online_throttling_cpus) {
+       for_each_cpu_mask_nr(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                        &t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * it can be called only for the cpu pointed by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+               cpumask_of_cpu_ptr_next(new_mask, pr->id);
+               set_cpus_allowed_ptr(current, new_mask);
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state);
        } else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                 * it is necessary to set T-state for every affected
                 * cpus.
                 */
-               for_each_cpu_mask(i, online_throttling_cpus) {
+               for_each_cpu_mask_nr(i, online_throttling_cpus) {
                        match_pr = per_cpu(processors, i);
                        /*
                         * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                continue;
                        }
                        t_state.cpu = i;
-                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+                       cpumask_of_cpu_ptr_next(new_mask, i);
+                       set_cpus_allowed_ptr(current, new_mask);
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * affected cpu to update the T-states.
         * The notifier event is THROTTLING_POSTCHANGE
         */
-       for_each_cpu_mask(i, online_throttling_cpus) {
+       for_each_cpu_mask_nr(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                        &t_state);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e38dfed41d80b8e6dff45379ee1fdfb8e8d3c750..5000402ae09234267aa9112ef4fe36a7d36b0d4f 100644
@@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf)   \
 {                                                                      \
        return print_cpus_map(buf, &cpu_##type##_map);                  \
 }                                                                      \
-struct sysdev_class_attribute attr_##type##_map =                      \
+static struct sysdev_class_attribute attr_##type##_map =               \
        _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
        &attr_online_map,
        &attr_possible_map,
        &attr_present_map,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1d41496ed2f83b648e1311ee8fecccbd56ad741d..d8f8b1e0edde129f6511b051ee2565b38d86bd75 100644
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
        ssize_t i = 0;
        unsigned int cpu;
 
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        }
 #endif
 
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                if (cpu == j)
                        continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        }
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                cpufreq_cpu_data[j] = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        /* symlink affected CPUs */
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus)
+       for_each_cpu_mask_nr(j, policy->cpus)
                cpufreq_cpu_data[j] = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
         * links afterwards.
         */
        if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask(j, data->cpus) {
+               for_each_cpu_mask_nr(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        cpufreq_cpu_data[j] = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask(j, data->cpus) {
+               for_each_cpu_mask_nr(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        dprintk("removing link for cpu %u\n", j);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2c38aaf18d443b508511206bfac0c..fe565ee437575731124f3488a1962602bec51ee2 100644
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        return rc;
                }
 
-               for_each_cpu_mask(j, policy->cpus) {
+               for_each_cpu_mask_nr(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda3827919d218d9dc592aeb8ca0fd8f03..33855cb3cf16f1ecd30528f35343c2c029959b1e 100644
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
        /* Get Idle Time */
        idle_ticks = UINT_MAX;
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                cputime64_t total_idle_ticks;
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        return rc;
                }
 
-               for_each_cpu_mask(j, policy->cpus) {
+               for_each_cpu_mask_nr(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1c55a0faa2480d823aa91b7b3b001..32244aa7cc0c1d5b8511d3486a1c8de9b1a59f7c 100644
 /**
  * A few values needed by the userspace governor
  */
-static unsigned int    cpu_max_freq[NR_CPUS];
-static unsigned int    cpu_min_freq[NR_CPUS];
-static unsigned int    cpu_cur_freq[NR_CPUS]; /* current CPU freq */
-static unsigned int    cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
-static unsigned int    cpu_is_managed[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
+                                                       userspace */
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
 
 static DEFINE_MUTEX    (userspace_mutex);
 static int cpus_using_userspace_governor;
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
+#define dprintk(msg...) \
+       cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
 /* keep track of frequency transitions */
 static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
         struct cpufreq_freqs *freq = data;
 
-       if (!cpu_is_managed[freq->cpu])
+       if (!per_cpu(cpu_is_managed, freq->cpu))
                return 0;
 
        dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
                        freq->cpu, freq->new);
-       cpu_cur_freq[freq->cpu] = freq->new;
+       per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
 
         return 0;
 }
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
        dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
        mutex_lock(&userspace_mutex);
-       if (!cpu_is_managed[policy->cpu])
+       if (!per_cpu(cpu_is_managed, policy->cpu))
                goto err;
 
-       cpu_set_freq[policy->cpu] = freq;
+       per_cpu(cpu_set_freq, policy->cpu) = freq;
 
-       if (freq < cpu_min_freq[policy->cpu])
-               freq = cpu_min_freq[policy->cpu];
-       if (freq > cpu_max_freq[policy->cpu])
-               freq = cpu_max_freq[policy->cpu];
+       if (freq < per_cpu(cpu_min_freq, policy->cpu))
+               freq = per_cpu(cpu_min_freq, policy->cpu);
+       if (freq > per_cpu(cpu_max_freq, policy->cpu))
+               freq = per_cpu(cpu_max_freq, policy->cpu);
 
        /*
         * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 
 static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
 {
-       return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
+       return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
 }
 
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                }
                cpus_using_userspace_governor++;
 
-               cpu_is_managed[cpu] = 1;
-               cpu_min_freq[cpu] = policy->min;
-               cpu_max_freq[cpu] = policy->max;
-               cpu_cur_freq[cpu] = policy->cur;
-               cpu_set_freq[cpu] = policy->cur;
-               dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+               per_cpu(cpu_is_managed, cpu) = 1;
+               per_cpu(cpu_min_freq, cpu) = policy->min;
+               per_cpu(cpu_max_freq, cpu) = policy->max;
+               per_cpu(cpu_cur_freq, cpu) = policy->cur;
+               per_cpu(cpu_set_freq, cpu) = policy->cur;
+               dprintk("managing cpu %u started "
+                       "(%u - %u kHz, currently %u kHz)\n",
+                               cpu,
+                               per_cpu(cpu_min_freq, cpu),
+                               per_cpu(cpu_max_freq, cpu),
+                               per_cpu(cpu_cur_freq, cpu));
 
                mutex_unlock(&userspace_mutex);
                break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }
 
-               cpu_is_managed[cpu] = 0;
-               cpu_min_freq[cpu] = 0;
-               cpu_max_freq[cpu] = 0;
-               cpu_set_freq[cpu] = 0;
+               per_cpu(cpu_is_managed, cpu) = 0;
+               per_cpu(cpu_min_freq, cpu) = 0;
+               per_cpu(cpu_max_freq, cpu) = 0;
+               per_cpu(cpu_set_freq, cpu) = 0;
                dprintk("managing cpu %u stopped\n", cpu);
                mutex_unlock(&userspace_mutex);
                break;
        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&userspace_mutex);
-               dprintk("limit event for cpu %u: %u - %u kHz,"
+               dprintk("limit event for cpu %u: %u - %u kHz, "
                        "currently %u kHz, last set to %u kHz\n",
                        cpu, policy->min, policy->max,
-                       cpu_cur_freq[cpu], cpu_set_freq[cpu]);
-               if (policy->max < cpu_set_freq[cpu]) {
+                       per_cpu(cpu_cur_freq, cpu),
+                       per_cpu(cpu_set_freq, cpu));
+               if (policy->max < per_cpu(cpu_set_freq, cpu)) {
                        __cpufreq_driver_target(policy, policy->max,
                                                CPUFREQ_RELATION_H);
-               }
-               else if (policy->min > cpu_set_freq[cpu]) {
+               } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
                        __cpufreq_driver_target(policy, policy->min,
                                                CPUFREQ_RELATION_L);
-               }
-               else {
-                       __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+               } else {
+                       __cpufreq_driver_target(policy,
+                                               per_cpu(cpu_set_freq, cpu),
                                                CPUFREQ_RELATION_L);
                }
-               cpu_min_freq[cpu] = policy->min;
-               cpu_max_freq[cpu] = policy->max;
-               cpu_cur_freq[cpu] = policy->cur;
+               per_cpu(cpu_min_freq, cpu) = policy->min;
+               per_cpu(cpu_max_freq, cpu) = policy->max;
+               per_cpu(cpu_cur_freq, cpu) = policy->cur;
                mutex_unlock(&userspace_mutex);
                break;
        }
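
The userspace-governor hunks above convert fixed NR_CPUS-sized arrays into per-CPU variables, so storage scales with the configured maximum rather than being statically reserved. A minimal sketch of that conversion (illustration only, not code from this commit; cpu_example and example_set() are hypothetical names):

        /* Before: static unsigned int cpu_example[NR_CPUS]; */
        static DEFINE_PER_CPU(unsigned int, cpu_example);

        static void example_set(unsigned int cpu, unsigned int val)
        {
                /* per_cpu() replaces the cpu_example[cpu] array indexing */
                per_cpu(cpu_example, cpu) = val;
        }
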
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0fe73884ff86101ed90c04346e00cb..0b624e927a6fea8c4306949b85d28e0b5b980417 100644
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
        cpumask_t old_mask;
+       cpumask_of_cpu_ptr(new_mask, 0);
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
        /* SMI requires CPU 0 */
        old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+       set_cpus_allowed_ptr(current, new_mask);
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481d508fb4ea87e6161795bcdcb5cd4..7a64aa9b51b6e6bf3866ac72fba4899bb9fcd348 100644
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
        spin_lock_irqsave(&pool->last_cpu_lock, flags);
-       cpu = next_cpu(pool->last_cpu, cpu_online_map);
-       if (cpu == NR_CPUS)
+       cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+       if (cpu >= nr_cpu_ids)
                cpu = first_cpu(cpu_online_map);
        pool->last_cpu = cpu;
        spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6ffdf42ada557d715d88925d6cbbc..579b01ff82d4ed73580365334f1790b29e70805d 100644
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;
+       cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
+       set_cpus_allowed_ptr(current, cpumask);
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
index 196d63c28aa44aa3365977da4b729d59444fcb69..bb1c09f7a76ced4cd028d631d79a13d0171e8c2c 100644 (file)
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
         * - mbligh
         */
        local_irq_save(flags);
-       for_each_cpu_mask(query_cpu, mask) {
+       for_each_cpu_mask_nr(query_cpu, mask) {
                __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
                                      vector, APIC_DEST_PHYSICAL);
        }
index 55402d2ab9380e3f621c889503af1cec0abd9851..f92315ed68e1e61ed35b05349fb005c12d21e5f7 100644 (file)
@@ -134,7 +134,7 @@ extern __u32                        cleared_cpu_caps[NCAPINTS];
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)          per_cpu(cpu_info, cpu)
-#define current_cpu_data       cpu_data(smp_processor_id())
+#define current_cpu_data       __get_cpu_var(cpu_info)
 #else
 #define cpu_data(cpu)          boot_cpu_data
 #define current_cpu_data       boot_cpu_data
index c24875bd9c5b0ef1b6347f61a87716c5e8673914..30d59d1d062697128e5cc891e872979312f7f8c6 100644 (file)
  * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
  * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
  *
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ * Note: The alternate operations with the suffix "_nr" are used
+ *       to limit the range of the loop to nr_cpu_ids instead of
+ *       NR_CPUS when NR_CPUS > 64 for performance reasons.
+ *       If NR_CPUS is <= 64 then most assembler bitmask
+ *       operators execute faster with a constant range, so
+ *       operators execute faster with a constant range, so
+ *
+ *       Another consideration is that nr_cpu_ids is initialized
+ *       to NR_CPUS and isn't lowered until the possible cpus are
+ *       discovered (including any disabled cpus).  So early uses
+ *       will span the entire range of NR_CPUS.
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ *
  * The available cpumask operations are:
  *
  * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
 * int cpus_empty(mask)                        Is mask empty (no bits set)?
 * int cpus_full(mask)                 Is mask full (all bits set)?
 * int cpus_weight(mask)               Hamming weight - number of set bits
+ * int cpus_weight_nr(mask)            Same using nr_cpu_ids instead of NR_CPUS
  *
  * void cpus_shift_right(dst, src, n)  Shift right
  * void cpus_shift_left(dst, src, n)   Shift left
  *
  * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
+ * int next_cpu_nr(cpu, mask)          Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ *ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+ * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t *v
+ * cpumask_of_cpu_ptr_next(v, cpu)     Sets v = &cpumask_of_cpu_map[cpu]
+ * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
+ *else
+ * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t _v and *v = &_v
+ * cpumask_of_cpu_ptr_next(v, cpu)     Sets _v = cpumask_of_cpu(cpu)
+ * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
+ *endif
  * CPU_MASK_ALL                                Initializer - all bits set
  * CPU_MASK_NONE                       Initializer - no bits set
  * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
  *
+ * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
+ * variables, and CPUMASK_PTR provides pointers to each field.
+ *
+ * The structure should be defined something like this:
+ * struct my_cpumasks {
+ *     cpumask_t mask1;
+ *     cpumask_t mask2;
+ * };
+ *
+ * Usage is then:
+ *     CPUMASK_ALLOC(my_cpumasks);
+ *     CPUMASK_PTR(mask1, my_cpumasks);
+ *     CPUMASK_PTR(mask2, my_cpumasks);
+ *
+ *     --- DO NOT reference cpumask_t pointers until this check ---
+ *     if (my_cpumasks == NULL)
+ *             "kmalloc failed"...
+ *
+ * References are now pointers to the cpumask_t variables (*mask1, ...)
+ *
+ *if NR_CPUS > BITS_PER_LONG
+ *   CPUMASK_ALLOC(m)                  Declares and allocates struct m *m =
+ *                                             kmalloc(sizeof(*m), GFP_KERNEL)
+ *   CPUMASK_FREE(m)                   Macro for kfree(m)
+ *else
+ *   CPUMASK_ALLOC(m)                  Declares struct m _m, *m = &_m
+ *   CPUMASK_FREE(m)                   Nop
+ *endif
+ *   CPUMASK_PTR(v, m)                 Declares cpumask_t *v = &(m->v)
+ * ------------------------------------------------------------------------
+ *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpumask_parse_user(ubuf, ulen, mask)    Parse ascii string as cpumask
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  * void cpus_onto(dst, orig, relmap)   *dst = orig relative to relmap
  * void cpus_fold(dst, orig, sz)       dst bits = orig bits mod sz
  *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask using NR_CPUS
+ * for_each_cpu_mask_nr(cpu, mask)     for-loop cpu over mask using nr_cpu_ids
  *
  * int num_online_cpus()               Number of online CPUs
  * int num_possible_cpus()             Number of all possible CPUs
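
A minimal sketch of the "_nr" iterator documented in the comment block above. The helper name and the per-CPU work are illustrative only; the point is that the scan is bounded by nr_cpu_ids rather than NR_CPUS:

#include <linux/cpumask.h>

/* Count how many CPUs in 'mask' are currently online.  With NR_CPUS
 * configured much larger than the number of CPUs actually present,
 * the "_nr" form stops at nr_cpu_ids instead of walking all NR_CPUS bits.
 */
static int count_online_in_mask(const cpumask_t *mask)
{
	int cpu, n = 0;

	for_each_cpu_mask_nr(cpu, *mask)
		if (cpu_online(cpu))
			n++;
	return n;
}
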
@@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 
-#ifdef CONFIG_SMP
-int __first_cpu(const cpumask_t *srcp);
-#define first_cpu(src) __first_cpu(&(src))
-int __next_cpu(int n, const cpumask_t *srcp);
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#else
-#define first_cpu(src)         ({ (void)(src); 0; })
-#define next_cpu(n, src)       ({ (void)(src); 1; })
-#endif
 
 #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
 extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])
-
+#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])
+#define        cpumask_of_cpu_ptr(v, cpu)                                      \
+               const cpumask_t *v = &cpumask_of_cpu(cpu)
+#define        cpumask_of_cpu_ptr_declare(v)                                   \
+               const cpumask_t *v
+#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
+                                       v = &cpumask_of_cpu(cpu)
 #else
 #define cpumask_of_cpu(cpu)                                            \
-(*({                                                                   \
+({                                                                     \
        typeof(_unused_cpumask_arg_) m;                                 \
        if (sizeof(m) == sizeof(unsigned long)) {                       \
                m.bits[0] = 1UL<<(cpu);                                 \
@@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map;
                cpus_clear(m);                                          \
                cpu_set((cpu), m);                                      \
        }                                                               \
-       &m;                                                             \
-}))
+       m;                                                              \
+})
+#define        cpumask_of_cpu_ptr(v, cpu)                                      \
+               cpumask_t _##v = cpumask_of_cpu(cpu);                   \
+               const cpumask_t *v = &_##v
+#define        cpumask_of_cpu_ptr_declare(v)                                   \
+               cpumask_t _##v;                                         \
+               const cpumask_t *v = &_##v
+#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
+                                       _##v = cpumask_of_cpu(cpu)
 #endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
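
A hedged usage sketch for the cpumask_of_cpu_ptr helpers defined above, mirroring the declare/next pattern used elsewhere in this patch. The function name and the per-CPU work are illustrative; the pattern avoids copying a full cpumask_t onto the stack when a prebuilt cpumask_of_cpu_map is available:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Temporarily bind the current task to each CPU in 'mask' in turn,
 * then restore its original affinity.
 */
static void visit_each_cpu(const cpumask_t *mask)
{
	cpumask_t saved_mask = current->cpus_allowed;
	cpumask_of_cpu_ptr_declare(one_cpu);
	int cpu;

	for_each_cpu_mask_nr(cpu, *mask) {
		cpumask_of_cpu_ptr_next(one_cpu, cpu);
		set_cpus_allowed_ptr(current, one_cpu);
		/* ... per-CPU work would go here ... */
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}
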
@@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all;
 
 #define cpus_addr(src) ((src).bits)
 
+#if NR_CPUS > BITS_PER_LONG
+#define        CPUMASK_ALLOC(m)        struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define        CPUMASK_FREE(m)         kfree(m)
+#else
+#define        CPUMASK_ALLOC(m)        struct m _m, *m = &_m
+#define        CPUMASK_FREE(m)
+#endif
+#define        CPUMASK_PTR(v, m)       cpumask_t *v = &(m->v)
+
 #define cpumask_scnprintf(buf, len, src) \
                        __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
 static inline int __cpumask_scnprintf(char *buf, int len,
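
Completing the CPUMASK_ALLOC/CPUMASK_PTR usage outlined in the comment block above; the structure, function name, and mask operations are illustrative. The pointers produced by CPUMASK_PTR are only dereferenced after the NULL check, as the comment requires:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_cpumasks {
	cpumask_t mask1;
	cpumask_t mask2;
};

static int use_two_masks(void)
{
	CPUMASK_ALLOC(my_cpumasks);
	CPUMASK_PTR(mask1, my_cpumasks);
	CPUMASK_PTR(mask2, my_cpumasks);

	if (my_cpumasks == NULL)
		return -ENOMEM;	/* kmalloc failed (NR_CPUS > BITS_PER_LONG case) */

	cpus_clear(*mask1);
	cpus_and(*mask2, cpu_online_map, cpu_possible_map);

	CPUMASK_FREE(my_cpumasks);
	return 0;
}
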
@@ -343,20 +413,49 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
        bitmap_fold(dstp->bits, origp->bits, sz, nbits);
 }
 
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < NR_CPUS;                \
-               (cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask)           \
+#if NR_CPUS == 1
+
+#define nr_cpu_ids             1
+#define first_cpu(src)         ({ (void)(src); 0; })
+#define next_cpu(n, src)       ({ (void)(src); 1; })
+#define any_online_cpu(mask)   0
+#define for_each_cpu_mask(cpu, mask)   \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* NR_CPUS */
+
+#else /* NR_CPUS > 1 */
+
+extern int nr_cpu_ids;
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src)         __first_cpu(&(src))
+#define next_cpu(n, src)       __next_cpu((n), &(src))
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask)                   \
+       for ((cpu) = -1;                                \
+               (cpu) = next_cpu((cpu), (mask)),        \
+               (cpu) < NR_CPUS; )
+#endif
+
+#if NR_CPUS <= 64
 
 #define next_cpu_nr(n, src)            next_cpu(n, src)
 #define cpus_weight_nr(cpumask)                cpus_weight(cpumask)
 #define for_each_cpu_mask_nr(cpu, mask)        for_each_cpu_mask(cpu, mask)
 
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define next_cpu_nr(n, src)    __next_cpu_nr((n), &(src))
+#define cpus_weight_nr(cpumask)        __cpus_weight(&(cpumask), nr_cpu_ids)
+#define for_each_cpu_mask_nr(cpu, mask)                        \
+       for ((cpu) = -1;                                \
+               (cpu) = next_cpu_nr((cpu), (mask)),     \
+               (cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
 /*
  * The following particular system cpumasks and operations manage
  * possible, present and online cpus.  Each of them is a fixed size
@@ -418,9 +517,9 @@ extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_present_map;
 
 #if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
+#define num_online_cpus()      cpus_weight_nr(cpu_online_map)
+#define num_possible_cpus()    cpus_weight_nr(cpu_possible_map)
+#define num_present_cpus()     cpus_weight_nr(cpu_present_map)
 #define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
 #define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
 #define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
@@ -435,17 +534,8 @@ extern cpumask_t cpu_present_map;
 
 #define cpu_is_offline(cpu)    unlikely(!cpu_online(cpu))
 
-#ifdef CONFIG_SMP
-extern int nr_cpu_ids;
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-int __any_online_cpu(const cpumask_t *mask);
-#else
-#define nr_cpu_ids                     1
-#define any_online_cpu(mask)           0
-#endif
-
-#define for_each_possible_cpu(cpu)  for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
 
 #endif /* __LINUX_CPUMASK_H */
index cfb1d43ab801b69f24c47c0cdb17332c25f6f99e..d26d0b095b3b2c9b59a49cb8e2b9cd4200b5e2a9 100644 (file)
@@ -413,7 +413,7 @@ void __ref enable_nonboot_cpus(void)
                goto out;
 
        printk("Enabling non-boot CPUs ...\n");
-       for_each_cpu_mask(cpu, frozen_cpus) {
+       for_each_cpu_mask_nr(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
index 16eeeaa9d618c7ef4c7ef4f31499fb1195a396b3..6f8696c502f429ebcbaf65e537fdf47e3127d300 100644 (file)
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
-               for_each_cpu_mask(cpu, cpumask)
+               for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
 }
index 6f62b77d93c41978f030e28ef617b6ddfbe0c5e8..27827931ca0dd6ca905040955616c970b2e7539d 100644 (file)
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
 
        /* Now ask each CPU for acknowledgement of the flip. */
 
-       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
                dyntick_save_progress_counter(cpu);
        }
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                if (rcu_try_flip_waitack_needed(cpu) &&
                    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
        /* Check to see if the sum of the "last" counters is zero. */
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
        if (sum != 0) {
                RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
        smp_mb();  /*  ^^^^^^^^^^^^ */
 
        /* Call for a memory barrier from each CPU. */
-       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
                dyntick_save_progress_counter(cpu);
        }
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                if (rcu_try_flip_waitmb_needed(cpu) &&
                    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
index 99e6d850ecab097e4d1c5cbda1e0419548093681..a6d0a7f3f7c3fd4afd6027ddeb849e4b06fbf8bb 100644 (file)
@@ -2108,7 +2108,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
 
-               for_each_cpu_mask(i, group->cpumask) {
+               for_each_cpu_mask_nr(i, group->cpumask) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
@@ -2150,7 +2150,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
        /* Traverse only the allowed CPUs */
        cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-       for_each_cpu_mask(i, *tmp) {
+       for_each_cpu_mask_nr(i, *tmp) {
                load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3168,7 +3168,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                max_cpu_load = 0;
                min_cpu_load = ~0UL;
 
-               for_each_cpu_mask(i, group->cpumask) {
+               for_each_cpu_mask_nr(i, group->cpumask) {
                        struct rq *rq;
 
                        if (!cpu_isset(i, *cpus))
@@ -3447,7 +3447,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
        unsigned long max_load = 0;
        int i;
 
-       for_each_cpu_mask(i, group->cpumask) {
+       for_each_cpu_mask_nr(i, group->cpumask) {
                unsigned long wl;
 
                if (!cpu_isset(i, *cpus))
@@ -3989,7 +3989,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                int balance_cpu;
 
                cpu_clear(this_cpu, cpus);
-               for_each_cpu_mask(balance_cpu, cpus) {
+               for_each_cpu_mask_nr(balance_cpu, cpus) {
                        /*
                         * If this cpu gets work to do, stop the load balancing
                         * work being done for other cpus. Next load
@@ -6802,7 +6802,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
        cpus_clear(*covered);
 
-       for_each_cpu_mask(i, *span) {
+       for_each_cpu_mask_nr(i, *span) {
                struct sched_group *sg;
                int group = group_fn(i, cpu_map, &sg, tmpmask);
                int j;
@@ -6813,7 +6813,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
                cpus_clear(sg->cpumask);
                sg->__cpu_power = 0;
 
-               for_each_cpu_mask(j, *span) {
+               for_each_cpu_mask_nr(j, *span) {
                        if (group_fn(j, cpu_map, NULL, tmpmask) != group)
                                continue;
 
@@ -7013,7 +7013,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
        if (!sg)
                return;
        do {
-               for_each_cpu_mask(j, sg->cpumask) {
+               for_each_cpu_mask_nr(j, sg->cpumask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(phys_domains, j);
@@ -7038,7 +7038,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
        int cpu, i;
 
-       for_each_cpu_mask(cpu, *cpu_map) {
+       for_each_cpu_mask_nr(cpu, *cpu_map) {
                struct sched_group **sched_group_nodes
                        = sched_group_nodes_bycpu[cpu];
 
@@ -7277,7 +7277,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
        /*
         * Set up domains for cpus specified by the cpu_map.
         */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = NULL, *p;
                SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7344,7 +7344,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7361,7 +7361,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
        /* Set up multi-core groups */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                SCHED_CPUMASK_VAR(this_core_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7428,7 +7428,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
                        goto error;
                }
                sched_group_nodes[i] = sg;
-               for_each_cpu_mask(j, *nodemask) {
+               for_each_cpu_mask_nr(j, *nodemask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(node_domains, j);
@@ -7474,21 +7474,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
        /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 #ifdef CONFIG_SCHED_MC
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(core_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(phys_domains, i);
 
                init_sched_groups_power(i, sd);
@@ -7508,7 +7508,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
        /* Attach the domains */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
@@ -7603,7 +7603,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
        unregister_sched_domain_sysctl();
 
-       for_each_cpu_mask(i, *cpu_map)
+       for_each_cpu_mask_nr(i, *cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map, &tmpmask);
index f2aa987027d695750f2ca4b8f917d02171eeb3b8..bb61fe26b62cc50bd0e60a84781ff658f29e22e2 100644 (file)
@@ -1031,7 +1031,7 @@ static int wake_idle(int cpu, struct task_struct *p)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
-                       for_each_cpu_mask(i, tmp) {
+                       for_each_cpu_mask_nr(i, tmp) {
                                if (idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
index 47ceac9e8552f309930c9f9af4d4131fd18ca8f3..7c9614728c597433eb4f10200207994d8e3eee6b 100644 (file)
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
-       for_each_cpu_mask(i, rd->span) {
+       for_each_cpu_mask_nr(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;
 
@@ -1107,7 +1107,7 @@ static int pull_rt_task(struct rq *this_rq)
 
        next = pick_next_task_rt(this_rq);
 
-       for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+       for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
 
index ba9b2054ecbdb99bbb868b4f9c01af0bb2ce2c30..738b411ff2d33dee86042404917640bcb2932ed5 100644 (file)
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
 {
        int irqs_disabled = 0;
        int prepared = 0;
+       cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+       set_cpus_allowed_ptr(current, cpumask);
 
        /* Ack: we are alive */
        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
index 4a23517169a6495cf1aa1bc689137e1c38698c62..06b17547f4e76869f16fedddc00f3a880fc6c364 100644 (file)
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
                return -EINVAL;
 
        if (isadd == REGISTER) {
-               for_each_cpu_mask(cpu, mask) {
+               for_each_cpu_mask_nr(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                                         cpu_to_node(cpu));
                        if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
        /* Deregister or cleanup */
 cleanup:
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
index dadde5361f32df668877aaa8d5a2e56abb5bb9ff..60ceabd53f2ead02e3c04045efc72aa1eac58ba5 100644 (file)
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
                 * Cycle through CPUs to check if the CPUs stay
                 * synchronized to each other.
                 */
-               int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+               int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-               if (next_cpu >= NR_CPUS)
+               if (next_cpu >= nr_cpu_ids)
                        next_cpu = first_cpu(cpu_online_map);
                watchdog_timer.expires += WATCHDOG_INTERVAL;
                add_timer_on(&watchdog_timer, next_cpu);
index f48d0f09d32f9c8763190a766f2f23a9fef9e923..31463d370b944ff37919d184a125e7efc1bf563a 100644 (file)
@@ -399,8 +399,7 @@ again:
        mask = CPU_MASK_NONE;
        now = ktime_get();
        /* Find all expired events */
-       for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-            cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+       for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpu_set(cpu, mask);
index 4f3886562b8cb919c48110cc8c198c65ea93e54a..bf43284d6855ad083319d653d1b0fbc3235dde08 100644 (file)
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
  */
 static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
-                             cpumask_t cpumask)
+                             const cpumask_t *cpumask)
 {
        ktime_t next_event;
        void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
         * When the device is not per cpu, pin the interrupt to the
         * current cpu:
         */
-       if (!cpus_equal(newdev->cpumask, cpumask))
-               irq_set_affinity(newdev->irq, cpumask);
+       if (!cpus_equal(newdev->cpumask, *cpumask))
+               irq_set_affinity(newdev->irq, *cpumask);
 
        /*
         * When global broadcasting is active, check if the current
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        struct tick_device *td;
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;
-       cpumask_t cpumask;
+       cpumask_of_cpu_ptr_declare(cpumask);
 
        spin_lock_irqsave(&tick_device_lock, flags);
 
        cpu = smp_processor_id();
+       cpumask_of_cpu_ptr_next(cpumask, cpu);
        if (!cpu_isset(cpu, newdev->cpumask))
                goto out_bc;
 
        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;
-       cpumask = cpumask_of_cpu(cpu);
 
        /* cpu local device ? */
-       if (!cpus_equal(newdev->cpumask, cpumask)) {
+       if (!cpus_equal(newdev->cpumask, *cpumask)) {
 
                /*
                 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
-               if (curdev && cpus_equal(curdev->cpumask, cpumask))
+               if (curdev && cpus_equal(curdev->cpumask, *cpumask))
                        goto out_bc;
        }
 
index 2301e1e7c60648d7376f430b32e771257973be8a..63528086337c0145b6e59e4a77c9e80b40686408 100644 (file)
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+               cpumask_of_cpu_ptr(new_mask, cpu);
+
+               set_cpus_allowed_ptr(current, new_mask);
                start_stack_timer(cpu);
        }
        set_cpus_allowed_ptr(current, &saved_mask);
index ce7799540c91d28d3109e0e31fc53e623859ed1e..a6d36346d10ad11efc2cf0461720b34e5392ba19 100644 (file)
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
        might_sleep();
        lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);
 
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        put_online_cpus();
 
index bb4f76d3c3e7cd327488006e4878296d58fe1167..5f97dc25ef9c925fb68b83fecec5a7c0114ab339 100644 (file)
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+       return min_t(int, nr_cpu_ids,
+                               find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
        int cpu;
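
A small sketch of the calling convention __next_cpu_nr() establishes: when no further bit is set it returns nr_cpu_ids, so round-robin callers (such as the hunks above that replaced NR_CPUS comparisons) wrap by testing against nr_cpu_ids. The helper name is illustrative:

#include <linux/cpumask.h>

/* Pick the next online CPU after 'last', wrapping to the first online
 * CPU once the scan runs past the highest possible CPU id.
 */
static int next_online_round_robin(int last)
{
	int cpu = next_cpu_nr(last, cpu_online_map);

	if (cpu >= nr_cpu_ids)
		cpu = first_cpu(cpu_online_map);
	return cpu;
}
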
index 3b4dc098181e47ae4d3a239aa4c27dcd6d9a7d47..c4381d9516f658ff62c6a2fd767310c78d80b7bc 100644 (file)
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
 {
        unsigned long preempt_count = preempt_count();
        int this_cpu = raw_smp_processor_id();
-       cpumask_t this_mask;
+       cpumask_of_cpu_ptr_declare(this_mask);
 
        if (likely(preempt_count))
                goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
-       this_mask = cpumask_of_cpu(this_cpu);
+       cpumask_of_cpu_ptr_next(this_mask, this_cpu);
 
-       if (cpus_equal(current->cpus_allowed, this_mask))
+       if (cpus_equal(current->cpus_allowed, *this_mask))
                goto out;
 
        /*
index 05f2b4009cccd4c249ccfbd50aa8bc3237d4cb59..843364594e23d1674669713b023cd6670bde8364 100644 (file)
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate);
 void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
 {
        int cpu;
-       for_each_cpu_mask(cpu, *mask)
+       for_each_cpu_mask_nr(cpu, *mask)
                percpu_depopulate(__pdata, cpu);
 }
 EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
        int cpu;
 
        cpus_clear(populated);
-       for_each_cpu_mask(cpu, *mask)
+       for_each_cpu_mask_nr(cpu, *mask)
                if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
                        __percpu_depopulate_mask(__pdata, &populated);
                        return -ENOMEM;
index db9eabb2c5b3a992a04ac2396bf791b72bbdcf94..c3d4a781802f24c48a61864c9d40f1c94134cbc8 100644 (file)
@@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 
        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-       for_each_cpu_mask(cpu, *cpumask) {
+       for_each_cpu_mask_nr(cpu, *cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
index 2eed17bcb2ddb5c88d3dd4625c89f86c13a5c8f4..106d5e6d987c6fe47d6b24552e5eeb17b4d141bf 100644 (file)
@@ -2376,7 +2376,7 @@ out:
         */
        if (!cpus_empty(net_dma.channel_mask)) {
                int chan_idx;
-               for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+               for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
                        struct dma_chan *chan = net_dma.channels[chan_idx];
                        if (chan)
                                dma_async_memcpy_issue_pending(chan);
@@ -4510,7 +4510,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
        i = 0;
        cpu = first_cpu(cpu_online_map);
 
-       for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+       for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
                chan = net_dma->channels[chan_idx];
 
                n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
index 265b1b289a32fcac92b19abde659bfacb03a102f..705959b31e24dab7cd85db52b1b098391cfa2489 100644 (file)
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void)
        /* Disable all cpu but the first in cpu_irq_cpumask. */
        cpumask = iucv_irq_cpumask;
        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-       for_each_cpu_mask(cpu, cpumask)
+       for_each_cpu_mask_nr(cpu, cpumask)
                smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
index 5a32cb7c4bb486267a03d15892adc7ce5db93c93..835d274130838361212a8ffcbae3d4767af0cf57 100644 (file)
@@ -310,7 +310,8 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
        switch (m->mode) {
        case SVC_POOL_PERCPU:
        {
-               set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+               cpumask_of_cpu_ptr(cpumask, node);
+               set_cpus_allowed_ptr(task, cpumask);
                break;
        }
        case SVC_POOL_PERNODE: