Re: [PATCH] powerpc: convert old cpumask API into new one

From: Thiago Farina
Date: Thu Apr 28 2011 - 11:20:02 EST


On Thu, Apr 28, 2011 at 12:07 PM, KOSAKI Motohiro
<kosaki.motohiro@xxxxxxxxxxxxxx> wrote:
> Adapt to the new cpumask API.
>
> Almost all of the changes are trivial. The most important change is the
> line below, because we plan to change the task->cpus_allowed implementation.
>
> -       ctx->cpus_allowed = current->cpus_allowed;
>
> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
> Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
> Cc: Paul Mackerras <paulus@xxxxxxxxx>
> Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
> ---
>  arch/powerpc/include/asm/cputhreads.h        |   12 +++++-----
>  arch/powerpc/include/asm/kexec.h             |    2 +-
>  arch/powerpc/kernel/crash.c                  |   32 +++++++++++++-------------
>  arch/powerpc/kernel/setup-common.c           |    4 +-
>  arch/powerpc/kernel/smp.c                    |    4 +-
>  arch/powerpc/kernel/traps.c                  |    2 +-
>  arch/powerpc/mm/numa.c                       |    2 +-
>  arch/powerpc/platforms/cell/beat_smp.c       |    2 +-
>  arch/powerpc/platforms/cell/cbe_regs.c       |   11 +++++----
>  arch/powerpc/platforms/cell/smp.c            |   13 +++++-----
>  arch/powerpc/platforms/cell/spufs/sched.c    |    2 +-
>  arch/powerpc/platforms/pseries/hotplug-cpu.c |    2 +-
>  arch/powerpc/xmon/xmon.c                     |   16 ++++++------
>  13 files changed, 52 insertions(+), 52 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
> index f71bb4c..ce516e5 100644
> --- a/arch/powerpc/include/asm/cputhreads.h
> +++ b/arch/powerpc/include/asm/cputhreads.h
> @@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
> Â* This can typically be used for things like IPI for tlb invalidations
> Â* since those need to be done only once per core/TLB
> Â*/
> -static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
> +static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
> Â{
>    Âcpumask_t    tmp, res;
>    Âint       i;
>
> - Â Â Â res = CPU_MASK_NONE;
> + Â Â Â cpumask_clear(&res);
> Â Â Â Âfor (i = 0; i < NR_CPUS; i += threads_per_core) {
> - Â Â Â Â Â Â Â cpus_shift_left(tmp, threads_core_mask, i);
> - Â Â Â Â Â Â Â if (cpus_intersects(threads, tmp))
> - Â Â Â Â Â Â Â Â Â Â Â cpu_set(i, res);
> + Â Â Â Â Â Â Â cpumask_shift_left(&tmp, &threads_core_mask, i);
> + Â Â Â Â Â Â Â if (cpumask_intersects(threads, &tmp))
> + Â Â Â Â Â Â Â Â Â Â Â cpumask_set_cpu(i, &res);
> Â Â Â Â}
> Â Â Â Âreturn res;
> Â}
> @@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
>
> Âstatic inline cpumask_t cpu_online_cores_map(void)
> Â{
> - Â Â Â return cpu_thread_mask_to_cores(cpu_online_map);
> + Â Â Â return cpu_thread_mask_to_cores(cpu_online_mask);
> Â}
>
> Â#ifdef CONFIG_SMP
> diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
> index f54408d..8a33698 100644
> --- a/arch/powerpc/include/asm/kexec.h
> +++ b/arch/powerpc/include/asm/kexec.h
> @@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
> Âextern cpumask_t cpus_in_sr;
> Âstatic inline int kexec_sr_activated(int cpu)
> Â{
> - Â Â Â return cpu_isset(cpu,cpus_in_sr);
> + Â Â Â return cpumask_test_cpu(cpu, &cpus_in_sr);
> Â}
>
> Âstruct kimage;
> diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
> index 3d3d416..88e294f 100644
> --- a/arch/powerpc/kernel/crash.c
> +++ b/arch/powerpc/kernel/crash.c
> @@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
> Â Â Â Â Â Â Â Âreturn;
>
> Â Â Â Âhard_irq_disable();
> - Â Â Â if (!cpu_isset(cpu, cpus_in_crash))
> + Â Â Â if (!cpumask_test_cpu(cpu, &cpus_in_crash))
> Â Â Â Â Â Â Â Âcrash_save_cpu(regs, cpu);
> - Â Â Â cpu_set(cpu, cpus_in_crash);
> + Â Â Â cpumask_set_cpu(cpu, &cpus_in_crash);
>
> Â Â Â Â/*
> Â Â Â Â * Entered via soft-reset - could be the kdump
> @@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
> Â Â Â Â * Tell the kexec CPU that entered via soft-reset and ready
> Â Â Â Â * to go down.
> Â Â Â Â */
> - Â Â Â if (cpu_isset(cpu, cpus_in_sr)) {
> - Â Â Â Â Â Â Â cpu_clear(cpu, cpus_in_sr);
> + Â Â Â if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
> + Â Â Â Â Â Â Â cpumask_clear_cpu(cpu, &cpus_in_sr);
> Â Â Â Â Â Â Â Âatomic_inc(&enter_on_soft_reset);
> Â Â Â Â}
>
> @@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
> Â Â Â Â * This barrier is needed to make sure that all CPUs are stopped.
> Â Â Â Â * If not, soft-reset will be invoked to bring other CPUs.
> Â Â Â Â */
> - Â Â Â while (!cpu_isset(crashing_cpu, cpus_in_crash))
> + Â Â Â while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
> Â Â Â Â Â Â Â Âcpu_relax();
>
> Â Â Â Âif (ppc_md.kexec_cpu_down)
> @@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
> Â{
> Â Â Â Âunsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
>
> - Â Â Â cpu_clear(cpu, cpus_in_sr);
> + Â Â Â cpumask_clear_cpu(cpu, &cpus_in_sr);
> Â Â Â Âwhile (atomic_read(&enter_on_soft_reset) != ncpus)
> Â Â Â Â Â Â Â Âcpu_relax();
> Â}
> @@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
> Â Â Â Â */
> Â Â Â Âprintk(KERN_EMERG "Sending IPI to other cpus...\n");
> Â Â Â Âmsecs = 10000;
> - Â Â Â while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
> + Â Â Â while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
> Â Â Â Â Â Â Â Âcpu_relax();
> Â Â Â Â Â Â Â Âmdelay(1);
> Â Â Â Â}
> @@ -144,20 +144,20 @@ static void crash_kexec_prepare_cpus(int cpu)
> Â Â Â Â * user to do soft reset such that we get all.
> Â Â Â Â * Soft-reset will be used until better mechanism is implemented.
> Â Â Â Â */
> - Â Â Â if (cpus_weight(cpus_in_crash) < ncpus) {
> + Â Â Â if (cpumask_weight(&cpus_in_crash) < ncpus) {
> Â Â Â Â Â Â Â Âprintk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
> - Â Â Â Â Â Â Â Â Â Â Â ncpus - cpus_weight(cpus_in_crash));
> + Â Â Â Â Â Â Â Â Â Â Â ncpus - cpumask_weight(&cpus_in_crash));
> Â Â Â Â Â Â Â Âprintk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
> - Â Â Â Â Â Â Â cpus_in_sr = CPU_MASK_NONE;
> + Â Â Â Â Â Â Â cpumask_clear(&cpus_in_sr);
> Â Â Â Â Â Â Â Âatomic_set(&enter_on_soft_reset, 0);
> - Â Â Â Â Â Â Â while (cpus_weight(cpus_in_crash) < ncpus)
> + Â Â Â Â Â Â Â while (cpumask_weight(&cpus_in_crash) < ncpus)
> Â Â Â Â Â Â Â Â Â Â Â Âcpu_relax();
> Â Â Â Â}
> Â Â Â Â/*
> Â Â Â Â * Make sure all CPUs are entered via soft-reset if the kdump is
> Â Â Â Â * invoked using soft-reset.
> Â Â Â Â */
> - Â Â Â if (cpu_isset(cpu, cpus_in_sr))
> + Â Â Â if (cpumask_test_cpu(cpu, &cpus_in_sr))
> Â Â Â Â Â Â Â Âcrash_soft_reset_check(cpu);
> Â Â Â Â/* Leave the IPI callback set */
> Â}
> @@ -212,7 +212,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
> Â Â Â Â Â Â Â Â Â Â Â Â * exited using 'x'(exit and recover) or
> Â Â Â Â Â Â Â Â Â Â Â Â * kexec_should_crash() failed for all running tasks.
> Â Â Â Â Â Â Â Â Â Â Â Â */
> - Â Â Â Â Â Â Â Â Â Â Â cpu_clear(cpu, cpus_in_sr);
> + Â Â Â Â Â Â Â Â Â Â Â cpumask_clear_cpu(cpu, &cpus_in_sr);
> Â Â Â Â Â Â Â Â Â Â Â Âlocal_irq_restore(flags);
> Â Â Â Â Â Â Â Â Â Â Â Âreturn;
> Â Â Â Â Â Â Â Â}
> @@ -226,7 +226,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
> Â Â Â Â Â Â Â Â * then start kexec boot.
> Â Â Â Â Â Â Â Â */
> Â Â Â Â Â Â Â Âcrash_soft_reset_check(cpu);
> - Â Â Â Â Â Â Â cpu_set(crashing_cpu, cpus_in_crash);
> + Â Â Â Â Â Â Â cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
> Â Â Â Â Â Â Â Âif (ppc_md.kexec_cpu_down)
> Â Â Â Â Â Â Â Â Â Â Â Âppc_md.kexec_cpu_down(1, 0);
> Â Â Â Â Â Â Â Âmachine_kexec(kexec_crash_image);
> @@ -253,7 +253,7 @@ static void crash_kexec_prepare_cpus(int cpu)
>
> Âvoid crash_kexec_secondary(struct pt_regs *regs)
> Â{
> - Â Â Â cpus_in_sr = CPU_MASK_NONE;
> + Â Â Â cpumask_clear(&cpus_in_sr);
> Â}
> Â#endif
>
> @@ -345,7 +345,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
> Â Â Â Âcrashing_cpu = smp_processor_id();
> Â Â Â Âcrash_save_cpu(regs, crashing_cpu);
> Â Â Â Âcrash_kexec_prepare_cpus(crashing_cpu);
> - Â Â Â cpu_set(crashing_cpu, cpus_in_crash);
> + Â Â Â cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
> Â Â Â Âcrash_kexec_wait_realmode(crashing_cpu);
>
> Â Â Â Âmachine_kexec_mask_interrupts();
> diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
> index 21f30cb..1475df6 100644
> --- a/arch/powerpc/kernel/setup-common.c
> +++ b/arch/powerpc/kernel/setup-common.c
> @@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
> Â Â Â Âint i;
>
> Â Â Â Âthreads_per_core = tpc;
> - Â Â Â threads_core_mask = CPU_MASK_NONE;
> + Â Â Â cpumask_clear(&threads_core_mask);
>
> Â Â Â Â/* This implementation only supports power of 2 number of threads
> Â Â Â Â * for simplicity and performance
> @@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
> Â Â Â ÂBUG_ON(tpc != (1 << threads_shift));
>
> Â Â Â Âfor (i = 0; i < tpc; i++)
> - Â Â Â Â Â Â Â cpu_set(i, threads_core_mask);
> + Â Â Â Â Â Â Â cpumask_set_cpu(i, &threads_core_mask);
>
> Â Â Â Âprintk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
> Â Â Â Â Â Â Â tpc, tpc > 1 ? "s" : "");
> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
> index 9f9c204..da584a9 100644
> --- a/arch/powerpc/kernel/smp.c
> +++ b/arch/powerpc/kernel/smp.c
> @@ -507,7 +507,7 @@ int cpu_first_thread_of_core(int core)
> Â}
> ÂEXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
>
> -/* Must be called when no change can occur to cpu_present_map,
> +/* Must be called when no change can occur to cpu_present_mask,
> Â* i.e. during cpu online or offline.
> Â*/
> Âstatic struct device_node *cpu_to_l2cache(int cpu)
> @@ -608,7 +608,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
> Â Â Â Â * se we pin us down to CPU 0 for a short while
> Â Â Â Â */
> Â Â Â Âalloc_cpumask_var(&old_mask, GFP_NOWAIT);
> - Â Â Â cpumask_copy(old_mask, &current->cpus_allowed);
> + Â Â Â cpumask_copy(old_mask, tsk_cpus_allowed(current));
> Â Â Â Âset_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
>
> Â Â Â Âif (smp_ops && smp_ops->setup_cpu)
> diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
> index 5ddb801..af1f8f4 100644
> --- a/arch/powerpc/kernel/traps.c
> +++ b/arch/powerpc/kernel/traps.c
> @@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs)
> Â Â Â Â}
>
> Â#ifdef CONFIG_KEXEC
> - Â Â Â cpu_set(smp_processor_id(), cpus_in_sr);
> + Â Â Â cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
> Â#endif
>
> Â Â Â Âdie("System Reset", regs, SIGABRT);
> diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
> index 5ec1dad..d6cc587 100644
> --- a/arch/powerpc/mm/numa.c
> +++ b/arch/powerpc/mm/numa.c
> @@ -1453,7 +1453,7 @@ int arch_update_cpu_topology(void)
> Â Â Â Âunsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
> Â Â Â Âstruct sys_device *sysdev;
>
> - Â Â Â for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
> +       for_each_cpu(cpu, &cpu_associativity_changes_mask) {
> Â Â Â Â Â Â Â Âvphn_get_associativity(cpu, associativity);
> Â Â Â Â Â Â Â Ânid = associativity_to_nid(associativity);
>
> diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
> index 26efc20..fd3cdb4 100644
> --- a/arch/powerpc/platforms/cell/beat_smp.c
> +++ b/arch/powerpc/platforms/cell/beat_smp.c
> @@ -85,7 +85,7 @@ static void smp_beatic_message_pass(int target, int msg)
>
> Âstatic int __init smp_beatic_probe(void)
> Â{
> - Â Â Â return cpus_weight(cpu_possible_map);
> + Â Â Â return cpumask_weight(cpu_possible_mask);
> Â}
>
> Âstatic void __devinit smp_beatic_setup_cpu(int cpu)
> diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
> index dbc338f..f3917e7 100644
> --- a/arch/powerpc/platforms/cell/cbe_regs.c
> +++ b/arch/powerpc/platforms/cell/cbe_regs.c
> @@ -45,8 +45,8 @@ static struct cbe_thread_map
> Â Â Â Âunsigned int cbe_id;
> Â} cbe_thread_map[NR_CPUS];
>
> -static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
> -static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
> +static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
> +static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
>
> Âstatic struct cbe_regs_map *cbe_find_map(struct device_node *np)
> Â{
> @@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
>
> Âu32 cbe_node_to_cpu(int node)
> Â{
> - Â Â Â return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
> + Â Â Â return cpumask_first(&cbe_local_mask[node]);
> +
> Â}
> ÂEXPORT_SYMBOL_GPL(cbe_node_to_cpu);
>
> @@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âthread->regs = map;
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âthread->cbe_id = cbe_id;
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âmap->be_node = thread->be_node;
> - Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpu_set(i, cbe_local_mask[cbe_id]);
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âif(thread->thread_id == 0)
While you are here, could you also add a space between `if` and `(`?

> - Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpu_set(i, cbe_first_online_cpu);
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpumask_set_cpu(i, &cbe_first_online_cpu);
> Â Â Â Â Â Â Â Â Â Â Â Â}
> Â Â Â Â Â Â Â Â}
>
> diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
> index f774530..56e8fa0 100644
> --- a/arch/powerpc/platforms/cell/smp.c
> +++ b/arch/powerpc/platforms/cell/smp.c
> @@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
> Â Â Â Âunsigned int pcpu;
> Â Â Â Âint start_cpu;
>
> - Â Â Â if (cpu_isset(lcpu, of_spin_map))
> + Â Â Â if (cpumask_test_cpu(lcpu, &of_spin_map))
> Â Â Â Â Â Â Â Â/* Already started by OF and sitting in spin loop */
> Â Â Â Â Â Â Â Âreturn 1;
>
> @@ -123,7 +123,7 @@ static int __init smp_iic_probe(void)
> Â{
> Â Â Â Âiic_request_IPIs();
>
> - Â Â Â return cpus_weight(cpu_possible_map);
> + Â Â Â return cpumask_weight(cpu_possible_mask);
> Â}
>
> Âstatic void __devinit smp_cell_setup_cpu(int cpu)
> @@ -186,13 +186,12 @@ void __init smp_init_cell(void)
> Â Â Â Âif (cpu_has_feature(CPU_FTR_SMT)) {
> Â Â Â Â Â Â Â Âfor_each_present_cpu(i) {
> Â Â Â Â Â Â Â Â Â Â Â Âif (cpu_thread_in_core(i) == 0)
> - Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpu_set(i, of_spin_map);
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpumask_set_cpu(i, &of_spin_map);
> Â Â Â Â Â Â Â Â}
> - Â Â Â } else {
> - Â Â Â Â Â Â Â of_spin_map = cpu_present_map;
> - Â Â Â }
> + Â Â Â } else
> + Â Â Â Â Â Â Â cpumask_copy(&of_spin_map, cpu_present_mask);
>
> - Â Â Â cpu_clear(boot_cpuid, of_spin_map);
> + Â Â Â cpumask_clear_cpu(boot_cpuid, &of_spin_map);
>
> Â Â Â Â/* Non-lpar has additional take/give timebase */
> Â Â Â Âif (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
> diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
> index 6520385..32cb4e6 100644
> --- a/arch/powerpc/platforms/cell/spufs/sched.c
> +++ b/arch/powerpc/platforms/cell/spufs/sched.c
> @@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
> Â Â Â Â * runqueue. The context will be rescheduled on the proper node
> Â Â Â Â * if it is timesliced or preempted.
> Â Â Â Â */
> - Â Â Â ctx->cpus_allowed = current->cpus_allowed;
> + Â Â Â cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
>
> Â Â Â Â/* Save the current cpu id for spu interrupt routing. */
> Â Â Â Âctx->last_ran = raw_smp_processor_id();
> diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
> index ef8c454..7be7c20 100644
> --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
> +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
> @@ -280,7 +280,7 @@ static int pseries_add_processor(struct device_node *np)
> Â Â Â Â}
>
> Â Â Â Âfor_each_cpu(cpu, tmp) {
> - Â Â Â Â Â Â Â BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
> + Â Â Â Â Â Â Â BUG_ON(cpu_present(cpu));
> Â Â Â Â Â Â Â Âset_cpu_present(cpu, true);
> Â Â Â Â Â Â Â Âset_hard_smp_processor_id(cpu, *intserv++);
> Â Â Â Â}
> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
> index 33794c1..c160361 100644
> --- a/arch/powerpc/xmon/xmon.c
> +++ b/arch/powerpc/xmon/xmon.c
> @@ -334,7 +334,7 @@ static void release_output_lock(void)
>
> Âint cpus_are_in_xmon(void)
> Â{
> - Â Â Â return !cpus_empty(cpus_in_xmon);
> + Â Â Â return !cpumask_empty(&cpus_in_xmon);
> Â}
> Â#endif
>
> @@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
>
> Â#ifdef CONFIG_SMP
> Â Â Â Âcpu = smp_processor_id();
> - Â Â Â if (cpu_isset(cpu, cpus_in_xmon)) {
> + Â Â Â if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
> Â Â Â Â Â Â Â Âget_output_lock();
> Â Â Â Â Â Â Â Âexcprint(regs);
> Â Â Â Â Â Â Â Âprintf("cpu 0x%x: Exception %lx %s in xmon, "
> @@ -396,7 +396,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
> Â Â Â Â}
>
> Â Â Â Âxmon_fault_jmp[cpu] = recurse_jmp;
> - Â Â Â cpu_set(cpu, cpus_in_xmon);
> + Â Â Â cpumask_set_cpu(cpu, &cpus_in_xmon);
>
> Â Â Â Âbp = NULL;
> Â Â Â Âif ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
> @@ -440,7 +440,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
> Â Â Â Â Â Â Â Â Â Â Â Âsmp_send_debugger_break(MSG_ALL_BUT_SELF);
> Â Â Â Â Â Â Â Â Â Â Â Â/* wait for other cpus to come in */
> Â Â Â Â Â Â Â Â Â Â Â Âfor (timeout = 100000000; timeout != 0; --timeout) {
> - Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â if (cpus_weight(cpus_in_xmon) >= ncpus)
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â if (cpumask_weight(&cpus_in_xmon) >= ncpus)
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âbreak;
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âbarrier();
> Â Â Â Â Â Â Â Â Â Â Â Â}
> @@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
> Â Â Â Â Â Â Â Â}
> Â Â Â Â}
> Âleave:
> - Â Â Â cpu_clear(cpu, cpus_in_xmon);
> + Â Â Â cpumask_clear_cpu(cpu, &cpus_in_xmon);
> Â Â Â Âxmon_fault_jmp[cpu] = NULL;
> Â#else
> Â Â Â Â/* UP is simple... */
> @@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs)
> Âstatic int xmon_ipi(struct pt_regs *regs)
> Â{
> Â#ifdef CONFIG_SMP
> - Â Â Â if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
> + Â Â Â if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
> Â Â Â Â Â Â Â Âxmon_core(regs, 1);
> Â#endif
> Â Â Â Âreturn 0;
> @@ -976,7 +976,7 @@ static int cpu_cmd(void)
> Â Â Â Â Â Â Â Âprintf("cpus stopped:");
> Â Â Â Â Â Â Â Âcount = 0;
> Â Â Â Â Â Â Â Âfor (cpu = 0; cpu < NR_CPUS; ++cpu) {
> - Â Â Â Â Â Â Â Â Â Â Â if (cpu_isset(cpu, cpus_in_xmon)) {
> + Â Â Â Â Â Â Â Â Â Â Â if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âif (count == 0)
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âprintf(" %x", cpu);
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â++count;
> @@ -992,7 +992,7 @@ static int cpu_cmd(void)
> Â Â Â Â Â Â Â Âreturn 0;
> Â Â Â Â}
> Â Â Â Â/* try to switch to cpu specified */
> - Â Â Â if (!cpu_isset(cpu, cpus_in_xmon)) {
> + Â Â Â if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
> Â Â Â Â Â Â Â Âprintf("cpu 0x%x isn't in xmon\n", cpu);
> Â Â Â Â Â Â Â Âreturn 0;
> Â Â Â Â}
> --
> 1.7.3.1
>
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@xxxxxxxxxxxxxxx
> More majordomo info at Âhttp://vger.kernel.org/majordomo-info.html
> Please read the FAQ at Âhttp://www.tux.org/lkml/
>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/