[PATCH v4 09/11] smp: replace smp_call_function_single_async with smp_call_csd
From: Donghai Qiao
Date: Thu May 19 2022 - 16:50:41 EST
Replace smp_call_function_single_async() with smp_call_csd() and
convert all of its callers accordingly.
Signed-off-by: Donghai Qiao <dqiao@xxxxxxxxxx>
---
v1 -> v2: Removed the 'x' from the function names and changed the XCALL
prefix to SMP_CALL in the new macros
v2 -> v3: Converted all invocations of smp_call_function_single_async()
to smp_call_csd()
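
For reference, the mechanical conversion applied to every caller follows
the sketch below. This is illustrative only and not part of the patch:
the per-CPU csd, example_func() and example_kick() are assumed names,
while smp_call_csd() and SMP_CALL_TYPE_ASYNC come from the earlier
patches in this series.

/*
 * Illustrative only -- example_kick(), example_func() and the per-CPU
 * example_csd are made up for this note; only the smp_call_csd() line
 * mirrors the actual conversion done by this patch.
 */
#include <linux/percpu-defs.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(call_single_data_t, example_csd);

static void example_func(void *info)
{
	/* Runs on the target CPU out of the CSD/IPI handler. */
}

static void example_kick(int cpu)
{
	call_single_data_t *csd = &per_cpu(example_csd, cpu);

	INIT_CSD(csd, example_func, NULL);

	/* Before: smp_call_function_single_async(cpu, csd); */
	smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC);
}
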
arch/mips/kernel/process.c | 2 +-
arch/mips/kernel/smp.c | 2 +-
arch/s390/pci/pci_irq.c | 2 +-
arch/x86/kernel/cpuid.c | 2 +-
arch/x86/lib/msr-smp.c | 2 +-
block/blk-mq.c | 2 +-
drivers/clocksource/ingenic-timer.c | 2 +-
drivers/cpuidle/coupled.c | 2 +-
drivers/net/ethernet/cavium/liquidio/lio_core.c | 2 +-
include/linux/smp.h | 3 ---
kernel/debug/debug_core.c | 2 +-
kernel/sched/core.c | 2 +-
kernel/sched/fair.c | 2 +-
net/core/dev.c | 2 +-
14 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c2d5f4bfe1f3..8a8012af3219 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -745,7 +745,7 @@ static void raise_backtrace(cpumask_t *mask)
}
csd = &per_cpu(backtrace_csd, cpu);
- smp_call_function_single_async(cpu, csd);
+ smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC);
}
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 27f94fde499d..ec070378e47f 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -704,7 +704,7 @@ void tick_broadcast(const struct cpumask *mask)
for_each_cpu(cpu, mask) {
csd = &per_cpu(tick_broadcast_csd, cpu);
- smp_call_function_single_async(cpu, csd);
+ smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC);
}
}
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 136af9f32f23..5cfd6c98cfe3 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -212,7 +212,7 @@ static void zpci_handle_fallback_irq(void)
continue;
INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
- smp_call_function_single_async(cpu, &cpu_data->csd);
+ smp_call_csd(cpu, &cpu_data->csd, SMP_CALL_TYPE_ASYNC);
}
}
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 6f7b8cc1bc9f..760beece93e3 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -81,7 +81,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
cmd.regs.eax = pos;
cmd.regs.ecx = pos >> 32;
- err = smp_call_function_single_async(cpu, &csd);
+ err = smp_call_csd(cpu, &csd, SMP_CALL_TYPE_ASYNC);
if (err)
break;
wait_for_completion(&cmd.done);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index cbe2874835f3..cc33097651fc 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -178,7 +178,7 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
init_completion(&rv.done);
rv.msr.msr_no = msr_no;
- err = smp_call_function_single_async(cpu, &csd);
+ err = smp_call_csd(cpu, &csd, SMP_CALL_TYPE_ASYNC);
if (!err) {
wait_for_completion(&rv.done);
err = rv.msr.err;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 84d749511f55..5c14cdffc70d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1063,7 +1063,7 @@ static void blk_mq_complete_send_ipi(struct request *rq)
list = &per_cpu(blk_cpu_done, cpu);
if (llist_add(&rq->ipi_list, list)) {
INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(cpu, &rq->csd);
+ smp_call_csd(cpu, &rq->csd, SMP_CALL_TYPE_ASYNC);
}
}
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 24ed0f1f089b..722694666063 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -121,7 +121,7 @@ static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
csd->info = (void *) &timer->cevt;
csd->func = ingenic_per_cpu_event_handler;
- smp_call_function_single_async(timer->cpu, csd);
+ smp_call_csd(timer->cpu, csd, SMP_CALL_TYPE_ASYNC);
}
return IRQ_HANDLED;
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 74068742cef3..26519f6e828a 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -334,7 +334,7 @@ static void cpuidle_coupled_poke(int cpu)
call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
- smp_call_function_single_async(cpu, csd);
+ smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC);
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..489777fd3e56 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -729,7 +729,7 @@ static void liquidio_napi_drv_callback(void *arg)
napi_schedule_irqoff(&droq->napi);
} else {
INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
- smp_call_function_single_async(droq->cpu_id, &droq->csd);
+ smp_call_csd(droq->cpu_id, &droq->csd, SMP_CALL_TYPE_ASYNC);
}
}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 40c46d2bea88..1086f8d5e18c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -193,9 +193,6 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
-#define smp_call_function_single_async(cpu, csd) \
- smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC)
-
/*
* Cpus stopping functions in panic. All have default weak definitions.
* Architecture-dependent code may override them.
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index da06a5553835..113229096a70 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -264,7 +264,7 @@ void __weak kgdb_roundup_cpus(void)
continue;
kgdb_info[cpu].rounding_up = true;
- ret = smp_call_function_single_async(cpu, csd);
+ ret = smp_call_csd(cpu, csd, SMP_CALL_TYPE_ASYNC);
if (ret)
kgdb_info[cpu].rounding_up = false;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6d1d30bd6220..09d54fb3eb4c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -836,7 +836,7 @@ void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq())
__hrtick_restart(rq);
else
- smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+ smp_call_csd(cpu_of(rq), &rq->hrtick_csd, SMP_CALL_TYPE_ASYNC);
}
#else
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a68482d66535..1bb83d3ae9ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10461,7 +10461,7 @@ static void kick_ilb(unsigned int flags)
* is idle. And the softirq performing nohz idle load balance
* will be run before returning from the IPI.
*/
- smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
+ smp_call_csd(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd, SMP_CALL_TYPE_ASYNC);
}
/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 1461c2d9dec8..e8319f789ed1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5788,7 +5788,7 @@ static void net_rps_send_ipi(struct softnet_data *remsd)
struct softnet_data *next = remsd->rps_ipi_next;
if (cpu_online(remsd->cpu))
- smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ smp_call_csd(remsd->cpu, &remsd->csd, SMP_CALL_TYPE_ASYNC);
remsd = next;
}
#endif
--
2.27.0