[PATCH v2 10/11] md: add atomic mode switching on I/O completion
From: tada keisuke
Date: Thu Apr 18 2024 - 01:45:00 EST
This patch depends on patch 09.
If nr_pending is still in percpu mode when I/O completes in softirq context, defer the switch to atomic mode to delayed execution.
The deferred work runs outside softirq context, where RCU grace periods can be waited on, and performs the switch to atomic mode there.
This patch completes the addition of atomic mode switching.
Signed-off-by: Keisuke TADA <keisuke1.tada@xxxxxxxxxx>
Signed-off-by: Toshifumi OHTAKE <toshifumi.ootake@xxxxxxxxxx>
---
drivers/md/md.c | 16 ++++++++++++++++
drivers/md/md.h | 6 ++++++
2 files changed, 22 insertions(+)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5f785353353d..18d0f214098b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9606,6 +9606,7 @@ void md_check_recovery(struct mddev *mddev)
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
+ test_bit(MD_RECOVERY_PERCPU, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
@@ -9616,6 +9617,21 @@ void md_check_recovery(struct mddev *mddev)
if (mddev_trylock(mddev)) {
bool try_set_sync = mddev->safemode != 0;
+ struct md_rdev *fault_rdev;
+ bool faulty_some = false;
+
+ rdev_for_each(fault_rdev, mddev) {
+ if (fault_rdev->raid_disk >= 0 &&
+ test_bit(Faulty, &fault_rdev->flags) &&
+ nr_pending_is_percpu_mode(fault_rdev)) {
+ percpu_ref_switch_to_atomic_sync(&fault_rdev->nr_pending);
+ faulty_some = true;
+ }
+ }
+ if (faulty_some) {
+ clear_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
+ goto unlock;
+ }
if (!mddev->external && mddev->safemode == 1)
mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 57b09b567ffa..a3c0b5e8cf05 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,6 +605,7 @@ enum recovery_flags {
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
+ MD_RECOVERY_PERCPU, /* nr_pending when faulty needs to be switched to atomic */
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
@@ -887,6 +888,11 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
int faulty = test_bit(Faulty, &rdev->flags);
nr_pending_dec(rdev);
if (faulty) {
+ if (nr_pending_is_percpu_mode(rdev)) {
+ set_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ return;
+ }
if (nr_pending_is_zero(rdev)) {
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
--
2.34.1