Re: [PATCH md-6.9 08/10] md/raid1: factor out choose_bb_rdev() from read_balance()

From: Xiao Ni
Date: Mon Feb 26 2024 - 20:50:48 EST


On Thu, Feb 22, 2024 at 4:06 PM Yu Kuai <yukuai1@xxxxxxxxxxxxxxx> wrote:
>
> From: Yu Kuai <yukuai3@xxxxxxxxxx>
>
> read_balance() is hard to understand because it is overlong and has too
> many states and branches.
>
> This patch factors out the case of reading from an rdev with bad blocks
> into a new helper, choose_bb_rdev(). There are no functional changes.
>
> Co-developed-by: Paul Luse <paul.e.luse@xxxxxxxxxxxxxxx>
> Signed-off-by: Paul Luse <paul.e.luse@xxxxxxxxxxxxxxx>
> Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
> ---
> drivers/md/raid1.c | 79 ++++++++++++++++++++++++++++------------------
> 1 file changed, 48 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index bc2f8fcbe5b3..4694e0e71e36 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -620,6 +620,44 @@ static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
> return -1;
> }
>
> +static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
> + int *max_sectors)
> +{
> + sector_t this_sector = r1_bio->sector;
> + int best_disk = -1;
> + int best_len = 0;
> + int disk;
> +
> + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
> + struct md_rdev *rdev;
> + int len;
> + int read_len;
> +
> + if (r1_bio->bios[disk] == IO_BLOCKED)
> + continue;
> +
> + rdev = conf->mirrors[disk].rdev;
> + if (!rdev || test_bit(Faulty, &rdev->flags) ||
> + test_bit(WriteMostly, &rdev->flags))
> + continue;
> +
> + /* keep track of the disk with the most readable sectors. */
> + len = r1_bio->sectors;
> + read_len = raid1_check_read_range(rdev, this_sector, &len);
> + if (read_len > best_len) {
> + best_disk = disk;
> + best_len = read_len;
> + }
> + }
> +
> + if (best_disk != -1) {
> + *max_sectors = best_len;
> + update_read_sectors(conf, best_disk, this_sector, best_len);
> + }
> +
> + return best_disk;
> +}
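
A small aside for other readers, since the loop above is dense: the rule
it implements is just "largest readable prefix wins". Here is a
self-contained sketch of only that rule, with the md types replaced by an
array of per-disk readable lengths (i.e. what raid1_check_read_range()
returns for each rdev; pick_most_readable() is my own name for it, not
anything from the patch):

#include <stdio.h>

/* Return the disk with the most readable sectors, or -1 if none. */
static int pick_most_readable(const int *read_len, int ndisks)
{
        int best_disk = -1;
        int best_len = 0;
        int disk;

        for (disk = 0; disk < ndisks; disk++) {
                /* strict '>' keeps the lowest-numbered disk on ties */
                if (read_len[disk] > best_len) {
                        best_disk = disk;
                        best_len = read_len[disk];
                }
        }
        return best_disk;
}

int main(void)
{
        /* sectors readable before each disk's first bad block */
        int read_len[] = { 8, 64, 32 };

        printf("best disk: %d\n", pick_most_readable(read_len, 3));
        return 0;
}

Note that a disk whose first sector is bad gets read_len 0 and so can
never be chosen; the strict comparison against best_len = 0 in
choose_bb_rdev() behaves the same way.
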
> +
> static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
> int *max_sectors)
> {
> @@ -707,8 +745,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
>
> for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
> sector_t dist;
> - sector_t first_bad;
> - int bad_sectors;
> unsigned int pending;
>
> rdev = conf->mirrors[disk].rdev;
> @@ -721,36 +757,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
> continue;
> if (test_bit(WriteMostly, &rdev->flags))
> continue;
> - /* This is a reasonable device to use. It might
> - * even be best.
> - */
> - if (is_badblock(rdev, this_sector, sectors,
> - &first_bad, &bad_sectors)) {
> - if (best_dist < MaxSector)
> - /* already have a better device */
> - continue;
> - if (first_bad <= this_sector) {
> - /* cannot read here. If this is the 'primary'
> - * device, then we must not read beyond
> - * bad_sectors from another device..
> - */
> - bad_sectors -= (this_sector - first_bad);
> - if (best_good_sectors > sectors)
> - best_good_sectors = sectors;
> -
> - } else {
> - sector_t good_sectors = first_bad - this_sector;
> - if (good_sectors > best_good_sectors) {
> - best_good_sectors = good_sectors;
> - best_disk = disk;
> - }
> - }
> + if (rdev_has_badblock(rdev, this_sector, sectors))
> continue;
> - } else {
> - if ((sectors > best_good_sectors) && (best_disk >= 0))
> - best_disk = -1;
> - best_good_sectors = sectors;
> - }
>
> if (best_disk >= 0)
> /* At least two disks to choose from so failfast is OK */
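
(For anyone following the series: rdev_has_badblock(), added earlier in
this cycle, is as far as I can tell just the boolean form of the old
open-coded check -- roughly:

static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
                                    int sectors)
{
        sector_t first_bad;
        int bad_sectors;

        return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}

so the main loop now only asks "any bad blocks in this range at all?" and
leaves the partial-range bookkeeping to choose_bb_rdev().)
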
> @@ -834,6 +842,15 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
> if (best_disk >= 0)
> return best_disk;
>
> + /*
> + * If we are here it means we didn't find a perfectly good disk so
> + * now spend a bit more time trying to find one with the most good
> + * sectors.
> + */
> + disk = choose_bb_rdev(conf, r1_bio, max_sectors);
> + if (disk >= 0)
> + return disk;
> +
> return choose_slow_rdev(conf, r1_bio, max_sectors);
> }
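
With this applied, the tail of read_balance() reads as a clean
three-stage fallback. Condensing the hunks above:

        if (best_disk >= 0)     /* a disk with no bad blocks in range */
                return best_disk;

        disk = choose_bb_rdev(conf, r1_bio, max_sectors);
        if (disk >= 0)          /* else: most readable sectors wins */
                return disk;

        /* last resort: the write-mostly disks everything above skipped */
        return choose_slow_rdev(conf, r1_bio, max_sectors);

which is much easier to follow than the old interleaved logic.
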
>
> --
> 2.39.2
>
>
Hi,

This patch looks good to me.
Reviewed-by: Xiao Ni <xni@xxxxxxxxxx>