Writes of the md superblock are aligned to the logical blocks of the
containing device, but no attempt is made to align them to physical
block boundaries. This means that on a "512e" device (4k physical,
512-byte logical) every superblock update hits the 512-byte emulation
and any associated performance penalty.
Respect the physical block alignment when possible, that is, when the
write padded out to the physical block doesn't run into the data or
bitmap. (Super 1.0 doesn't need to consider this, because the data and
bitmap lie before the superblock.)
Signed-off-by: Christopher Unkel <cunkel@xxxxxxxxxxxxxx>
---
This series replaces the first patch of the previous series
(https://lkml.org/lkml/2020/10/22/1058), with the following changes:
1. Creates a helper function super_1_sb_length_ok().
2. Fixes operator placement style violation.
3. Covers case in super_1_sync().
4. Refactors duplicate logic.
5. Covers a case in existing code where the aligned superblock could
run into the bitmap.
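(Not part of the patch: a minimal userspace sketch of the padding
arithmetic described above, assuming a hypothetical 512e drive and a
hypothetical array size of 128 devices; round_up() here mimics the
kernel macro for power-of-two alignments.)

#include <stdio.h>

/* mimics the kernel's round_up() for power-of-two alignments */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	int max_dev = 128;                 /* hypothetical array size */
	int sb_size = max_dev * 2 + 256;   /* 512 superblock bytes */

	/* "512e": 512-byte logical, 4096-byte physical blocks */
	printf("logical-aligned write:  %d bytes\n",
	       round_up(sb_size, 512));    /* 512: hits the emulation */
	printf("physical-aligned write: %d bytes\n",
	       round_up(sb_size, 4096));   /* 4096: no read-modify-write */
	return 0;
}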
drivers/md/md.c | 45 +++++++++++++++++++++++++++++++++++++++++----
1 file changed, 41 insertions(+), 4 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d6a55ca1d52e..802a9a256fe5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1646,15 +1646,52 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
return cpu_to_le32(csum);
}
+static int
+super_1_sb_length_ok(struct md_rdev *rdev, int minor_version, int sb_len)
+{
+ int sectors = sb_len / 512;
+ struct mdp_superblock_1 *sb;
+
+ /* superblock is stored in memory as a single page */
+ if (sb_len > PAGE_SIZE)
+ return 0;
+
+ /* check if sb runs into data */
+ if (minor_version) {
+ if (rdev->sb_start + sectors > rdev->data_offset
+ || rdev->sb_start + sectors > rdev->new_data_offset)
+ return 0;
+ } else if (sb_len > 4096)
+ return 0;
+
+ /* check if sb runs into bitmap */
+ sb = page_address(rdev->sb_page);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) {
+ __s32 bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+ if (bitmap_offset > 0 && sectors > bitmap_offset)
+ return 0;
+ }
+
+ return 1;
+}
+
/*
* set rdev->sb_size to that required for number of devices in array
* with appropriate padding to underlying sectors
*/
static void
-super_1_set_rdev_sb_size(struct md_rdev *rdev, int max_dev)
+super_1_set_rdev_sb_size(struct md_rdev *rdev, int max_dev, int minor_version)
{
int sb_size = max_dev * 2 + 256;
- rdev->sb_size = round_up(sb_size, bdev_logical_block_size(rdev->bdev));
+ int pb_aligned_size = round_up(sb_size,
+ bdev_physical_block_size(rdev->bdev));
+
+ /* generate physical-block aligned writes if legal */
+ if (super_1_sb_length_ok(rdev, minor_version, pb_aligned_size))
+ rdev->sb_size = pb_aligned_size;
+ else
+ rdev->sb_size = round_up(sb_size,
+ bdev_logical_block_size(rdev->bdev));
}
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
@@ -1730,7 +1767,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
- super_1_set_rdev_sb_size(rdev, le32_to_cpu(sb->max_dev));
+ super_1_set_rdev_sb_size(rdev, le32_to_cpu(sb->max_dev), minor_version);
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
@@ -2140,7 +2177,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
if (max_dev > le32_to_cpu(sb->max_dev)) {
sb->max_dev = cpu_to_le32(max_dev);
- super_1_set_rdev_sb_size(rdev, max_dev);
+ super_1_set_rdev_sb_size(rdev, max_dev, mddev->minor_version);
} else
max_dev = le32_to_cpu(sb->max_dev);
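(Again not part of the patch: a standalone userspace sketch of the
run-into-data/bitmap checks that super_1_sb_length_ok() performs, with
hypothetical geometry; md_rdev fields are reduced to plain integers,
all in 512-byte sectors except sb_len, which is in bytes.)

#include <stdio.h>

static int sb_length_ok(long sb_start, long data_offset,
			long bitmap_offset, int sb_len)
{
	int sectors = sb_len / 512;

	/* padded superblock must end before the data starts */
	if (sb_start + sectors > data_offset)
		return 0;
	/* positive bitmap offset: bitmap follows the superblock */
	if (bitmap_offset > 0 && sectors > bitmap_offset)
		return 0;
	return 1;
}

int main(void)
{
	/* hypothetical super 1.1 layout: sb at sector 0, data at
	 * sector 16, internal bitmap 8 sectors past the superblock */
	printf("4096-byte write ok: %d\n", sb_length_ok(0, 16, 8, 4096));
	printf("8192-byte write ok: %d\n", sb_length_ok(0, 16, 8, 8192));
	return 0;
}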