[PATCH 28/79] block: rnull: add partial I/O support for bad blocks
From: Andreas Hindborg
Date: Sun Feb 15 2026 - 19:02:23 EST
Add bad_blocks_partial_io configuration option that allows partial I/O
completion when encountering bad blocks, rather than failing the entire
request.
When enabled, requests are truncated to stop before the first bad block
range, allowing the valid portion to be processed successfully. This
improves compatibility with applications that can handle partial
reads/writes.
Signed-off-by: Andreas Hindborg <a.hindborg@xxxxxxxxxx>
---
drivers/block/rnull/configfs.rs | 32 ++--------
drivers/block/rnull/rnull.rs | 125 ++++++++++++++++++++++++++++------------
2 files changed, 95 insertions(+), 62 deletions(-)
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
index a39691b39e374..c08a3cbd66f18 100644
--- a/drivers/block/rnull/configfs.rs
+++ b/drivers/block/rnull/configfs.rs
@@ -104,6 +104,7 @@ fn make_group(
no_sched:11,
badblocks: 12,
badblocks_once: 13,
+ badblocks_partial_io: 14,
],
};
@@ -128,6 +129,7 @@ fn make_group(
no_sched: false,
bad_blocks: Arc::pin_init(BadBlocks::new(false), GFP_KERNEL)?,
bad_blocks_once: false,
+ bad_blocks_partial_io: false,
}),
}),
core::iter::empty(),
@@ -189,6 +191,7 @@ struct DeviceConfigInner {
no_sched: bool,
bad_blocks: Arc<BadBlocks>,
bad_blocks_once: bool,
+ bad_blocks_partial_io: bool,
}
#[vtable]
@@ -226,6 +229,7 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
no_sched: guard.no_sched,
bad_blocks: guard.bad_blocks.clone(),
bad_blocks_once: guard.bad_blocks_once,
+ bad_blocks_partial_io: guard.bad_blocks_partial_io,
})?);
guard.powered = true;
} else if guard.powered && !power_op {
@@ -427,29 +431,5 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
}
}
-#[vtable]
-impl configfs::AttributeOperations<13> for DeviceConfig {
- type Data = DeviceConfig;
-
- fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
- let mut writer = kernel::str::Formatter::new(page);
-
- if this.data.lock().bad_blocks_once {
- writer.write_str("1\n")?;
- } else {
- writer.write_str("0\n")?;
- }
-
- Ok(writer.bytes_written())
- }
-
- fn store(this: &DeviceConfig, page: &[u8]) -> Result {
- if this.data.lock().powered {
- return Err(EBUSY);
- }
-
- this.data.lock().bad_blocks_once = kstrtobool_bytes(page)?;
-
- Ok(())
- }
-}
+configfs_simple_bool_field!(DeviceConfig, 13, bad_blocks_once);
+configfs_simple_bool_field!(DeviceConfig, 14, bad_blocks_partial_io);
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
index 0f569c5b65f77..6691e5912c5c9 100644
--- a/drivers/block/rnull/rnull.rs
+++ b/drivers/block/rnull/rnull.rs
@@ -162,6 +162,7 @@ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
no_sched: *module_parameters::no_sched.value() != 0,
bad_blocks: Arc::pin_init(BadBlocks::new(false), GFP_KERNEL)?,
bad_blocks_once: false,
+ bad_blocks_partial_io: false,
})?;
disks.push(disk, GFP_KERNEL)?;
}
@@ -190,6 +191,7 @@ struct NullBlkOptions<'a> {
no_sched: bool,
bad_blocks: Arc<BadBlocks>,
bad_blocks_once: bool,
+ bad_blocks_partial_io: bool,
}
struct NullBlkDevice;
@@ -209,6 +211,7 @@ fn new(options: NullBlkOptions<'_>) -> Result<GenDisk<Self>> {
no_sched,
bad_blocks,
bad_blocks_once,
+ bad_blocks_partial_io,
} = options;
let mut flags = mq::tag_set::Flags::default();
@@ -239,6 +242,7 @@ fn new(options: NullBlkOptions<'_>) -> Result<GenDisk<Self>> {
block_size: block_size.into(),
bad_blocks,
bad_blocks_once,
+ bad_blocks_partial_io,
}),
GFP_KERNEL,
)?;
@@ -327,16 +331,62 @@ fn discard(tree: &Tree, mut sector: u64, sectors: u64, block_size: u64) -> Resul
}
#[inline(never)]
- fn transfer(
- command: bindings::req_op,
- tree: &Tree,
- sector: u64,
- segment: Segment<'_>,
+ fn transfer(rq: &mut Owned<mq::Request<Self>>, tree: &Tree, sectors: u32) -> Result {
+ let mut sector = rq.sector();
+ let end_sector = sector + u64::from(sectors);
+ let command = rq.command();
+
+ for bio in rq.bio_iter_mut() {
+ let segment_iter = bio.segment_iter();
+ for segment in segment_iter {
+ // Length might be limited by bad blocks.
+ let length = segment
+ .len()
+ .min(((end_sector - sector) << SECTOR_SHIFT) as u32);
+ match command {
+ bindings::req_op_REQ_OP_WRITE => Self::write(tree, sector, segment)?,
+ bindings::req_op_REQ_OP_READ => Self::read(tree, sector, segment)?,
+ _ => (),
+ }
+ sector += u64::from(length) >> SECTOR_SHIFT;
+
+ if sector >= end_sector {
+ return Ok(());
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn handle_bad_blocks(
+ rq: &mut Owned<mq::Request<Self>>,
+ queue_data: &QueueData,
+ sectors: &mut u32,
) -> Result {
- match command {
- bindings::req_op_REQ_OP_WRITE => Self::write(tree, sector, segment)?,
- bindings::req_op_REQ_OP_READ => Self::read(tree, sector, segment)?,
- _ => (),
+ if queue_data.bad_blocks.enabled() {
+ let start = rq.sector();
+ let end = start + u64::from(*sectors);
+ match queue_data.bad_blocks.check(start..end) {
+ badblocks::BlockStatus::None => {}
+ badblocks::BlockStatus::Acknowledged(mut range)
+ | badblocks::BlockStatus::Unacknowledged(mut range) => {
+ rq.data_ref().error.store(1, ordering::Relaxed);
+
+ if queue_data.bad_blocks_once {
+ queue_data.bad_blocks.set_good(range.clone())?;
+ }
+
+ if queue_data.bad_blocks_partial_io {
+ let block_size_sectors = queue_data.block_size >> SECTOR_SHIFT;
+ range.start = align_down(range.start, block_size_sectors);
+ if start < range.start {
+ *sectors = (range.start - start) as u32;
+ }
+ } else {
+ *sectors = 0;
+ }
+ }
+ };
}
Ok(())
}
@@ -398,6 +448,7 @@ struct QueueData {
block_size: u64,
bad_blocks: Arc<BadBlocks>,
bad_blocks_once: bool,
+ bad_blocks_partial_io: bool,
}
#[pin_data]
@@ -426,6 +477,30 @@ impl HasHrTimer<Self> for Pdu {
}
}
+fn is_power_of_two<T>(value: T) -> bool
+where
+ T: core::ops::Sub<T, Output = T>,
+ T: core::ops::BitAnd<Output = T>,
+ T: core::cmp::PartialOrd<T>,
+ T: Copy,
+ T: From<u8>,
+{
+ (value > 0u8.into()) && (value & (value - 1u8.into())) == 0u8.into()
+}
+
+fn align_down<T>(value: T, to: T) -> T
+where
+ T: core::ops::Sub<T, Output = T>,
+ T: core::ops::Not<Output = T>,
+ T: core::ops::BitAnd<Output = T>,
+ T: core::cmp::PartialOrd<T>,
+ T: Copy,
+ T: From<u8>,
+{
+ debug_assert!(is_power_of_two(to));
+ value & !(to - 1u8.into())
+}
+
#[vtable]
impl Operations for NullBlkDevice {
type QueueData = Pin<KBox<QueueData>>;
@@ -444,39 +519,17 @@ fn queue_rq(
mut rq: Owned<mq::Request<Self>>,
_is_last: bool,
) -> Result {
- if queue_data.bad_blocks.enabled() {
- let start = rq.sector();
- let end = start + u64::from(rq.sectors());
- match queue_data.bad_blocks.check(start..end) {
- badblocks::BlockStatus::None => {}
- badblocks::BlockStatus::Acknowledged(range)
- | badblocks::BlockStatus::Unacknowledged(range) => {
- rq.data_ref().error.store(1, ordering::Relaxed);
- if queue_data.bad_blocks_once {
- queue_data.bad_blocks.set_good(range)?;
- }
- }
- };
- }
+ let mut sectors = rq.sectors();
- // TODO: Skip IO if bad block.
+ Self::handle_bad_blocks(&mut rq, queue_data.get_ref(), &mut sectors)?;
if queue_data.memory_backed {
let tree = &queue_data.tree;
- let command = rq.command();
- let mut sector = rq.sector();
- if command == bindings::req_op_REQ_OP_DISCARD {
- Self::discard(tree, sector, rq.sectors().into(), queue_data.block_size)?;
+ if rq.command() == bindings::req_op_REQ_OP_DISCARD {
+ Self::discard(tree, rq.sector(), sectors.into(), queue_data.block_size)?;
} else {
- for bio in rq.bio_iter_mut() {
- let segment_iter = bio.segment_iter();
- for segment in segment_iter {
- let length = segment.len();
- Self::transfer(command, tree, sector, segment)?;
- sector += u64::from(length) >> block::SECTOR_SHIFT;
- }
- }
+ Self::transfer(&mut rq, tree, sectors)?;
}
}
--
2.51.2