[PATCH 54/79] block: rnull: add REQ_OP_FLUSH support

From: Andreas Hindborg

Date: Sun Feb 15 2026 - 18:40:51 EST


Add support for handling flush requests in rnull. When memory backing
and write cache are enabled, flush requests trigger a cache flush
operation that writes all dirty cache pages to the backing store. To
allow flush requests to be completed outside the normal I/O path,
factor the completion logic out of queue_rq() into a new
complete_request() helper.

Signed-off-by: Andreas Hindborg <a.hindborg@xxxxxxxxxx>
---
drivers/block/rnull/disk_storage.rs | 36 ++++++++++++++++++++++++++++--------
drivers/block/rnull/rnull.rs        | 31 ++++++++++++++++++++++---------
2 files changed, 50 insertions(+), 17 deletions(-)

diff --git a/drivers/block/rnull/disk_storage.rs b/drivers/block/rnull/disk_storage.rs
index ce3e83671709a..a613ed5223ba7 100644
--- a/drivers/block/rnull/disk_storage.rs
+++ b/drivers/block/rnull/disk_storage.rs
@@ -85,6 +85,13 @@ pub(crate) fn discard(
remaining_bytes -= processed;
}
}
+
+ pub(crate) fn flush(&self, hw_data: &Pin<&SpinLock<HwQueueContext>>) -> Result {
+ let mut tree_guard = self.lock();
+ let mut hw_data_guard = hw_data.lock();
+ let mut access = self.access(&mut tree_guard, &mut hw_data_guard, None);
+ access.flush()
+ }
}

pub(crate) struct DiskStorageAccess<'a, 'b, 'c> {
@@ -118,13 +125,16 @@ fn to_sector(index: usize) -> u64 {
(index << block::PAGE_SECTORS_SHIFT) as u64
}

- fn extract_cache_page(&mut self) -> Result<KBox<NullBlockPage>> {
- let cache_entry = self
- .cache_guard
- .find_next_entry_circular(
- self.disk_storage.next_flush_sector.load(ordering::Relaxed) as usize
- )
- .expect("Expected to find a page in the cache");
+ fn extract_cache_page(&mut self) -> Result<Option<KBox<NullBlockPage>>> {
+ let cache_entry = self.cache_guard.find_next_entry_circular(
+ self.disk_storage.next_flush_sector.load(ordering::Relaxed) as usize,
+ );
+
+ let cache_entry = if let Some(entry) = cache_entry {
+ entry
+ } else {
+ return Ok(None);
+ };

let index = cache_entry.index();

@@ -168,7 +178,16 @@ fn extract_cache_page(&mut self) -> Result<KBox<NullBlockPage>> {
}
};

- Ok(page)
+ Ok(Some(page))
+ }
+
+ fn flush(&mut self) -> Result {
+ if self.disk_storage.cache_size > 0 {
+ while let Some(page) = self.extract_cache_page()? {
+ drop(page);
+ }
+ }
+ Ok(())
}

fn get_cache_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
@@ -186,6 +205,7 @@ fn get_cache_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
.expect("Expected to have a page available")
} else {
self.extract_cache_page()?
+ .expect("Expected to find a page in the cache")
};
Ok(self
.cache_guard
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
index 92e75f15e02c6..4870aa3b7a53e 100644
--- a/drivers/block/rnull/rnull.rs
+++ b/drivers/block/rnull/rnull.rs
@@ -673,6 +673,18 @@ fn end_request(rq: Owned<mq::Request<Self>>) {
_ => rq.end(bindings::BLK_STS_IOERR),
}
}
+
+ fn complete_request(&self, rq: Owned<mq::Request<Self>>) {
+ match self.irq_mode {
+ IRQMode::None => Self::end_request(rq),
+ IRQMode::Soft => mq::Request::complete(rq.into()),
+ IRQMode::Timer => {
+ OwnableRefCounted::into_shared(rq)
+ .start(self.completion_time)
+ .dismiss();
+ }
+ }
+ }
}

impl_has_hr_timer! {
@@ -789,6 +801,15 @@ fn queue_rq(

let mut rq = rq.start();

+ if rq.command() == mq::Command::Flush {
+ if this.memory_backed {
+ this.storage.flush(&hw_data)?;
+ }
+ this.complete_request(rq);
+
+ return Ok(());
+ }
+
#[cfg(CONFIG_BLK_DEV_ZONED)]
if this.zoned.enabled {
this.handle_zoned_command(&hw_data, &mut rq)?;
@@ -814,15 +835,7 @@ fn queue_rq(

hw_data.lock().poll_queue.push_head(rq)?;
} else {
- match this.irq_mode {
- IRQMode::None => Self::end_request(rq),
- IRQMode::Soft => mq::Request::complete(rq.into()),
- IRQMode::Timer => {
- OwnableRefCounted::into_shared(rq)
- .start(this.completion_time)
- .dismiss();
- }
- }
+ this.complete_request(rq);
}
Ok(())
}

--
2.51.2