[PATCH 15/24] netfs: Provide an iterator-reset function

From: David Howells
Date: Mon Jul 29 2024 - 12:27:40 EST

Provide a function to reset the iterator on a subrequest so that it refers
just to the region that remains to be transferred, as indicated by
subreq->len and subreq->transferred.  The iterator may have been advanced by
socket ops or extraction ops by an amount that doesn't necessarily match the
amount actually transferred, so it can't simply be reused as-is.

Use this in the write-retry path in write_collect.c in place of the
open-coded iov_iter_revert()/iov_iter_advance() pair, and make
netfs_advance_write() static, dropping its declaration from internal.h, as
it has no users outside write_issue.c.
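For illustration only (not part of the change, and using made-up names), the
effect can be modelled in userspace with the iterator reduced to a
(pos, count) window over the buffer: after the reset, the window starts at
subreq->transferred and covers exactly subreq->len - subreq->transferred
bytes, wherever the I/O ops had previously left it:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_iter { size_t pos, count; };

static void fake_advance(struct fake_iter *it, size_t n)  { it->pos += n; it->count -= n; }
static void fake_revert(struct fake_iter *it, size_t n)   { it->pos -= n; it->count += n; }
static void fake_truncate(struct fake_iter *it, size_t n) { if (it->count > n) it->count = n; }

/* The same three steps that netfs_reset_iter() performs, applied to the
 * fake iterator: line it up with the un-transferred part of the buffer and
 * clip it to that length.
 */
static void fake_reset_iter(struct fake_iter *it, size_t len, size_t transferred)
{
	size_t remain = len - transferred;

	if (it->count > remain)
		fake_advance(it, it->count - remain);
	else if (it->count < remain)
		fake_revert(it, remain - it->count);
	fake_truncate(it, remain);
}

int main(void)
{
	/* 64KiB subrequest, 16KiB already transferred, but the extraction op
	 * only advanced the iterator by 24KiB, leaving 40KiB in it.
	 */
	struct fake_iter it = { .pos = 24576, .count = 40960 };

	fake_reset_iter(&it, 65536, 16384);
	printf("pos=%zu count=%zu\n", it.pos, it.count);
	assert(it.pos == 16384 && it.count == 49152);
	return 0;
}

Note that the truncation at the end only ever shrinks the window, so an
over-long iterator is clipped to the remaining region rather than extended.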
---
 fs/netfs/internal.h      |  4 +---
 fs/netfs/misc.c          | 18 ++++++++++++++++++
 fs/netfs/write_collect.c |  3 +--
 fs/netfs/write_issue.c   |  6 +++---
 4 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index e1149e05a5c8..21a3c7d13585 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -69,6 +69,7 @@ int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio
 			      bool needs_put);
 struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
 void netfs_clear_buffer(struct netfs_io_request *rreq);
+void netfs_reset_iter(struct netfs_io_subrequest *subreq);

 /*
  * objects.c
@@ -161,9 +162,6 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 void netfs_reissue_write(struct netfs_io_stream *stream,
 			 struct netfs_io_subrequest *subreq,
 			 struct iov_iter *source);
-int netfs_advance_write(struct netfs_io_request *wreq,
-			struct netfs_io_stream *stream,
-			loff_t start, size_t len, bool to_eof);
 struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
 int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
 			       struct folio *folio, size_t copied, bool to_page_end,
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 1700849491a0..a9baa46a17ff 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -84,6 +84,24 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
 	}
 }

+/*
+ * Reset the subrequest iterator to refer just to the region remaining to be
+ * read.  The iterator may or may not have been advanced by socket ops or
+ * extraction ops to an extent that may or may not match the amount actually
+ * read.
+ */
+void netfs_reset_iter(struct netfs_io_subrequest *subreq)
+{
+	struct iov_iter *io_iter = &subreq->io_iter;
+	size_t remain = subreq->len - subreq->transferred;
+
+	if (io_iter->count > remain)
+		iov_iter_advance(io_iter, io_iter->count - remain);
+	else if (io_iter->count < remain)
+		iov_iter_revert(io_iter, remain - io_iter->count);
+	iov_iter_truncate(&subreq->io_iter, remain);
+}
+
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 1521a23077c3..801a130a0ce1 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -219,9 +219,8 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 		/* Determine the set of buffers we're going to use.  Each
 		 * subreq gets a subset of a single overall contiguous buffer.
 		 */
+		netfs_reset_iter(from);
 		source = from->io_iter;
-		iov_iter_revert(&source, subreq->len - source.count);
-		iov_iter_advance(&source, from->transferred);
 		source.count = len;

 		/* Work through the sublist. */
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index d581fd8a568b..520be44d132e 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -259,9 +259,9 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
  * we can avoid overrunning the credits obtained (cifs) and try to parallelise
  * content-crypto preparation with network writes.
  */
-int netfs_advance_write(struct netfs_io_request *wreq,
-			struct netfs_io_stream *stream,
-			loff_t start, size_t len, bool to_eof)
+static int netfs_advance_write(struct netfs_io_request *wreq,
+			       struct netfs_io_stream *stream,
+			       loff_t start, size_t len, bool to_eof)
 {
 	struct netfs_io_subrequest *subreq = stream->construct;
 	size_t part;