Re: linux-next: manual merge of the vfs-brauner tree with the vfs-brauner-fixes tree
From: Christian Brauner
Date: Thu Mar 26 2026 - 09:33:49 EST
On Wed, Mar 25, 2026 at 08:13:56AM -0700, Darrick J. Wong wrote:
> On Wed, Mar 25, 2026 at 01:29:37PM +0000, Mark Brown wrote:
> > Hi all,
> >
> > Today's linux-next merge of the vfs-brauner tree got a conflict in:
> >
> > fs/iomap/bio.c
> >
> > between commit:
> >
> > f621324dfb3d6 ("iomap: fix lockdep complaint when reads fail")
> >
> > from the vfs-brauner-fixes tree and commit:
> >
> > e8f9cf03c9dc9 ("iomap: support ioends for buffered reads")
> >
> > from the vfs-brauner tree.
> >
> > I fixed it up (see below) and can carry the fix as necessary. This
> > is now fixed as far as linux-next is concerned, but any non-trivial
> > conflicts should be mentioned to your upstream maintainer when your tree
> > is submitted for merging. You may also want to consider cooperating
> > with the maintainer of the conflicting tree to minimise any particularly
> > complex conflicts.
>
> That looks correct to me, thanks for pointing out the merge conflict. :)
Wait wait, the vfs.fixes tree is merged into the vfs.all tree which
should have the merge conflict resolution:
commit 45933bb2dca2b8a5c81cd50947b7d0f9c381c867
Merge: a61ed0d2b28c 1b63f91d1c90
Author: Christian Brauner <brauner@xxxxxxxxxx>
AuthorDate: Tue Mar 24 23:38:16 2026 +0100
Commit: Christian Brauner <brauner@xxxxxxxxxx>
CommitDate: Tue Mar 24 23:38:16 2026 +0100
Merge branch 'vfs-7.1.integrity' into vfs.all
Signed-off-by: Christian Brauner <brauner@xxxxxxxxxx>
# Conflicts:
# fs/iomap/bio.c
diff --cc fs/iomap/bio.c
index edd908183058,f989ffcaac96..4504f4633f17
--- a/fs/iomap/bio.c
+++ b/fs/iomap/bio.c
@@@ -8,66 -9,33 +9,77 @@@
#include "internal.h"
#include "trace.h"
+static DEFINE_SPINLOCK(failed_read_lock);
+static struct bio_list failed_read_list = BIO_EMPTY_LIST;
+
- static void __iomap_read_end_io(struct bio *bio)
+ static u32 __iomap_read_end_io(struct bio *bio, int error)
{
- int error = blk_status_to_errno(bio->bi_status);
struct folio_iter fi;
+ u32 folio_count = 0;
- bio_for_each_folio_all(fi, bio)
+ bio_for_each_folio_all(fi, bio) {
iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ folio_count++;
+ }
+ if (bio_integrity(bio))
+ fs_bio_integrity_free(bio);
bio_put(bio);
+ return folio_count;
}
+static void
+iomap_fail_reads(
+ struct work_struct *work)
+{
+ struct bio *bio;
+ struct bio_list tmp = BIO_EMPTY_LIST;
+ unsigned long flags;
+
+ spin_lock_irqsave(&failed_read_lock, flags);
+ bio_list_merge_init(&tmp, &failed_read_list);
+ spin_unlock_irqrestore(&failed_read_lock, flags);
+
+ while ((bio = bio_list_pop(&tmp)) != NULL) {
- __iomap_read_end_io(bio);
++ __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
+ cond_resched();
+ }
+}
+
+static DECLARE_WORK(failed_read_work, iomap_fail_reads);
+
+static void iomap_fail_buffered_read(struct bio *bio)
+{
+ unsigned long flags;
+
+ /*
+ * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
+ * in the fserror code. The caller no longer owns the bio reference
+ * after the spinlock drops.
+ */
+ spin_lock_irqsave(&failed_read_lock, flags);
+ if (bio_list_empty(&failed_read_list))
+ WARN_ON_ONCE(!schedule_work(&failed_read_work));
+ bio_list_add(&failed_read_list, bio);
+ spin_unlock_irqrestore(&failed_read_lock, flags);
+}
+
static void iomap_read_end_io(struct bio *bio)
{
- __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
+ if (bio->bi_status) {
+ iomap_fail_buffered_read(bio);
+ return;
+ }
+
- __iomap_read_end_io(bio);
++ __iomap_read_end_io(bio, 0);
+ }
+
+ u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
+ {
+ return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
}
- static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+ static void iomap_bio_submit_read(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx)
{
struct bio *bio = ctx->read_ctx;
>
> --D
>
> > diff --cc fs/iomap/bio.c
> > index edd908183058f,f989ffcaac96d..0000000000000
> > --- a/fs/iomap/bio.c
> > +++ b/fs/iomap/bio.c
> > @@@ -8,66 -9,33 +9,78 @@@
> > #include "internal.h"
> > #include "trace.h"
> >
> > +static DEFINE_SPINLOCK(failed_read_lock);
> > +static struct bio_list failed_read_list = BIO_EMPTY_LIST;
> > +
> > - static void __iomap_read_end_io(struct bio *bio)
> > + static u32 __iomap_read_end_io(struct bio *bio, int error)
> > {
> > - int error = blk_status_to_errno(bio->bi_status);
> > struct folio_iter fi;
> > + u32 folio_count = 0;
> >
> > - bio_for_each_folio_all(fi, bio)
> > + bio_for_each_folio_all(fi, bio) {
> > iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
> > + folio_count++;
> > + }
> > + if (bio_integrity(bio))
> > + fs_bio_integrity_free(bio);
> > bio_put(bio);
> > + return folio_count;
> > }
> >
> > +static void
> > +iomap_fail_reads(
> > + struct work_struct *work)
> > +{
> > + struct bio *bio;
> > + struct bio_list tmp = BIO_EMPTY_LIST;
> > + unsigned long flags;
> > +
> > + spin_lock_irqsave(&failed_read_lock, flags);
> > + bio_list_merge_init(&tmp, &failed_read_list);
> > + spin_unlock_irqrestore(&failed_read_lock, flags);
> > +
> > + while ((bio = bio_list_pop(&tmp)) != NULL) {
> > - __iomap_read_end_io(bio);
> > ++ __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
> > + cond_resched();
> > + }
> > +}
> > +
> > +static DECLARE_WORK(failed_read_work, iomap_fail_reads);
> > +
> > +static void iomap_fail_buffered_read(struct bio *bio)
> > +{
> > + unsigned long flags;
> > +
> > + /*
> > + * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
> > + * in the fserror code. The caller no longer owns the bio reference
> > + * after the spinlock drops.
> > + */
> > + spin_lock_irqsave(&failed_read_lock, flags);
> > + if (bio_list_empty(&failed_read_list))
> > + WARN_ON_ONCE(!schedule_work(&failed_read_work));
> > + bio_list_add(&failed_read_list, bio);
> > + spin_unlock_irqrestore(&failed_read_lock, flags);
> > +}
> > +
> > static void iomap_read_end_io(struct bio *bio)
> > {
> > - __iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
> > + if (bio->bi_status) {
> > + iomap_fail_buffered_read(bio);
> > + return;
> > + }
> > +
> > - __iomap_read_end_io(bio);
> > ++ __iomap_read_end_io(bio, 0);
> > }
> >
> > - static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
> > ++
> > + u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
> > + {
> > + return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
> > + }
> > +
> > + static void iomap_bio_submit_read(const struct iomap_iter *iter,
> > + struct iomap_read_folio_ctx *ctx)
> > {
> > struct bio *bio = ctx->read_ctx;
> >
>
>