[PATCH 4.4 022/104] f2fs: check blkaddr more accuratly before issue a bio

From: Greg Kroah-Hartman
Date: Thu Jan 24 2019 - 14:24:50 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Yunlei He <heyunlei@xxxxxxxxxx>

commit 0833721ec3658a4e9d5e58b6fa82cf9edc431e59 upstream.

This patch checks blkaddr more accurately before issuing a
write or read bio.

Signed-off-by: Yunlei He <heyunlei@xxxxxxxxxx>
Reviewed-by: Chao Yu <yuchao0@xxxxxxxxxx>
Signed-off-by: Jaegeuk Kim <jaegeuk@xxxxxxxxxx>
[bwh: Backported to 4.4:
- CoW is not implemented so check f2fs_io_info::blk_addr instead of
f2fs_io_info::{old,new}_blkaddr
- Operation code is f2fs_io_info::rw instead of f2fs_io_info::op
- Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
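For reference, verify_block_addr() previously only required that
SEG0_BLKADDR(sbi) <= blk_addr < MAX_BLKADDR(sbi). With the fio passed in,
META writes and genuine meta reads are now confined to the meta area
[SEG0_BLKADDR, MAIN_BLKADDR), while data/node I/O and META_POR reads
(which only borrow the meta inode mapping, so is_meta is false) must fall
inside the main area [MAIN_BLKADDR, MAX_BLKADDR). The SM_I(sbi) fallbacks
to the raw super block let the macros work before the segment manager is
built during mount. Below is a minimal userspace sketch of that range
split; the boundary values are made up for illustration and stand in for
the on-disk superblock fields, and assert() stands in for BUG_ON().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative layout only: the real values come from the superblock. */
#define SEG0_BLKADDR	512u	/* start of segment 0 (meta area) */
#define MAIN_BLKADDR	4096u	/* start of the main (data/node) area */
#define MAX_BLKADDR	65536u	/* one past the last valid block */

/* Mirrors the range split done by the patched verify_block_addr(). */
static void check_blkaddr(bool meta_area_expected, unsigned int blk_addr)
{
	if (meta_area_expected) {
		/* META writes and genuine meta reads stay below MAIN_BLKADDR. */
		assert(blk_addr >= SEG0_BLKADDR && blk_addr < MAIN_BLKADDR);
	} else {
		/* Data/node I/O and META_POR reads target the main area. */
		assert(blk_addr >= MAIN_BLKADDR && blk_addr < MAX_BLKADDR);
	}
}

int main(void)
{
	check_blkaddr(true, 600);	/* ok: inside the meta area */
	check_blkaddr(false, 10000);	/* ok: inside the main area */
	/* check_blkaddr(true, 10000) would trip the assertion, the
	 * userspace stand-in for the kernel's BUG_ON(). */
	printf("both addresses fall in their expected ranges\n");
	return 0;
}
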
fs/f2fs/checkpoint.c |  2 ++
fs/f2fs/data.c       |  3 ++-
fs/f2fs/f2fs.h       |  1 +
fs/f2fs/segment.h    | 25 +++++++++++++++++++------
4 files changed, 24 insertions(+), 7 deletions(-)

--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -58,6 +58,7 @@ static struct page *__get_meta_page(stru
.rw = READ_SYNC | REQ_META | REQ_PRIO,
.blk_addr = index,
.encrypted_page = NULL,
+ .is_meta = is_meta,
};

if (unlikely(!is_meta))
@@ -151,6 +152,7 @@ int ra_meta_pages(struct f2fs_sb_info *s
.type = META,
.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
.encrypted_page = NULL,
+ .is_meta = (type != META_POR),
};

if (unlikely(type == META_POR))
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -147,6 +147,7 @@ int f2fs_submit_page_bio(struct f2fs_io_
struct bio *bio;
struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

+ verify_block_addr(fio, fio->blk_addr);
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);

@@ -172,7 +173,7 @@ void f2fs_submit_page_mbio(struct f2fs_i

io = is_read ? &sbi->read_io : &sbi->write_io[btype];

- verify_block_addr(sbi, fio->blk_addr);
+ verify_block_addr(fio, fio->blk_addr);

down_write(&io->io_rwsem);

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -684,6 +684,7 @@ struct f2fs_io_info {
block_t blk_addr; /* block address to be written */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
+ bool is_meta; /* indicate borrow meta inode mapping or not */
};

#define is_read_io(rw) (((rw) & 1) == READ)
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -48,13 +48,19 @@
(secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
sbi->segs_per_sec)) \

-#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
-#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
+#define MAIN_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
+#define SEG0_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi) (sbi->total_sections)

-#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
+#define TOTAL_SEGS(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->segment_count : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
@@ -576,10 +582,17 @@ static inline void check_seg_range(struc
f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
- BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ struct f2fs_sb_info *sbi = fio->sbi;
+
+ if (PAGE_TYPE_OF_BIO(fio->type) == META &&
+ (!is_read_io(fio->rw) || fio->is_meta))
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
+ blk_addr >= MAIN_BLKADDR(sbi));
+ else
+ BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
+ blk_addr >= MAX_BLKADDR(sbi));
}

/*