Re: [PATCH 1/5] Squashfs: remove the FILE_CACHE option

From: Phillip Lougher
Date: Fri Sep 22 2017 - 22:01:39 EST


On Fri, Sep 22, 2017 at 10:55 PM, Daniel Rosenberg <drosen@xxxxxxxxxx> wrote:
> From: Adrien Schildknecht <adriens@xxxxxxxxxx>
>
> FILE_DIRECT is working fine and offers faster results and lower memory
> footprint.
>
> Removing FILE_CACHE makes our life easier because we don't have to
> maintain 2 different functions that do the same thing.

When I added FILE_DIRECT, I deliberately retained the original
FILE_CACHE behaviour and kept it as default, and I spent a lot of
effort to do so. It basically required complete rewriting to enable
FILE_CACHE and FILE_DIRECT to co-exist. FILE_CACHE wasn't simply some
old cruft I left in there because I couldn't be bothered to remove it.

There is a good reason for keeping the FILE_CACHE behaviour. While
FILE_DIRECT improves the performance of Squashfs by eliminating a
memcpy, and removing contention on a lock, it also increases the
amount of I/O and CPU Squashfs can use at any one time (by enabling
multiple Squashfs operations to run in parallel). This changes
scheduling and can take resources away from other tasks.

Who knows how many low-end embedded systems are out there that rely on
the original slower behaviour of FILE_CACHE and will break if it is
removed.

I didn't want to remove the FILE_CACHE behaviour and get a lot of
people complaining (and quite rightly too) that I'd removed a feature
they were relying on, and I don't intend to now either. You quite
simply don't remove a feature that people may be relying on.

Let's be clear. Your reason for removing FILE_CACHE is simply because
when adding your new whizzo feature, you needed to update both the
FILE_CACHE and FILE_DIRECT code, and as *you* didn't use FILE_CACHE,
you couldn't be bothered to update it, and removed it instead. This
is not a valid reason for removing it.

NACK

Phillip

>
> Signed-off-by: Adrien Schildknecht <adriens@xxxxxxxxxx>
> Signed-off-by: Daniel Rosenberg <drosen@xxxxxxxxxx>
> ---
> fs/squashfs/Kconfig | 28 ----------------------------
> fs/squashfs/Makefile | 3 +--
> fs/squashfs/file_cache.c | 38 --------------------------------------
> fs/squashfs/page_actor.h | 42 +-----------------------------------------
> 4 files changed, 2 insertions(+), 109 deletions(-)
> delete mode 100644 fs/squashfs/file_cache.c
>
> diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
> index 1adb3346b9d6..6c81bf620067 100644
> --- a/fs/squashfs/Kconfig
> +++ b/fs/squashfs/Kconfig
> @@ -25,34 +25,6 @@ config SQUASHFS
>
> If unsure, say N.
>
> -choice
> - prompt "File decompression options"
> - depends on SQUASHFS
> - help
> - Squashfs now supports two options for decompressing file
> - data. Traditionally Squashfs has decompressed into an
> - intermediate buffer and then memcopied it into the page cache.
> - Squashfs now supports the ability to decompress directly into
> - the page cache.
> -
> - If unsure, select "Decompress file data into an intermediate buffer"
> -
> -config SQUASHFS_FILE_CACHE
> - bool "Decompress file data into an intermediate buffer"
> - help
> - Decompress file data into an intermediate buffer and then
> - memcopy it into the page cache.
> -
> -config SQUASHFS_FILE_DIRECT
> - bool "Decompress files directly into the page cache"
> - help
> - Directly decompress file data into the page cache.
> - Doing so can significantly improve performance because
> - it eliminates a memcpy and it also removes the lock contention
> - on the single buffer.
> -
> -endchoice
> -
> choice
> prompt "Decompressor parallelisation options"
> depends on SQUASHFS
> diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
> index 6655631c53ae..225330ab7723 100644
> --- a/fs/squashfs/Makefile
> +++ b/fs/squashfs/Makefile
> @@ -5,8 +5,7 @@
> obj-$(CONFIG_SQUASHFS) += squashfs.o
> squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
> squashfs-y += namei.o super.o symlink.o decompressor.o
> -squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
> -squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
> +squashfs-y += file_direct.o page_actor.o
> squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
> squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
> squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
> diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
> deleted file mode 100644
> index f2310d2a2019..000000000000
> --- a/fs/squashfs/file_cache.c
> +++ /dev/null
> @@ -1,38 +0,0 @@
> -/*
> - * Copyright (c) 2013
> - * Phillip Lougher <phillip@xxxxxxxxxxxxxxx>
> - *
> - * This work is licensed under the terms of the GNU GPL, version 2. See
> - * the COPYING file in the top-level directory.
> - */
> -
> -#include <linux/fs.h>
> -#include <linux/vfs.h>
> -#include <linux/kernel.h>
> -#include <linux/slab.h>
> -#include <linux/string.h>
> -#include <linux/pagemap.h>
> -#include <linux/mutex.h>
> -
> -#include "squashfs_fs.h"
> -#include "squashfs_fs_sb.h"
> -#include "squashfs_fs_i.h"
> -#include "squashfs.h"
> -
> -/* Read separately compressed datablock and memcopy into page cache */
> -int squashfs_readpage_block(struct page *page, u64 block, int bsize)
> -{
> - struct inode *i = page->mapping->host;
> - struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
> - block, bsize);
> - int res = buffer->error;
> -
> - if (res)
> - ERROR("Unable to read page, block %llx, size %x\n", block,
> - bsize);
> - else
> - squashfs_copy_cache(page, buffer, buffer->length, 0);
> -
> - squashfs_cache_put(buffer);
> - return res;
> -}
> diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
> index 98537eab27e2..d2df0544e0df 100644
> --- a/fs/squashfs/page_actor.h
> +++ b/fs/squashfs/page_actor.h
> @@ -8,46 +8,6 @@
> * the COPYING file in the top-level directory.
> */
>
> -#ifndef CONFIG_SQUASHFS_FILE_DIRECT
> -struct squashfs_page_actor {
> - void **page;
> - int pages;
> - int length;
> - int next_page;
> -};
> -
> -static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
> - int pages, int length)
> -{
> - struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
> -
> - if (actor == NULL)
> - return NULL;
> -
> - actor->length = length ? : pages * PAGE_SIZE;
> - actor->page = page;
> - actor->pages = pages;
> - actor->next_page = 0;
> - return actor;
> -}
> -
> -static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
> -{
> - actor->next_page = 1;
> - return actor->page[0];
> -}
> -
> -static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
> -{
> - return actor->next_page == actor->pages ? NULL :
> - actor->page[actor->next_page++];
> -}
> -
> -static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
> -{
> - /* empty */
> -}
> -#else
> struct squashfs_page_actor {
> union {
> void **buffer;
> @@ -77,5 +37,5 @@ static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
> {
> actor->squashfs_finish_page(actor);
> }
> -#endif
> +
> #endif
> --
> 2.14.1.821.g8fa685d3b7-goog
>