Re: [PATCH 6.1.y 2/3] highmem: add kernel-doc for memcpy_*_folio()

From: Matthew Wilcox

Date: Mon Mar 09 2026 - 11:43:22 EST


On Mon, Mar 09, 2026 at 01:01:29PM +0800, Johnny Hao wrote:
> From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
>
> [ Upstream commit 9af47276ed83cc346263e56243756543a2a33c9d ]

What? This patch isn't that commit. That commit does indeed add
kernel-doc. This patch adds the functions themselves. Please be
more careful.

> This was inadvertently skipped when adding the new functions.
>
> Link: https://lkml.kernel.org/r/20240124181217.1761674-1-willy@xxxxxxxxxxxxx
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Signed-off-by: Johnny Hao <johnny_haocn@xxxxxxxx>
> ---
> include/linux/highmem.h | 164 ++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 164 insertions(+)
>
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 44242268f53b..a2a0cfbc19a0 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -415,6 +415,170 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
> kunmap_local(addr);
> }
>
> +/**
> + * memcpy_from_folio - Copy a range of bytes from a folio.
> + * @to: The memory to copy to.
> + * @folio: The folio to read from.
> + * @offset: The first byte in the folio to read.
> + * @len: The number of bytes to copy.
> + */
> +static inline void memcpy_from_folio(char *to, struct folio *folio,
> + size_t offset, size_t len)
> +{
> + VM_BUG_ON(offset + len > folio_size(folio));
> +
> + do {
> + const char *from = kmap_local_folio(folio, offset);
> + size_t chunk = len;
> +
> + if (folio_test_highmem(folio) &&
> + chunk > PAGE_SIZE - offset_in_page(offset))
> + chunk = PAGE_SIZE - offset_in_page(offset);
> + memcpy(to, from, chunk);
> + kunmap_local(from);
> +
> + to += chunk;
> + offset += chunk;
> + len -= chunk;
> + } while (len > 0);
> +}
> +
> +/**
> + * memcpy_to_folio - Copy a range of bytes to a folio.
> + * @folio: The folio to write to.
> + * @offset: The first byte in the folio to store to.
> + * @from: The memory to copy from.
> + * @len: The number of bytes to copy.
> + */
> +static inline void memcpy_to_folio(struct folio *folio, size_t offset,
> + const char *from, size_t len)
> +{
> + VM_BUG_ON(offset + len > folio_size(folio));
> +
> + do {
> + char *to = kmap_local_folio(folio, offset);
> + size_t chunk = len;
> +
> + if (folio_test_highmem(folio) &&
> + chunk > PAGE_SIZE - offset_in_page(offset))
> + chunk = PAGE_SIZE - offset_in_page(offset);
> + memcpy(to, from, chunk);
> + kunmap_local(to);
> +
> + from += chunk;
> + offset += chunk;
> + len -= chunk;
> + } while (len > 0);
> +
> + flush_dcache_folio(folio);
> +}
> +
> +/**
> + * folio_zero_tail - Zero the tail of a folio.
> + * @folio: The folio to zero.
> + * @offset: The byte offset in the folio to start zeroing at.
> + * @kaddr: The address the folio is currently mapped to.
> + *
> + * If you have already used kmap_local_folio() to map a folio, written
> + * some data to it and now need to zero the end of the folio (and flush
> + * the dcache), you can use this function. If you do not have the
> + * folio kmapped (eg the folio has been partially populated by DMA),
> + * use folio_zero_range() or folio_zero_segment() instead.
> + *
> + * Return: An address which can be passed to kunmap_local().
> + */
> +static inline __must_check void *folio_zero_tail(struct folio *folio,
> + size_t offset, void *kaddr)
> +{
> + size_t len = folio_size(folio) - offset;
> +
> + if (folio_test_highmem(folio)) {
> + size_t max = PAGE_SIZE - offset_in_page(offset);
> +
> + while (len > max) {
> + memset(kaddr, 0, max);
> + kunmap_local(kaddr);
> + len -= max;
> + offset += max;
> + max = PAGE_SIZE;
> + kaddr = kmap_local_folio(folio, offset);
> + }
> + }
> +
> + memset(kaddr, 0, len);
> + flush_dcache_folio(folio);
> +
> + return kaddr;
> +}
> +
> +/**
> + * folio_fill_tail - Copy some data to a folio and pad with zeroes.
> + * @folio: The destination folio.
> + * @offset: The offset into @folio at which to start copying.
> + * @from: The data to copy.
> + * @len: How many bytes of data to copy.
> + *
> + * This function is most useful for filesystems which support inline data.
> + * When they want to copy data from the inode into the page cache, this
> + * function does everything for them. It supports large folios even on
> + * HIGHMEM configurations.
> + */
> +static inline void folio_fill_tail(struct folio *folio, size_t offset,
> + const char *from, size_t len)
> +{
> + char *to = kmap_local_folio(folio, offset);
> +
> + VM_BUG_ON(offset + len > folio_size(folio));
> +
> + if (folio_test_highmem(folio)) {
> + size_t max = PAGE_SIZE - offset_in_page(offset);
> +
> + while (len > max) {
> + memcpy(to, from, max);
> + kunmap_local(to);
> + len -= max;
> + from += max;
> + offset += max;
> + max = PAGE_SIZE;
> + to = kmap_local_folio(folio, offset);
> + }
> + }
> +
> + memcpy(to, from, len);
> + to = folio_zero_tail(folio, offset + len, to + len);
> + kunmap_local(to);
> +}
> +
> +/**
> + * memcpy_from_file_folio - Copy some bytes from a file folio.
> + * @to: The destination buffer.
> + * @folio: The folio to copy from.
> + * @pos: The position in the file.
> + * @len: The maximum number of bytes to copy.
> + *
> + * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
> + * if the folio comes from HIGHMEM, and by the size of the folio.
> + *
> + * Return: The number of bytes copied from the folio.
> + */
> +static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
> + loff_t pos, size_t len)
> +{
> + size_t offset = offset_in_folio(folio, pos);
> + char *from = kmap_local_folio(folio, offset);
> +
> + if (folio_test_highmem(folio)) {
> + offset = offset_in_page(offset);
> + len = min_t(size_t, len, PAGE_SIZE - offset);
> + } else
> + len = min(len, folio_size(folio) - offset);
> +
> + memcpy(to, from, len);
> + kunmap_local(from);
> +
> + return len;
> +}
> +
> /**
> * folio_zero_segments() - Zero two byte ranges in a folio.
> * @folio: The folio to write to.
> --
> 2.34.1
>