Re: [PATCH 1/2] mm: Fix struct page layout on 32-bit systems
From: Jesper Dangaard Brouer
Date: Sat Apr 17 2021 - 03:34:36 EST
On Sat, 17 Apr 2021 00:07:23 +0100
"Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> wrote:
> 32-bit architectures which expect 8-byte alignment for 8-byte integers
> and need 64-bit DMA addresses (arc, arm, mips, ppc) had their struct
> page inadvertently expanded in 2019. When the dma_addr_t was added,
> it forced the alignment of the union to 8 bytes, which inserted a 4 byte
> gap between 'flags' and the union.
>
> Fix this by storing the dma_addr_t in one or two adjacent unsigned longs.
> This restores the alignment to that of an unsigned long, and also fixes a
> potential problem where (on a big endian platform), the bit used to denote
> PageTail could inadvertently get set, and a racing get_user_pages_fast()
> could dereference a bogus compound_head().
>
> Fixes: c25fff7171be ("mm: add dma_addr_t to struct page")
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
Acked-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Thank you, Matthew, for working on a fix for this. It's been a pleasure
working with you and exchanging crazy ideas for solving this. Most of
them didn't work out, especially those that came to me during restless
nights ;-).
Having worked through the other solutions, some very intrusive and some
that could even be considered ugly, I think we have a good and
non-intrusive solution/workaround in this patch. Thanks!
> include/linux/mm_types.h | 4 ++--
> include/net/page_pool.h | 12 +++++++++++-
> net/core/page_pool.c | 12 +++++++-----
> 3 files changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 6613b26a8894..5aacc1c10a45 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -97,10 +97,10 @@ struct page {
> };
> struct { /* page_pool used by netstack */
> /**
> - * @dma_addr: might require a 64-bit value even on
> + * @dma_addr: might require a 64-bit value on
> * 32-bit architectures.
> */
> - dma_addr_t dma_addr;
> + unsigned long dma_addr[2];
> };
> struct { /* slab, slob and slub */
> union {
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index b5b195305346..db7c7020746a 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
>
> static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
> {
> - return page->dma_addr;
> + dma_addr_t ret = page->dma_addr[0];
> + if (sizeof(dma_addr_t) > sizeof(unsigned long))
> + ret |= (dma_addr_t)page->dma_addr[1] << 32;
> + return ret;
> +}
> +
> +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> +{
> + page->dma_addr[0] = addr;
> + if (sizeof(dma_addr_t) > sizeof(unsigned long))
> + page->dma_addr[1] = addr >> 32;
> }
>
> static inline bool is_page_pool_compiled_in(void)
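From the driver side nothing changes at the call sites, since pools set
up with PP_FLAG_DMA_MAP keep handing out already-mapped pages. A rough
sketch of a typical RX refill path, with a made-up descriptor layout and
function name purely for illustration:

#include <net/page_pool.h>

/* Hypothetical hardware descriptor, for illustration only. */
struct my_rx_desc {
	u64 addr;
	u32 len;
};

static int my_rx_refill_one(struct page_pool *pool, struct my_rx_desc *desc)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;

	/* The pool mapped the page at allocation time; the helper
	 * reassembles the dma_addr_t from the one or two unsigned longs
	 * stored in struct page. */
	desc->addr = page_pool_get_dma_addr(page) + pool->p.offset;
	desc->len  = pool->p.max_len;
	return 0;
}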
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index ad8b0707af04..f014fd8c19a6 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
> struct page *page,
> unsigned int dma_sync_size)
> {
> + dma_addr_t dma_addr = page_pool_get_dma_addr(page);
> +
> dma_sync_size = min(dma_sync_size, pool->p.max_len);
> - dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
> + dma_sync_single_range_for_device(pool->p.dev, dma_addr,
> pool->p.offset, dma_sync_size,
> pool->p.dma_dir);
> }
> @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
> put_page(page);
> return NULL;
> }
> - page->dma_addr = dma;
> + page_pool_set_dma_addr(page, dma);
>
> if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
> */
> goto skip_dma_unmap;
>
> - dma = page->dma_addr;
> + dma = page_pool_get_dma_addr(page);
>
> - /* When page is unmapped, it cannot be returned our pool */
> + /* When page is unmapped, it cannot be returned to our pool */
> dma_unmap_page_attrs(pool->p.dev, dma,
> PAGE_SIZE << pool->p.order, pool->p.dma_dir,
> DMA_ATTR_SKIP_CPU_SYNC);
> - page->dma_addr = 0;
> + page_pool_set_dma_addr(page, 0);
> skip_dma_unmap:
> /* This may be the last page returned, releasing the pool, so
> * it is not safe to reference pool afterwards.
--
Best regards,
Jesper Dangaard Brouer
MSc.CS, Principal Kernel Engineer at Red Hat
LinkedIn: http://www.linkedin.com/in/brouer