Re: [PATCH 4/4] iommu/dma, numa: Use NUMA aware memory allocations in __iommu_dma_alloc_pages
From: Ganapatrao Kulkarni
Date: Fri Sep 22 2017 - 11:44:58 EST
Hi Robin,
On Thu, Sep 21, 2017 at 5:11 PM, Robin Murphy <robin.murphy@xxxxxxx> wrote:
> On 21/09/17 09:59, Ganapatrao Kulkarni wrote:
>> Change function __iommu_dma_alloc_pages to allocate memory/pages
>> for dma from respective device numa node.
>>
>> Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@xxxxxxxxxx>
>> ---
>> drivers/iommu/dma-iommu.c | 17 ++++++++++-------
>> 1 file changed, 10 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>> index 9d1cebe..0626b58 100644
>> --- a/drivers/iommu/dma-iommu.c
>> +++ b/drivers/iommu/dma-iommu.c
>> @@ -428,20 +428,21 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
>> kvfree(pages);
>> }
>>
>> -static struct page **__iommu_dma_alloc_pages(unsigned int count,
>> - unsigned long order_mask, gfp_t gfp)
>> +static struct page **__iommu_dma_alloc_pages(struct device *dev,
>> + unsigned int count, unsigned long order_mask, gfp_t gfp)
>> {
>> struct page **pages;
>> unsigned int i = 0, array_size = count * sizeof(*pages);
>> + int numa_node = dev_to_node(dev);
>>
>> order_mask &= (2U << MAX_ORDER) - 1;
>> if (!order_mask)
>> return NULL;
>>
>> if (array_size <= PAGE_SIZE)
>> - pages = kzalloc(array_size, GFP_KERNEL);
>> + pages = kzalloc_node(array_size, GFP_KERNEL, numa_node);
>> else
>> - pages = vzalloc(array_size);
>> + pages = vzalloc_node(array_size, numa_node);
>
> kvzalloc{,_node}() didn't exist when this code was first written, but it
> does now - since you're touching it you may as well get rid of the whole
> if-else and array_size local.
Thanks, I will update it in the next version.
>
> Further nit: some of the indentation below is a bit messed up.
OK, I will fix it.
>
> Robin.
>
>> if (!pages)
>> return NULL;
>>
>> @@ -462,8 +463,9 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
>> unsigned int order = __fls(order_mask);
>>
>> order_size = 1U << order;
>> - page = alloc_pages((order_mask - order_size) ?
>> - gfp | __GFP_NORETRY : gfp, order);
>> + page = alloc_pages_node(numa_node,
>> + (order_mask - order_size) ?
>> + gfp | __GFP_NORETRY : gfp, order);
>> if (!page)
>> continue;
>> if (!order)
>> @@ -548,7 +550,8 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
>> alloc_sizes = min_size;
>>
>> count = PAGE_ALIGN(size) >> PAGE_SHIFT;
>> - pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
>> + pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
>> + gfp);
>> if (!pages)
>> return NULL;
>>
>>
>
thanks
Ganapat