Re: [PATCH kernel v3 2/2] powerpc/dma: Fallback to dma_ops when persistent memory present

From: Michael Ellerman
Date: Wed Oct 28 2020 - 20:40:26 EST


Alexey Kardashevskiy <aik@xxxxxxxxx> writes:
> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
> index e4198700ed1a..91112e748491 100644
> --- a/arch/powerpc/platforms/pseries/iommu.c
> +++ b/arch/powerpc/platforms/pseries/iommu.c
> @@ -1111,11 +1112,13 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
> */
> static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> {
> - int len, ret;
> + int len = 0, ret;
> + bool pmem_present = of_find_node_by_type(NULL, "ibm,pmemory") != NULL;

That leaks a reference on the returned node; of_find_node_by_type() takes a reference on the node it returns, which needs to be dropped with of_node_put(), eg:

dn = of_find_node_by_type(NULL, "ibm,pmemory");
pmem_present = dn != NULL;
of_node_put(dn);
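
For completeness, a minimal sketch of how that could look at the top of
enable_ddw() (the local dn is just illustrative):

	struct device_node *dn;
	bool pmem_present;

	/* of_find_node_by_type() takes a reference on the returned node */
	dn = of_find_node_by_type(NULL, "ibm,pmemory");
	pmem_present = dn != NULL;
	/* of_node_put() accepts NULL, so no need to check dn first */
	of_node_put(dn);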


> @@ -1126,7 +1129,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
>
> mutex_lock(&direct_window_init_mutex);
>
> - dma_addr = find_existing_ddw(pdn);
> + dma_addr = find_existing_ddw(pdn, &len);

I don't see the len returned here used anywhere?

> if (dma_addr != 0)
> goto out_unlock;
>
> @@ -1212,14 +1215,26 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> }
> /* verify the window * number of ptes will map the partition */
> /* check largest block * page size > max memory hotplug addr */
> - max_addr = ddw_memory_hotplug_max();
> - if (query.largest_available_block < (max_addr >> page_shift)) {
> - dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu "
> - "%llu-sized pages\n", max_addr, query.largest_available_block,
> - 1ULL << page_shift);
> + /*
> + * The "ibm,pmemory" can appear anywhere in the address space.
> + * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
> + * for the upper limit and fallback to max RAM otherwise but this
> + * disables device::dma_ops_bypass.
> + */
> + len = max_ram_len;

Here you overwrite whatever find_existing_ddw() wrote to len?

> + if (pmem_present) {
> + if (query.largest_available_block >=
> + (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
> + len = MAX_PHYSMEM_BITS - page_shift;
> + else
> + dev_info(&dev->dev, "Skipping ibm,pmemory");
> + }
> +
> + if (query.largest_available_block < (1ULL << (len - page_shift))) {
> + dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n",
> + 1ULL << len, query.largest_available_block, 1ULL << page_shift);
> goto out_failed;
> }
> - len = order_base_2(max_addr);
> win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
> if (!win64) {
> dev_info(&dev->dev,


cheers