+static dma_addr_t hyperv_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   unsigned long attrs)
+{
+	phys_addr_t map, phys = (page_to_pfn(page) << PAGE_SHIFT) + offset;
+
+	if (!hv_is_isolation_supported())
+		return phys;
+
+	map = swiotlb_tbl_map_single(dev, phys, size, HV_HYP_PAGE_SIZE, dir,
+				     attrs);
+	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
+		return DMA_MAPPING_ERROR;
+
+	return map;
+}
This largely duplicates what dma-direct + swiotlb does. Please use
force_dma_unencrypted to force bounce buffering and just use the generic
code.
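Roughly along these lines (only a sketch: hv_is_isolation_supported() is
taken from this series, and in practice the check would be folded into
the existing x86 force_dma_unencrypted() next to the SEV handling rather
than duplicating the function):

#include <linux/dma-direct.h>	/* force_dma_unencrypted() */
#include <asm/mshyperv.h>	/* hv_is_isolation_supported() */

bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * An isolated guest has to bounce all DMA through host-visible
	 * memory, so report the device as requiring unencrypted DMA and
	 * let dma-direct pick swiotlb on its own.
	 */
	if (hv_is_isolation_supported())
		return true;

	return false;
}

With that in place the custom hyperv_map_page()/dma_map_ops go away and
the generic dma-direct path does the bouncing.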
+	if (hv_isolation_type_snp()) {
+		ret = hv_set_mem_host_visibility(
+				phys_to_virt(hyperv_io_tlb_start),
+				hyperv_io_tlb_size,
+				VMBUS_PAGE_VISIBLE_READ_WRITE);
+		if (ret)
+			panic("%s: Fail to mark Hyper-v swiotlb buffer visible to host. err=%d\n",
+			      __func__, ret);
+
+		hyperv_io_tlb_remap = ioremap_cache(hyperv_io_tlb_start
+				+ ms_hyperv.shared_gpa_boundary,
+				hyperv_io_tlb_size);
+		if (!hyperv_io_tlb_remap)
+			panic("%s: Fail to remap io tlb.\n", __func__);
+
+		memset(hyperv_io_tlb_remap, 0x00, hyperv_io_tlb_size);
+		swiotlb_set_bounce_remap(hyperv_io_tlb_remap);
And this really needs to go into a common hook where we currently just
call set_memory_decrypted so that all the different schemes for these
trusted VMs (we have about half a dozen now) can share code rather than
reinventing it.
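Something like the below, purely as a sketch (the hook name is made up,
and how it gets wired into the set_memory_decrypted() path is an
assumption; hv_set_mem_host_visibility() and
VMBUS_PAGE_VISIBLE_READ_WRITE are taken from this series):

static int hv_set_memory_host_visible(unsigned long addr, int numpages)
{
	/*
	 * Same operation the patch open-codes for the swiotlb pool,
	 * expressed as a backend for set_memory_decrypted().
	 */
	return hv_set_mem_host_visibility((void *)addr,
					  numpages * PAGE_SIZE,
					  VMBUS_PAGE_VISIBLE_READ_WRITE);
}

Then the generic swiotlb code can simply call set_memory_decrypted() on
its pool, each isolation scheme only supplies its own backend, and the
remapping above shared_gpa_boundary can live in that one place as well
instead of in Hyper-V specific swiotlb setup code.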