Hi,
On 21-01-28 23:17:41, Keqian Zhu wrote:
[...]
+static void vfio_dma_dirty_log_start(struct vfio_iommu *iommu,
+				     struct vfio_dma *dma)
+{
+	struct vfio_domain *d;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		/* Go through all domain anyway even if we fail */
+		iommu_split_block(d->domain, dma->iova, dma->size);
+	}
+}
This should be a generic switch to prepare for dirty log start. Per the
Intel VT-d spec, there is an SLADE bit defined in the Scalable-Mode PASID
Table Entry; it enables the Accessed/Dirty flags in second-level paging
entries. So a generic iommu interface would be better here: for Intel
iommu it enables SLADE, and for ARM it splits the block mappings.
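
To make the suggestion concrete, here is a rough sketch of what such a
generic interface could look like (the names iommu_switch_dirty_log and
->switch_dirty_log below are made up for illustration, they are not
existing kernel APIs):

/*
 * Sketch only: switch_dirty_log is a hypothetical callback, not an
 * existing member of iommu_ops.  Each vendor IOMMU driver would
 * implement it according to its hardware.
 */
static inline int iommu_switch_dirty_log(struct iommu_domain *domain,
					 bool enable, unsigned long iova,
					 size_t size, int prot)
{
	if (!domain->ops->switch_dirty_log)
		return -ENODEV;

	return domain->ops->switch_dirty_log(domain, enable, iova,
					     size, prot);
}

Then the Intel implementation would set SLADE in the scalable-mode PASID
table entry, while the SMMU implementation would split blocks on enable
and merge pages on disable.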
+
+static void vfio_dma_dirty_log_stop(struct vfio_iommu *iommu,
+				    struct vfio_dma *dma)
+{
+	struct vfio_domain *d;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		/* Go through all domain anyway even if we fail */
+		iommu_merge_page(d->domain, dma->iova, dma->size,
+				 d->prot | dma->prot);
+	}
+}
Same as the comment above: a generic interface is needed here as well.
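With such a generic interface, this stop path would reduce to a single
call (again using the hypothetical name from the sketch above):

	/* disable dirty logging: ARM merges pages, Intel clears SLADE */
	iommu_switch_dirty_log(d->domain, false, dma->iova, dma->size,
			       d->prot | dma->prot);

and each IOMMU driver would handle the details internally.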
+static void vfio_iommu_dirty_log_switch(struct vfio_iommu *iommu, bool start)
+{
+	struct rb_node *n;
+
+	/* Split and merge even if all iommu don't support HWDBM now */
+	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+		if (!dma->iommu_mapped)
+			continue;
+
+		/* Go through all dma range anyway even if we fail */
+		if (start)
+			vfio_dma_dirty_log_start(iommu, dma);
+		else
+			vfio_dma_dirty_log_stop(iommu, dma);
+	}
+}
+
 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 					unsigned long arg)
 {
@@ -2812,8 +2900,10 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 	pgsize = 1 << __ffs(iommu->pgsize_bitmap);
 	if (!iommu->dirty_page_tracking) {
 		ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
-		if (!ret)
+		if (!ret) {
 			iommu->dirty_page_tracking = true;
+			vfio_iommu_dirty_log_switch(iommu, true);
+		}
 	}
 	mutex_unlock(&iommu->lock);
 	return ret;
@@ -2822,6 +2912,7 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 	if (iommu->dirty_page_tracking) {
 		iommu->dirty_page_tracking = false;
 		vfio_dma_bitmap_free_all(iommu);
+		vfio_iommu_dirty_log_switch(iommu, false);
 	}
 	mutex_unlock(&iommu->lock);
 	return 0;
--
2.19.1