From: Andrzej Hajda <andrzej.hajda@xxxxxxxxx>
The debugger needs to read/write the program's vmas, including userptr vmas.
Since hmm_range_fault is used to pin userptr vmas, it is possible to map
those vmas from the debugger context.
v2: pin pages vs notifier, move to vm.c (Matthew)
v3: - iterate over system pages instead of DMA addresses, fixes the iommu-enabled case
    - s/xe_uvma_access/xe_vm_uvma_access/ (Matt)
Signed-off-by: Andrzej Hajda <andrzej.hajda@xxxxxxxxx>
Signed-off-by: Maciej Patelczyk <maciej.patelczyk@xxxxxxxxx>
Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@xxxxxxxxx> #v1
---
drivers/gpu/drm/xe/xe_eudebug.c | 3 ++-
drivers/gpu/drm/xe/xe_vm.c | 47 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 3 +++
3 files changed, 52 insertions(+), 1 deletion(-)
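
Reviewer note (illustrative, not part of the patch): the new helper below
follows the common "copy through pinned pages" pattern. A minimal sketch of
that pattern, assuming a generic pinned system-memory sg_table and a made-up
name (sg_copy_demo) instead of the xe_res_cursor helpers used in the real
code:

/*
 * Sketch only. Walk pinned backing pages via a scatterlist, kmap each
 * page and memcpy between it and a kernel buffer. For simplicity this
 * assumes each sg entry fits within one page.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/string.h>

static void sg_copy_demo(struct sg_table *sgt, void *buf,
			 size_t len, bool write)
{
	struct scatterlist *sg;
	size_t copied = 0;
	int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t chunk = min_t(size_t, sg->length, len - copied);
		/* Temporarily map the backing page. */
		void *ptr = kmap_local_page(sg_page(sg)) + sg->offset;

		if (write)
			memcpy(ptr, buf + copied, chunk);
		else
			memcpy(buf + copied, ptr, chunk);
		kunmap_local(ptr);

		copied += chunk;
		if (copied == len)
			break;
	}
}
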
diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
index 9d87df75348b..e5949e4dcad8 100644
--- a/drivers/gpu/drm/xe/xe_eudebug.c
+++ b/drivers/gpu/drm/xe/xe_eudebug.c
@@ -3076,7 +3076,8 @@ static int xe_eudebug_vma_access(struct xe_vma *vma, u64 offset_in_vma,
 		return ret;
 	}
 
-	return -EINVAL;
+	return xe_vm_userptr_access(to_userptr_vma(vma), offset_in_vma,
+				    buf, bytes, write);
 }
 
 static int xe_eudebug_vm_access(struct xe_vm *vm, u64 offset,
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 0f17bc8b627b..224ff9e16941 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3414,3 +3414,50 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
 	}
 	kvfree(snap);
 }
+
+int xe_vm_userptr_access(struct xe_userptr_vma *uvma, u64 offset,
+			 void *buf, u64 len, bool write)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+	struct xe_userptr *up = &uvma->userptr;
+	struct xe_res_cursor cur = {};
+	int cur_len, ret = 0;
+
+	while (true) {
+		down_read(&vm->userptr.notifier_lock);
+		if (!xe_vma_userptr_check_repin(uvma))
+			break;
+
+		spin_lock(&vm->userptr.invalidated_lock);
+		list_del_init(&uvma->userptr.invalidate_link);
+		spin_unlock(&vm->userptr.invalidated_lock);
+
+		up_read(&vm->userptr.notifier_lock);
+		ret = xe_vma_userptr_pin_pages(uvma);
+		if (ret)
+			return ret;
+	}
+
+	if (!up->sg) {
+		ret = -EINVAL;
+		goto out_unlock_notifier;
+	}
+
+	for (xe_res_first_sg_system(up->sg, offset, len, &cur); cur.remaining;
+	     xe_res_next(&cur, cur_len)) {
+		void *ptr = kmap_local_page(sg_page(cur.sgl)) + cur.start;
+
+		cur_len = min(cur.size, cur.remaining);
+		if (write)
+			memcpy(ptr, buf, cur_len);
+		else
+			memcpy(buf, ptr, cur_len);
+		kunmap_local(ptr);
+		buf += cur_len;
+	}
+	ret = len;
+
+out_unlock_notifier:
+	up_read(&vm->userptr.notifier_lock);
+	return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 23adb7442881..372ad40ad67f 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -280,3 +280,6 @@ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+int xe_vm_userptr_access(struct xe_userptr_vma *uvma, u64 offset,
+			 void *buf, u64 len, bool write);
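
Usage note (illustrative, not part of the patch): the helper is intended to
be called as in the xe_eudebug.c hunk above, i.e. only for userptr vmas; on
success it returns the number of bytes copied, otherwise a negative errno.
A hypothetical caller-side sketch:

/* Sketch of the expected caller-side pattern; the name is made up. */
static int debug_access_vma_sketch(struct xe_vma *vma, u64 offset_in_vma,
				   void *buf, u64 bytes, bool write)
{
	/* Only userptr vmas are handled by the new helper. */
	if (!xe_vma_is_userptr(vma))
		return -EINVAL;

	/* Returns bytes copied on success, negative errno on failure. */
	return xe_vm_userptr_access(to_userptr_vma(vma), offset_in_vma,
				    buf, bytes, write);
}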