On 08.06.2018 at 06:36, Souptick Joarder wrote:
If no further comment, we would like to get this patch into 4.18 / 4.18-rc-x.

On Sat, Jun 2, 2018 at 12:57 AM, Souptick Joarder <jrdr.linux@xxxxxxxxx> wrote:

Use new return type vm_fault_t for fault handler. For
now, this is just documenting that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.
Ref-> commit 1c8f422059ae ("mm: change return type to vm_fault_t")
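As far as I recall, all that 1c8f422059ae introduces at this point is a documentation alias in include/linux/mm_types.h, something along the lines of:

	typedef int vm_fault_t;	/* for now only documents "returns VM_FAULT_* codes" */

so converted handlers keep compiling exactly as before; only once every fault handler has been switched over can the typedef become a distinct (e.g. __bitwise) type that the compiler and sparse can actually check.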
Previously vm_insert_{mixed,pfn} returned an errno which the
driver then mapped into a VM_FAULT_* type. The new functions
vmf_insert_{mixed,pfn} remove this inefficiency by returning
the VM_FAULT_* type directly.
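To make the inefficiency concrete, here is a minimal sketch of the old pattern versus the new one (not lifted verbatim from any driver):

	vm_fault_t ret;
	int err;

	/* old: vm_insert_pfn() returns an errno the fault handler must remap */
	err = vm_insert_pfn(vma, address, pfn);
	if (err == 0 || err == -EBUSY)
		ret = VM_FAULT_NOPAGE;	/* an already-populated PTE is not an error */
	else if (err == -ENOMEM)
		ret = VM_FAULT_OOM;
	else
		ret = VM_FAULT_SIGBUS;

	/* new: vmf_insert_pfn() already returns a vm_fault_t code */
	ret = vmf_insert_pfn(vma, address, pfn);

This is the same kind of errno to VM_FAULT_* translation that gets deleted from ttm_bo_vm_fault() below.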
Signed-off-by: Souptick Joarder <jrdr.linux@xxxxxxxxx>
---
v2: Address Christian's comment. Put variable
    declarations in reverse xmas tree order.
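(In case the term is unfamiliar: "reverse xmas tree" is the kernel style convention of ordering local variable declarations from the longest line down to the shortest, e.g. with purely made-up names:

	struct some_driver_object *obj;	/* longest declaration first */
	unsigned long offset;
	vm_fault_t ret;
	int err;

It is purely a readability request; nothing functional changes.)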
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 45 ++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8eba95b..9de8b4f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -43,10 +43,11 @@
 #define TTM_BO_VM_NUM_PREFAULT 16
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				struct vm_fault *vmf)
 {
-	int ret = 0;
+	vm_fault_t ret = 0;
+	int err = 0;
 	if (likely(!bo->moving))
 		goto out_unlock;
@@ -77,9 +78,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	/*
 	 * Ordinary wait.
 	 */
-	ret = dma_fence_wait(bo->moving, true);
-	if (unlikely(ret != 0)) {
-		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+	err = dma_fence_wait(bo->moving, true);
+	if (unlikely(err != 0)) {
+		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
@@ -104,7 +105,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 		+ page_offset;
 }
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -115,8 +116,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	unsigned long pfn;
 	struct ttm_tt *ttm = NULL;
 	struct page *page;
-	int ret;
+	int err;
 	int i;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
@@ -128,9 +130,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	ret = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(ret != 0)) {
-		if (ret != -EBUSY)
+	err = ttm_bo_reserve(bo, true, true, NULL);
+	if (unlikely(err != 0)) {
+		if (err != -EBUSY)
 			return VM_FAULT_NOPAGE;
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -162,8 +164,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 	if (bdev->driver->fault_reserve_notify) {
-		ret = bdev->driver->fault_reserve_notify(bo);
-		switch (ret) {
+		err = bdev->driver->fault_reserve_notify(bo);
+		switch (err) {
 		case 0:
 			break;
 		case -EBUSY:
@@ -191,13 +193,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		goto out_unlock;
 	}
-	ret = ttm_mem_io_lock(man, true);
-	if (unlikely(ret != 0)) {
+	err = ttm_mem_io_lock(man, true);
+	if (unlikely(err != 0)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
-	ret = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(ret != 0)) {
+	err = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(err != 0)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
@@ -265,23 +267,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		}
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vm_insert_mixed(&cvma, address,
+			ret = vmf_insert_mixed(&cvma, address,
 					__pfn_to_pfn_t(pfn, PFN_DEV));
 		else
-			ret = vm_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn(&cvma, address, pfn);
 		/*
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
 		 */
-		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
 			break;
-		else if (unlikely(ret != 0)) {
-			ret =
-			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+		else if (unlikely(ret & VM_FAULT_ERROR))
 			goto out_io_unlock;
-		}
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
--
1.9.1
The patch looks good to me and I will pick it up for the next TTM pull request. I don't think it will make it into 4.18-rc-1, but 4.18-rc-x sounds realistic.

Christian.