From: Christian König <christian.koenig@amd.com>
We need to grab the lock of the BO, otherwise we can run into a crash when we try to inspect its current location.
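For illustration, here is a minimal user-space sketch of the same trylock-or-skip pattern (pthread_mutex_trylock stands in for dma_resv_trylock, and the object/mem_stats structures are made up for the example): statistics collection simply skips any object whose lock cannot be taken without blocking, rather than inspecting a location that may be changing underneath it.

/* Illustrative user-space analogue of the trylock-or-skip pattern used
 * in this patch; object, mem_stats and the lock layout are invented for
 * the example and are not amdgpu structures.
 */
#include <pthread.h>
#include <stdio.h>

struct mem_stats {
	unsigned long vram;
};

struct object {
	pthread_mutex_t lock;	/* stands in for the BO's reservation object */
	unsigned long vram_size;
};

/* Account the object only if its lock can be taken without blocking. */
static void object_get_memory(struct object *obj, struct mem_stats *stats)
{
	if (pthread_mutex_trylock(&obj->lock) != 0)
		return;	/* currently locked, location may be changing: skip */

	stats->vram += obj->vram_size;
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .vram_size = 4096 };
	struct mem_stats stats = { 0 };

	object_get_memory(&obj, &stats);
	printf("vram: %lu\n", stats.vram);
	return 0;
}

In the change below, BOs that share the VM root's reservation object are accounted without a trylock; all other BOs are skipped when their reservation lock cannot be taken immediately.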
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Guchun Chen <guchun.chen@amd.com>
Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit e2ad8e2df432498b1cee2af04df605723f4d75e6)
Cc: stable@vger.kernel.org # 6.3.x
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69 +++++++++++++++-----------
 1 file changed, 39 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5b3a70becbdf..a252a206f37b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -920,42 +920,51 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	return r;
 }
 
+static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
+				    struct amdgpu_mem_stats *stats)
+{
+	struct amdgpu_vm *vm = bo_va->base.vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+
+	if (!bo)
+		return;
+
+	/*
+	 * For now ignore BOs which are currently locked and potentially
+	 * changing their location.
+	 */
+	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+	    !dma_resv_trylock(bo->tbo.base.resv))
+		return;
+
+	amdgpu_bo_get_memory(bo, stats);
+	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+		dma_resv_unlock(bo->tbo.base.resv);
+}
+
 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats *stats)
 {
 	struct amdgpu_bo_va *bo_va, *tmp;
 
 	spin_lock(&vm->status_lock);
-	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
-	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
-		if (!bo_va->base.bo)
-			continue;
-		amdgpu_bo_get_memory(bo_va->base.bo, stats);
-	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
+		amdgpu_vm_bo_get_memory(bo_va, stats);
 	spin_unlock(&vm->status_lock);
 }