drm/amdgpu: flush gart changes after all BO recovery
Author:     Nirmoy Das <nirmoy.das@amd.com>
AuthorDate: Fri, 28 May 2021 14:35:27 +0000 (16:35 +0200)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Wed, 2 Jun 2021 02:55:38 +0000 (22:55 -0400)
Don't flush gart changes after recovering each BO; instead, do it
once after recovering all the BOs. Flushing the gart is also needed
for amdgpu_ttm_alloc_gart().
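
The pattern, as a minimal stand-alone C sketch (rebind_one(),
flush_tlb() and NUM_BUFFERS are made-up stand-ins, not amdgpu API):

#include <stdio.h>

#define NUM_BUFFERS 4

static void rebind_one(int i) { printf("rebind buffer %d\n", i); }
static void flush_tlb(void)   { printf("flush TLB once\n"); }

int main(void)
{
	/* Before this change the flush ran inside the loop, once per BO. */
	for (int i = 0; i < NUM_BUFFERS; i++)
		rebind_one(i);

	/* Now a single invalidation covers every rebound BO. */
	flush_tlb();
	return 0;
}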

v4: use container_of to retrieve the adev struct.
v3: rename amdgpu_gart_tlb_flush() -> amdgpu_gart_invalidate_tlb().
v2: abstract out gart tlb flushing logic to amdgpu_gart.c

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5562b5c90c032155b4acfd1d81c04804e581e9b6..35cc8009ac7a4c989684247670b8e0391ac5b2f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -312,8 +312,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint64_t flags)
 {
-       int r, i;
-
        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
@@ -322,16 +320,26 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
        if (!adev->gart.ptr)
                return 0;
 
-       r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                   adev->gart.ptr);
-       if (r)
-               return r;
+       return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+                              adev->gart.ptr);
+}
+
+/**
+ * amdgpu_gart_invalidate_tlb - invalidate gart TLB
+ *
+ * @adev: amdgpu device driver pointer
+ *
+ * Invalidate the gart TLB, which can be used as a way to flush gart changes
+ *
+ */
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+{
+       int i;
 
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        for (i = 0; i < adev->num_vmhubs; i++)
                amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
-       return 0;
 }
 
 /**
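
The ordering inside amdgpu_gart_invalidate_tlb() matters: the page-table
writes must be globally visible before the invalidation is issued, hence
the mb() ahead of the HDP flush and the per-vmhub TLB flushes. A
stand-alone user-space analogue of that publish-then-invalidate ordering
(illustrative names, not driver API):

#include <stdatomic.h>
#include <stdio.h>

static int page_table[4];

int main(void)
{
	/* Publish the new entries (stand-in for the gart page-table writes). */
	for (int i = 0; i < 4; i++)
		page_table[i] = i;

	/* Plays the role of mb(): order the writes before the flush. */
	atomic_thread_fence(memory_order_seq_cst);

	/* One invalidation per vmhub, mirroring the driver loop. */
	for (int hub = 0; hub < 2; hub++)
		printf("flush vmhub %d\n", hub);
	return 0;
}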
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index a25fe97b0196da4b6754e3a7bfbc62c4cab906e8..e104022197aeac5e2bdef4dd24049a431246d7e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -66,5 +66,5 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint64_t flags);
-
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index c026972ca9a1a3dc25c75fa5f97cd1471776c3d9..327da885eca91edc35f3f8cc5bbd05d57f58b3eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -206,10 +206,12 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+       struct amdgpu_device *adev;
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        int r = 0;
 
+       adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, struct amdgpu_gtt_node, node);
@@ -219,6 +221,8 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
        }
        spin_unlock(&mgr->lock);
 
+       amdgpu_gart_invalidate_tlb(adev);
+
        return r;
 }
 
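The container_of() used above recovers the enclosing amdgpu_device from
a pointer to its embedded gtt_mgr member. A self-contained C
demonstration of the idiom (simplified struct names, not the real amdgpu
layout):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gtt_mgr { int dummy; };
struct device  { int id; struct gtt_mgr mgr; };

int main(void)
{
	struct device dev = { .id = 42 };
	struct gtt_mgr *mgr = &dev.mgr;

	/* Walk back from the member pointer to the enclosing struct. */
	struct device *back = container_of(mgr, struct device, mgr);
	printf("recovered device id = %d\n", back->id);
	return 0;
}
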
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a06c8a9ae06697d3f59f90d4edfdbb20f2d048cf..86259435803ecf860b8e33c0f1dd35b935c3e4b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1012,6 +1012,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
                        return r;
                }
 
+               amdgpu_gart_invalidate_tlb(adev);
                ttm_resource_free(bo, &bo->mem);
                bo->mem = tmp;
        }