git.itanic.dy.fi Git - linux-stable/commitdiff
drm/amdgpu/sdma: set sched.ready status after ring/IB test in sdma
author     Guchun Chen <guchun.chen@amd.com>
           Fri, 12 May 2023 08:04:56 +0000 (16:04 +0800)
committer  Alex Deucher <alexander.deucher@amd.com>
           Fri, 9 Jun 2023 14:57:07 +0000 (10:57 -0400)
sched.ready has nothing to do with ring initialization; it needs to be
set to true after the ring/IB test in amdgpu_ring_test_helper to
indicate that the ring is ready for submission (see the helper sketch
below).

Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
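
For context, here is a minimal sketch of the pattern this commit relies on:
amdgpu_ring_test_helper owns the sched.ready flag and derives it from the
ring test result, so the per-IP SDMA resume paths no longer need to set it
themselves. This is an illustrative reconstruction, not a verbatim copy of
the upstream helper in drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c.

/* Illustrative sketch (simplified): the ring test helper decides whether
 * the ring is ready for job submission, based on the test outcome.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	/* Mark the ring ready only if the test passed. */
	ring->sched.ready = !r;

	return r;
}

With the flag owned by the helper, the resume paths in the diff below can
simply drop their early ring->sched.ready = true assignments (sdma_v5_2
moves the assignment to after the test instead).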
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 67d16236b2168ddc4b3e8ae66beaa638f30a13a9..52598fbc9b39db0c08705216b5e88bedf950c9cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -489,8 +489,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        cik_sdma_enable(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index fd2a7b66ac56216ea3b2ba2cfb18089befb2d221..51afc92994a80d1ee525067102287913ce82bf86 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -466,8 +466,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        sdma_v2_4_enable(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e572389089d249ae1e2c8f0ca76bf483b08f8e82..344202870aebec2d3a24d6c23c4f94c26019686a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -734,8 +734,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 #endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-               ring->sched.ready = true;
        }
 
        /* unhalt the MEs */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 70b0d1fd9868b6f99dbf45d325f09f55ed3f6a0b..1f83eebfc8a74030ecb0e2923acf7fb279e5d1e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1114,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 /**
@@ -1202,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 static void
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 590b08585901c53c5b6fd952dfb73daaa9b85641..ff41fb577cdd5d72a981fccbca9be92136fef606 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -685,8 +685,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 /**
@@ -776,8 +774,6 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
        /* enable DMA IBs */
        WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
-
-       ring->sched.ready = true;
 }
 
 static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a0077cf412955376928e1cfd4e15fce79724cda8..5c4d4df9cf94ccb075c605ddf85ac714bd8df397 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -819,8 +819,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
                        sdma_v5_0_ctx_switch_enable(adev, true);
                        sdma_v5_0_enable(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index efa2c84ee78e5a5c6da7aefd017e42c72cf84c51..6aae62b68f32db76a01bd25768db997cd970c5c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -617,8 +617,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
                        sdma_v5_2_ctx_switch_enable(adev, true);
                        sdma_v5_2_enable(adev, true);
@@ -630,6 +628,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
                        return r;
                }
 
+               ring->sched.ready = true;
+
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 79d09792d2ce310eda0c22f8625a423c29460c99..1c90b5c661fb7e791c8344454a88fc002bc18a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -585,16 +585,12 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
 
-               ring->sched.ready = true;
-
                if (amdgpu_sriov_vf(adev))
                        sdma_v6_0_enable(adev, true);
 
                r = amdgpu_ring_test_helper(ring);
-               if (r) {
-                       ring->sched.ready = false;
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index abca8b529721ebcdcbe9aae406590081f95b22bb..42c4547f32ec9cc9f3eba7582f8f7a953a84897b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -174,8 +174,6 @@ static int si_dma_start(struct amdgpu_device *adev)
                WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-               ring->sched.ready = true;
-
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;