drm/amdgpu: move buffer funcs setting up a level
author    Alex Deucher <alexander.deucher@amd.com>
          Wed, 25 Oct 2023 17:19:28 +0000 (13:19 -0400)
committer Luben Tuikov <ltuikov89@gmail.com>
          Thu, 26 Oct 2023 20:04:24 +0000 (16:04 -0400)
Rather than doing this in the IP code for the SDMA paging
engine, move it up to the core device init level.
This should fix the scheduler init ordering.

v2: drop extra parens
v3: drop SDMA helpers
v4: Added a Fixes tag because, without this patch, amdgpu
    dereferences an uninitialized scheduler; this patch fixes that. (Luben)
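
For illustration, the pattern that moves into the core device paths
(a sketch assembled from the amdgpu_device.c hunks below, not a
standalone change):

	/* init/resume/reset paths: enable the TTM buffer funcs only
	 * once the paging ring's scheduler is actually ready */
	if (adev->mman.buffer_funcs_ring->sched.ready)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);

	/* suspend/fini paths: tear the buffer funcs down before the
	 * IP-level suspend/fini runs */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);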

Tested-by: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20231025171928.3318505-1-alexander.deucher@amd.com
Acked-by: Christian König <christian.koenig@amd.com>
Fixes: 56e449603f0ac5 ("drm/sched: Convert the GPU scheduler to variable number of run-queues")
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 15074232fbbd752359e6f00a0ecdcfc26628cf31..19b539cab7fecee26a91265a8725018e3cb812fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2450,6 +2450,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
+       if (adev->mman.buffer_funcs_ring->sched.ready)
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
        /* Don't init kfd if whole hive need to be reset during init */
        if (!adev->gmc.xgmi.pending_reset) {
                kgd2kfd_init_zone_device(adev);
@@ -3045,6 +3048,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
                amdgpu_virt_request_full_gpu(adev, false);
        }
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        r = amdgpu_device_ip_suspend_phase1(adev);
        if (r)
                return r;
@@ -3234,6 +3239,9 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 
        r = amdgpu_device_ip_resume_phase2(adev);
 
+       if (adev->mman.buffer_funcs_ring->sched.ready)
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
        return r;
 }
 
@@ -4008,6 +4016,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
        /* disable ras feature must before hw fini */
        amdgpu_ras_pre_fini(adev);
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        amdgpu_device_ip_fini_early(adev);
 
        amdgpu_irq_fini_hw(adev);
@@ -4146,6 +4156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        amdgpu_ras_suspend(adev);
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        amdgpu_device_ip_suspend_phase1(adev);
 
        if (!adev->in_s0ix)
@@ -4971,6 +4983,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                                if (r)
                                        goto out;
 
+                               if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+                                       amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+
                                if (vram_lost)
                                        amdgpu_device_fill_reset_magic(tmp_adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index e2b9392d7f0de826b472117fde437b30c97f4cb6..12c3e8a6eb0e47c9f40959f9c2d04bf80dc6ef9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -289,27 +289,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
        return err;
 }
 
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
-{
-       struct amdgpu_ring *sdma;
-       int i;
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               if (adev->sdma.has_page_queue) {
-                       sdma = &adev->sdma.instance[i].page;
-                       if (adev->mman.buffer_funcs_ring == sdma) {
-                               amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                               break;
-                       }
-               }
-               sdma = &adev->sdma.instance[i].ring;
-               if (adev->mman.buffer_funcs_ring == sdma) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       break;
-               }
-       }
-}
-
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
 {
        int err = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 513ac22120c1fa5da5731c8612fdf47f0b49e939..173a2a308078168cf43e0819a197174284314fb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -169,7 +169,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
                               bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 52598fbc9b39db0c08705216b5e88bedf950c9cf..6def8691e4449b2084f7e7898a602ef14deb013a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -308,8 +308,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
@@ -498,9 +496,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 51afc92994a80d1ee525067102287913ce82bf86..e3f5af4762d0588f7700f025133c5920cfc7b4d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -339,8 +339,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -474,9 +472,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 344202870aebec2d3a24d6c23c4f94c26019686a..94dcce3c73b21619fb626152ddd47b33f2bfad7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -513,8 +513,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -746,9 +744,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index cd37f45e01a119d824b808dca66e04727de25199..d7e8c21f2a5dd623168a690c40e649f4b357edab 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -875,8 +875,6 @@ static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
@@ -911,8 +909,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
@@ -1399,13 +1395,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
                        r = amdgpu_ring_test_helper(page);
                        if (r)
                                return r;
-
-                       if (adev->mman.buffer_funcs_ring == page)
-                               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                }
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return r;
@@ -1917,11 +1907,8 @@ static int sdma_v4_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1960,7 +1947,6 @@ static int sdma_v4_0_resume(void *handle)
        if (adev->in_s0ix) {
                sdma_v4_0_enable(adev, true);
                sdma_v4_0_gfx_enable(adev, true);
-               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                return 0;
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 1cc34efb455bb83e8b5ca7dd730415b72ce962f5..7d7b86048e6885c5073c0b930c72650b7795cec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -559,8 +559,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -825,9 +823,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
@@ -1427,11 +1422,8 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v5_0_ctx_switch_enable(adev, false);
        sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2b3ebebc4299c043cc2819d78e886946717d339a..e53f4adfdc13ffd79613118a60f5baad81aa4f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -364,8 +364,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -625,9 +623,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
@@ -1285,11 +1280,8 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v5_2_ctx_switch_enable(adev, false);
        sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 45be0af2570b24c1a75e4c6b37a06c779d8f463a..c35bc0f0c1dd0fbac22aa5220120145bb6afe6d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -381,8 +381,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
@@ -594,9 +592,6 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
@@ -1343,11 +1338,8 @@ static int sdma_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v6_0_ctxempty_int_enable(adev, false);
        sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42c4547f32ec9cc9f3eba7582f8f7a953a84897b..9aa0e11ee67327d9c66e7510b33fa54e4bc6ee1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -115,8 +115,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
        u32 rb_cntl;
        unsigned i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                /* dma0 */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
@@ -177,9 +175,6 @@ static int si_dma_start(struct amdgpu_device *adev)
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;