git.itanic.dy.fi Git - linux-stable/commitdiff
drm/msm/adreno: Simplify read64/write64 helpers
authorRob Clark <robdclark@chromium.org>
Mon, 14 Nov 2022 19:30:40 +0000 (11:30 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 May 2023 09:53:53 +0000 (11:53 +0200)
[ Upstream commit cade05b2a88558847984287dd389fae0c7de31d6 ]

The _HI reg is always following the _LO reg, so no need to pass these
offsets separately.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
Patchwork: https://patchwork.freedesktop.org/patch/511581/
Link: https://lore.kernel.org/r/20221114193049.1533391-2-robdclark@gmail.com
Stable-dep-of: ca090c837b43 ("drm/msm: fix missing wq allocation error handling")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/msm_gpu.h

index 7cb8d9849c07327097ce383c246f923e4190b93e..a10feb8a4194ab95e489a19d9869d9f63703c019 100644 (file)
@@ -606,8 +606,7 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
 
 static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
 
        return 0;
 }
index 02ff306f96f42a6c81b8d3e87f336f27a9bf8a65..24feae285ccd683336d681793a58c66da7e1bc7c 100644 (file)
@@ -605,11 +605,9 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
 
-       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
-               REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
 
-       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
-               REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
 
        return 0;
 }
@@ -868,8 +866,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
-       gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
-               REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+       gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
 
        /* Put the GPU into 64 bit by default */
@@ -908,8 +905,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                return ret;
 
        /* Set the ringbuffer address */
-       gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
-               gpu->rb[0]->iova);
+       gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
 
        /*
         * If the microcode supports the WHERE_AM_I opcode then we can use that
@@ -936,7 +932,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                }
 
                gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
-                       REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+                           shadowptr(a5xx_gpu, gpu->rb[0]));
        } else if (gpu->nr_rings > 1) {
                /* Disable preemption if WHERE_AM_I isn't available */
                a5xx_preempt_fini(gpu);
@@ -1239,9 +1235,9 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
                gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
-               gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+               gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
                gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
-               gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+               gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
                gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
 
        /* Turn off the hangcheck timer to keep it from bothering us */
@@ -1427,8 +1423,7 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
-               REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
 
        return 0;
 }
@@ -1465,8 +1460,7 @@ static int a5xx_crashdumper_run(struct msm_gpu *gpu,
        if (IS_ERR_OR_NULL(dumper->ptr))
                return -EINVAL;
 
-       gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
-               REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+       gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
 
        gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
 
@@ -1666,8 +1660,7 @@ static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
 {
        u64 busy_cycles;
 
-       busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
-                       REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+       busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
        *out_sample_rate = clk_get_rate(gpu->core_clk);
 
        return busy_cycles;
index e0eef47dae632d3e8e7a484556d0adb3b8bab500..f58dd564d122badde91a998b8f8f476849d533d5 100644 (file)
@@ -137,7 +137,6 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
 
        /* Set the address of the incoming preemption record */
        gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
-               REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
                a5xx_gpu->preempt_iova[ring->id]);
 
        a5xx_gpu->next_ring = ring;
@@ -212,8 +211,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
        }
 
        /* Write a 0 to signal that we aren't switching pagetables */
-       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
-               REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
+       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
 
        /* Reset the preemption state */
        set_preempt_state(a5xx_gpu, PREEMPT_NONE);
index 9d7fc44c1e2a93bd93de3b60b04c28df1f2c0bc6..dc53466864b05a403e0b9f3dc4881dbd2cf21f4a 100644 (file)
@@ -247,8 +247,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO));
 
        a6xx_flush(gpu, ring);
 }
@@ -947,8 +946,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
        }
 
-       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
-               REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
+       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
 
        return 0;
 }
@@ -999,8 +997,7 @@ static int hw_init(struct msm_gpu *gpu)
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
-       gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
-               REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+       gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
 
        /* Turn on 64 bit addressing for all blocks */
@@ -1049,11 +1046,9 @@ static int hw_init(struct msm_gpu *gpu)
 
        if (!adreno_is_a650_family(adreno_gpu)) {
                /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
-               gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
-                       REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+               gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
 
                gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
-                       REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
                        0x00100000 + adreno_gpu->gmem - 1);
        }
 
@@ -1145,8 +1140,7 @@ static int hw_init(struct msm_gpu *gpu)
                goto out;
 
        /* Set the ringbuffer address */
-       gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
-               gpu->rb[0]->iova);
+       gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
 
        /* Targets that support extended APRIV can use the RPTR shadow from
         * hardware but all the other ones need to disable the feature. Targets
@@ -1178,7 +1172,6 @@ static int hw_init(struct msm_gpu *gpu)
                }
 
                gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
-                       REG_A6XX_CP_RB_RPTR_ADDR_HI,
                        shadowptr(a6xx_gpu, gpu->rb[0]));
        }
 
@@ -1506,9 +1499,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
                gpu_read(gpu, REG_A6XX_RBBM_STATUS),
                gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
-               gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+               gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
                gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
-               gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+               gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
                gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
 
        /* Turn off the hangcheck timer to keep it from bothering us */
@@ -1719,8 +1712,7 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-                           REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
index a5c3d1ed255a69d3e0e4cfc4b3d4a3e85722e34c..a023d5f962dce72ae2ee75b741f7f83f31d735a2 100644 (file)
@@ -147,8 +147,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
        /* Make sure all pending memory writes are posted */
        wmb();
 
-       gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
-               REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+       gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
 
        gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
 
index a89bfdc3d7f90f5db182c60aa91cdf28345baa01..7a36e0784f067a6b0099f2e4691a9ffed300ca0c 100644 (file)
@@ -548,7 +548,7 @@ static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
        msm_rmw(gpu->mmio + (reg << 2), mask, or);
 }
 
-static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
 {
        u64 val;
 
@@ -566,17 +566,17 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
         * when the lo is read, so make sure to read the lo first to trigger
         * that
         */
-       val = (u64) msm_readl(gpu->mmio + (lo << 2));
-       val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+       val = (u64) msm_readl(gpu->mmio + (reg << 2));
+       val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
 
        return val;
 }
 
-static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
 {
        /* Why not a writeq here? Read the screed above */
-       msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
-       msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+       msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+       msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
 }
 
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);