drm/amdgpu: Add mem_sync implementation for all the ASICs.
author Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Fri, 8 May 2020 18:34:26 +0000 (14:34 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 18 May 2020 15:24:21 +0000 (11:24 -0400)
Implement the .emit_mem_sync hook defined earlier.

v2: Rename functions

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
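
For reference, a minimal sketch (not part of this patch) of the hook these changes implement, as it is assumed to have been defined in the earlier patch, together with the assumed call site in the IB scheduling path; the names follow the callbacks wired up in the diffs below:

/* Assumed shape of the callback added by the earlier patch. */
struct amdgpu_ring_funcs {
	/* ... existing callbacks ... */
	/* flush/invalidate GPU caches before the job's IBs execute */
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
};

/* Assumed call site in amdgpu_ib_schedule(): emit the sync packet once
 * per job, before the command buffers are written to the ring.
 */
if (job && ring->funcs->emit_mem_sync)
	ring->funcs->emit_mem_sync(ring);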
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 84aa0d4f3c3eebdfd90672b27554f3b6b4edc943..7ab6f6ae9a637d361b5508d4554f301387d51bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -8020,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int gcr_cntl =
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+                       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+       /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+       amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+       amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
        .name = "gfx_v10_0",
        .early_init = gfx_v10_0_early_init,
@@ -8067,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               8, /* gfx_v10_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v10_0_ring_emit_fence,
@@ -8089,6 +8113,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v10_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index aa1e1be852dd7559b49bdd5ec71b55d9e18f5430..96112fb9273b2dee12053429b457e1b317a3bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3465,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
        .name = "gfx_v6_0",
        .early_init = gfx_v6_0_early_init,
@@ -3495,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
                14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
                7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-               3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
        .emit_ib = gfx_v6_0_ring_emit_ib,
        .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3506,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
        .insert_nop = amdgpu_ring_insert_nop,
        .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
        .emit_wreg = gfx_v6_0_ring_emit_wreg,
+       .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e5a88cad44cb8ae83fade102cfba58b76fb380b1..b2f10e39eff169272f2ad71c0166de456d3ac280 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4998,6 +4998,18 @@ static int gfx_v7_0_set_powergating_state(void *handle,
        return 0;
 }
 
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .name = "gfx_v7_0",
        .early_init = gfx_v7_0_early_init,
@@ -5030,7 +5042,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
                12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
                7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-               3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5045,6 +5058,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
        .emit_wreg = gfx_v7_0_ring_emit_wreg,
        .soft_recovery = gfx_v7_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v7_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 2fcf6865abbade9bb68d6fa548ce4b6a1faefc3d..6ae78b9e9551d47e8ad687a0fd6c691cc7ef2df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6817,6 +6817,19 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA |
+                         PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
        .name = "gfx_v8_0",
        .early_init = gfx_v8_0_early_init,
@@ -6863,7 +6876,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                12 + 12 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               5, /* SURFACE_SYNC */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6881,6 +6895,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
        .soft_recovery = gfx_v8_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v8_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a0988634aeaf580fdd7404b904ab2b202ba36c59..b6916f82c70539a9a394f30b2750778b72496ddc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6634,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+       const unsigned int cp_coher_cntl =
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+                       PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+       /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+       amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+       amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+       amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+       amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+       amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+       amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
        .name = "gfx_v9_0",
        .early_init = gfx_v9_0_early_init,
@@ -6680,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
-               2, /* SWITCH_BUFFER */
+               2 + /* SWITCH_BUFFER */
+               7, /* gfx_v9_0_emit_mem_sync */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6701,6 +6721,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v9_0_ring_soft_recovery,
+       .emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
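
A note on the emit_frame_size bumps above: the count passed to PACKET3() is the packet body size in dwords minus one, so a packet's total ring footprint is one header dword plus count + 1 body dwords. An illustrative helper (not part of the patch) showing that arithmetic for the three packet variants used here:

/* Illustrative only: total dwords a type-3 packet occupies on the ring,
 * given the count field passed to PACKET3(op, count).
 */
static inline unsigned int packet3_total_dw(unsigned int count)
{
	return 1 /* header */ + (count + 1) /* body */;
}

/*
 * PACKET3(PACKET3_ACQUIRE_MEM, 6)  - gfx_v10_0    -> 8 dwords, emit_frame_size += 8
 * PACKET3(PACKET3_ACQUIRE_MEM, 5)  - gfx_v9_0     -> 7 dwords, emit_frame_size += 7
 * PACKET3(PACKET3_SURFACE_SYNC, 3) - gfx v6/v7/v8 -> 5 dwords, emit_frame_size += 5
 */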