drm/i915/gem: Use to_gt() helper
author    Michał Winiarski <michal.winiarski@intel.com>
          Tue, 14 Dec 2021 19:33:35 +0000 (21:33 +0200)
committer Matt Roper <matthew.d.roper@intel.com>
          Sat, 18 Dec 2021 05:50:32 +0000 (21:50 -0800)
Use to_gt() helper consistently throughout the codebase.
Purely mechanical s/i915->gt/to_gt(i915)/; no functional changes.
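
For reference, a minimal sketch of the helper this series converts to
(assuming the accessor introduced earlier in this series; at this point
to_gt() is expected to be a trivial wrapper in i915_drv.h, not yet
pointing anywhere other than i915->gt):

    static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
    {
            return &i915->gt;
    }

Routing every GT access through one accessor lets later patches change
where the GT is stored without touching all of these callers again.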

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211214193346.21231-6-andi.shyti@linux.intel.com
15 files changed:
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_create.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index a534218ac7c9a876d03fc7e0802b02afecf71026..1f8d7bb32d3938e618141f3ebb822b30e6ebd68c 100644
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
                 * colateral damage, and we should not pretend we can by
                 * exposing the interface.
                 */
-               if (!intel_has_reset_engine(&i915->gt))
+               if (!intel_has_reset_engine(to_gt(i915)))
                        return -ENODEV;
 
                pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
@@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
 
        if (!protected) {
                pc->uses_protected_content = false;
-       } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+       } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
                ret = -ENODEV;
        } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
                   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
                 */
                pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-               if (!intel_pxp_is_active(&i915->gt.pxp))
-                       ret = intel_pxp_start(&i915->gt.pxp);
+               if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+                       ret = intel_pxp_start(&to_gt(i915)->pxp);
        }
 
        return ret;
@@ -571,7 +571,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
        intel_engine_mask_t prev_mask;
 
        /* FIXME: This is NIY for execlists */
-       if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+       if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
                return -ENODEV;
 
        if (get_user(slot, &ext->engine_index))
@@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
                sseu = &pc->legacy_rcs_sseu;
        }
 
-       ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+       ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
        if (ret)
                return ret;
 
@@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
                                                struct intel_sseu rcs_sseu)
 {
-       const struct intel_gt *gt = &ctx->i915->gt;
+       const struct intel_gt *gt = to_gt(ctx->i915);
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e, *err;
        enum intel_engine_id id;
@@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
                 * colateral damage, and we should not pretend we can by
                 * exposing the interface.
                 */
-               if (!intel_has_reset_engine(&ctx->i915->gt))
+               if (!intel_has_reset_engine(to_gt(ctx->i915)))
                        return -ENODEV;
 
                i915_gem_context_clear_persistence(ctx);
@@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
        } else if (HAS_FULL_PPGTT(i915)) {
                struct i915_ppgtt *ppgtt;
 
-               ppgtt = i915_ppgtt_create(&i915->gt, 0);
+               ppgtt = i915_ppgtt_create(to_gt(i915), 0);
                if (IS_ERR(ppgtt)) {
                        drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
                                PTR_ERR(ppgtt));
@@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags)
                return -EINVAL;
 
-       ppgtt = i915_ppgtt_create(&i915->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(i915), 0);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);
 
@@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;
 
-       ret = intel_gt_terminally_wedged(&i915->gt);
+       ret = intel_gt_terminally_wedged(to_gt(i915));
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 8955d6abcef12389660c92eb63e184888d3be8d2..9402d4bf4ffc5ffe8bc287497df01eedde9cda4a 100644
@@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
        if (ext.flags)
                return -EINVAL;
 
-       if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+       if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
                return -ENODEV;
 
        ext_data->flags |= I915_BO_PROTECTED;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 60ee60f7bb0922d3e3fc874d891458bdeaff0500..1f025a27c649d62dc37289f64a384f8ca5d1a399 100644
@@ -2361,9 +2361,9 @@ static int eb_submit(struct i915_execbuffer *eb)
        return err;
 }
 
-static int num_vcs_engines(const struct drm_i915_private *i915)
+static int num_vcs_engines(struct drm_i915_private *i915)
 {
-       return hweight_long(VDBOX_MASK(&i915->gt));
+       return hweight_long(VDBOX_MASK(to_gt(i915)));
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c0c509e5c0aee2d5bb605afa2c995bb41cec6bef..2e9088b7df91b86c91c5fea6837909ad444a6104 100644
@@ -645,7 +645,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
                goto insert;
 
        /* Attempt to reap some mmap space from dead objects */
-       err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+       err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
                                               NULL);
        if (err)
                goto err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7986612f48fad42872da18169f83f768ffa03d63..ca6faffcc4968b62f1a42bdbb2dc9230e9bf404a 100644
@@ -19,6 +19,7 @@
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct scatterlist *sg;
        struct sg_table *st;
        dma_addr_t dma;
@@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                dst += PAGE_SIZE;
        }
 
-       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        /* We're no longer struct page backed */
        obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
@@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
 {
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;
 
        err = i915_gem_object_wait(obj,
@@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
                return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
-       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 726b40e1fbb052f30ab9d7ab2b360bc0f086d9c7..ac56124760e18172acdaf742b40a803271014ba6 100644
@@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       intel_gt_suspend_prepare(&i915->gt);
+       intel_gt_suspend_prepare(to_gt(i915));
 
        i915_gem_drain_freed_objects(i915);
 }
@@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
         * machine in an unusable condition.
         */
 
-       intel_gt_suspend_late(&i915->gt);
+       intel_gt_suspend_late(to_gt(i915));
 
        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        for (phase = phases; *phase; phase++) {
@@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       intel_gt_resume(&i915->gt);
+       intel_gt_resume(to_gt(i915));
 
        ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
        GEM_WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 157a9765f4839a5b2dfb420ef5f58c45db31f7ab..05a1ba2f2e7bbcc357d27bde79f9455d970e6f4c 100644
@@ -153,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
         */
        if (shrink & I915_SHRINK_ACTIVE)
                /* Retire requests to unpin all idle contexts */
-               intel_gt_retire_requests(&i915->gt);
+               intel_gt_retire_requests(to_gt(i915));
 
        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1929d6cf415082ef5931f74896630948d55e1eb9..75501db71041232ff0eb35044283aa80c6525d30 100644
@@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 {
        const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_private *i915 = to_i915(dev);
        struct i915_gem_context *ctx;
        unsigned long idx;
        long ret;
 
        /* ABI: return -EIO if already wedged */
-       ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
+       ret = intel_gt_terminally_wedged(to_gt(i915));
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 80df9f592407ec656c4398f30ede69049d295010..8ad09fcf3698669f0dc9946471066db9b86cd094 100644
@@ -397,7 +397,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
        enum i915_cache_level src_level, dst_level;
        int ret;
 
-       if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
+       if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
                return ERR_PTR(-EINVAL);
 
        /* With fail_gpu_migration, we always perform a GPU clear. */
@@ -410,8 +410,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
                    !I915_SELFTEST_ONLY(fail_gpu_migration))
                        return ERR_PTR(-EINVAL);
 
-               intel_engine_pm_get(i915->gt.migrate.context->engine);
-               ret = intel_context_migrate_clear(i915->gt.migrate.context, dep,
+               intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+               ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, dep,
                                                  dst_st->sgl, dst_level,
                                                  i915_ttm_gtt_binds_lmem(dst_mem),
                                                  0, &rq);
@@ -423,8 +423,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
                        return ERR_CAST(src_rsgt);
 
                src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
-               intel_engine_pm_get(i915->gt.migrate.context->engine);
-               ret = intel_context_migrate_copy(i915->gt.migrate.context,
+               intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+               ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
                                                 dep, src_rsgt->table.sgl,
                                                 src_level,
                                                 i915_ttm_gtt_binds_lmem(bo->resource),
@@ -435,7 +435,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
                i915_refct_sgt_put(src_rsgt);
        }
 
-       intel_engine_pm_put(i915->gt.migrate.context->engine);
+       intel_engine_pm_put(to_gt(i915)->migrate.context->engine);
 
        if (ret && rq) {
                i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 3173c9f9a04022bd14652e81ecb161d3f19af702..3cc01c30dd62bc637e2b57fc49bb86a1ef293d6e 100644
@@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
                 * On almost all of the older hw, we cannot tell the GPU that
                 * a page is readonly.
                 */
-               if (!dev_priv->gt.vm->has_read_only)
+               if (!to_gt(dev_priv)->vm->has_read_only)
                        return -ENODEV;
        }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c69c7d45aabc4fecf84b70faebc047665ff5d7ce..11f0aa65f8a319ee69f23dd93fba8380a29ffcba 100644
@@ -1705,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void)
        mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
        mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
-       ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
@@ -1747,7 +1747,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
                return 0;
        }
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 8402ed925a69ac74c5b605d5ff35d63fd133091e..75947e9dada2fb0b35811d9f2317d8f03342b911 100644
@@ -592,7 +592,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_client_tiled_blits),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 21b71568cd5ffad81fd0e92ee696092c5acc6c34..45398adda9c874e6c52b7738d6f28e68d934c447 100644
@@ -90,7 +90,7 @@ static int live_nop_switch(void *arg)
                }
                if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
                        pr_err("Failed to populated %d contexts\n", nctx);
-                       intel_gt_set_wedged(&i915->gt);
+                       intel_gt_set_wedged(to_gt(i915));
                        i915_request_put(rq);
                        err = -EIO;
                        goto out_file;
@@ -146,7 +146,7 @@ static int live_nop_switch(void *arg)
                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                                pr_err("Switching between %ld contexts timed out\n",
                                       prime);
-                               intel_gt_set_wedged(&i915->gt);
+                               intel_gt_set_wedged(to_gt(i915));
                                i915_request_put(rq);
                                break;
                        }
@@ -1223,7 +1223,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
                return 0;
 
        if (flags & TEST_RESET)
-               igt_global_reset_lock(&i915->gt);
+               igt_global_reset_lock(to_gt(i915));
 
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
@@ -1306,7 +1306,7 @@ out_put:
 
 out_unlock:
        if (flags & TEST_RESET)
-               igt_global_reset_unlock(&i915->gt);
+               igt_global_reset_unlock(to_gt(i915));
 
        if (ret)
                pr_err("%s: Failed with %d!\n", name, ret);
@@ -1877,7 +1877,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_vm_isolation),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 4b8e6b0986597ef84b24ee2e1d211c70fd3f737a..ecb691c81d1e318d5fb06d99c30f626423f5b424 100644
@@ -261,5 +261,5 @@ int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
        if (!HAS_LMEM(i915))
                return 0;
 
-       return intel_gt_live_subtests(tests, &i915->gt);
+       return intel_gt_live_subtests(tests, to_gt(i915));
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 6d30cdfa80f37304e6ac5fc219031836af3acace..743e6ab2c40ba4baf103b4883edcbbf5c33135b4 100644
@@ -84,6 +84,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                                 struct rnd_state *prng)
 {
        const unsigned long npages = obj->base.size / PAGE_SIZE;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt_view view;
        struct i915_vma *vma;
        unsigned long page;
@@ -141,7 +142,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
        if (offset >= obj->base.size)
                goto out;
 
-       intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        cpu = kmap(p) + offset_in_page(offset);
@@ -175,6 +176,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 {
        const unsigned int nreal = obj->scratch / PAGE_SIZE;
        const unsigned long npages = obj->base.size / PAGE_SIZE;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_vma *vma;
        unsigned long page;
        int err;
@@ -234,7 +236,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
                if (offset >= obj->base.size)
                        continue;
 
-               intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
                p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
                cpu = kmap(p) + offset_in_page(offset);
@@ -616,14 +618,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 static void disable_retire_worker(struct drm_i915_private *i915)
 {
        i915_gem_driver_unregister__shrinker(i915);
-       intel_gt_pm_get(&i915->gt);
-       cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+       intel_gt_pm_get(to_gt(i915));
+       cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
 }
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
        igt_flush_test(i915);
-       intel_gt_pm_put(&i915->gt);
+       intel_gt_pm_put(to_gt(i915));
        i915_gem_driver_register__shrinker(i915);
 }
 
@@ -651,8 +653,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
        /* Disable background reaper */
        disable_retire_worker(i915);
-       GEM_BUG_ON(!i915->gt.awake);
-       intel_gt_retire_requests(&i915->gt);
+       GEM_BUG_ON(!to_gt(i915)->awake);
+       intel_gt_retire_requests(to_gt(i915));
        i915_gem_drain_freed_objects(i915);
 
        /* Trim the device mmap space to only a page */
@@ -728,7 +730,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
-               if (intel_gt_is_wedged(&i915->gt))
+               if (intel_gt_is_wedged(to_gt(i915)))
                        break;
 
                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -942,7 +944,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
        }
 
        if (type == I915_MMAP_TYPE_GTT)
-               intel_gt_flush_ggtt_writes(&i915->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = wc_check(obj);
        if (err == -ENXIO)
@@ -1049,7 +1051,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
                goto out_unmap;
        }
 
-       intel_gt_flush_ggtt_writes(&i915->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = access_process_vm(current, addr, &x, sizeof(x), 0);
        if (err != sizeof(x)) {
@@ -1065,7 +1067,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
                goto out_unmap;
        }
 
-       intel_gt_flush_ggtt_writes(&i915->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = __get_user(y, ptr);
        if (err) {
@@ -1165,7 +1167,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
        }
 
        if (type == I915_MMAP_TYPE_GTT)
-               intel_gt_flush_ggtt_writes(&i915->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;