Merge drm/drm-next into drm-intel-next-queued
author     Rodrigo Vivi <rodrigo.vivi@intel.com>
           Thu, 22 Aug 2019 05:47:35 +0000 (22:47 -0700)
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>
           Thu, 22 Aug 2019 07:10:36 +0000 (00:10 -0700)
We need the rename of reservation_object to dma_resv.
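For reference, the calls this merge has to convert map onto the new
names as follows (collected from the hunks below; the rename itself is
mechanical):

  struct reservation_object             ->  struct dma_resv
  reservation_object_trylock()          ->  dma_resv_trylock()
  reservation_object_unlock()           ->  dma_resv_unlock()
  reservation_object_add_excl_fence()   ->  dma_resv_add_excl_fence()
  reservation_object_reserve_shared()   ->  dma_resv_reserve_shared()
  reservation_object_add_shared_fence() ->  dma_resv_add_shared_fence()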

The conflict resolution in this merge came from linux-next:
From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 14 Aug 2019 12:48:39 +1000
Subject: [PATCH] drm: fix up fallout from "dma-buf: rename reservation_object to dma_resv"

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a..4cd54c569911 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -43,12 +43,12 @@ static int pool_active(struct i915_active *ref)
 {
        struct intel_engine_pool_node *node =
                container_of(ref, typeof(*node), active);
-       struct reservation_object *resv = node->obj->base.resv;
+       struct dma_resv *resv = node->obj->base.resv;
        int err;

-       if (reservation_object_trylock(resv)) {
-               reservation_object_add_excl_fence(resv, NULL);
-               reservation_object_unlock(resv);
+       if (dma_resv_trylock(resv)) {
+               dma_resv_add_excl_fence(resv, NULL);
+               dma_resv_unlock(resv);
        }

        err = i915_gem_object_pin_pages(node->obj);

This is a simplified version of an earlier resolution, which had:
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
26 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_fence.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_engine_pool.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/i915/i915_sw_fence.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h

diff --combined drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a3d35f859b2bcad078e48cf8bca57,0000000000000000000000000000000000000000..4cd54c5699111673793a076cb820a9e49336c689
mode 100644,000000..100644
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@@ -1,177 -1,0 +1,177 @@@
 +/*
 + * SPDX-License-Identifier: MIT
 + *
 + * Copyright © 2014-2018 Intel Corporation
 + */
 +
 +#include "gem/i915_gem_object.h"
 +
 +#include "i915_drv.h"
 +#include "intel_engine_pm.h"
 +#include "intel_engine_pool.h"
 +
 +static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
 +{
 +      return container_of(pool, struct intel_engine_cs, pool);
 +}
 +
 +static struct list_head *
 +bucket_for_size(struct intel_engine_pool *pool, size_t sz)
 +{
 +      int n;
 +
 +      /*
 +       * Compute a power-of-two bucket, but throw everything greater than
 +       * 16KiB into the same bucket: i.e. the buckets hold objects of
 +       * (1 page, 2 pages, 4 pages, 8+ pages).
 +       */
 +      n = fls(sz >> PAGE_SHIFT) - 1;
 +      if (n >= ARRAY_SIZE(pool->cache_list))
 +              n = ARRAY_SIZE(pool->cache_list) - 1;
 +
 +      return &pool->cache_list[n];
 +}
 +
 +static void node_free(struct intel_engine_pool_node *node)
 +{
 +      i915_gem_object_put(node->obj);
 +      i915_active_fini(&node->active);
 +      kfree(node);
 +}
 +
 +static int pool_active(struct i915_active *ref)
 +{
 +      struct intel_engine_pool_node *node =
 +              container_of(ref, typeof(*node), active);
-       struct reservation_object *resv = node->obj->base.resv;
++      struct dma_resv *resv = node->obj->base.resv;
 +      int err;
 +
-       if (reservation_object_trylock(resv)) {
-               reservation_object_add_excl_fence(resv, NULL);
-               reservation_object_unlock(resv);
++      if (dma_resv_trylock(resv)) {
++              dma_resv_add_excl_fence(resv, NULL);
++              dma_resv_unlock(resv);
 +      }
 +
 +      err = i915_gem_object_pin_pages(node->obj);
 +      if (err)
 +              return err;
 +
 +      /* Hide this pinned object from the shrinker until retired */
 +      i915_gem_object_make_unshrinkable(node->obj);
 +
 +      return 0;
 +}
 +
 +static void pool_retire(struct i915_active *ref)
 +{
 +      struct intel_engine_pool_node *node =
 +              container_of(ref, typeof(*node), active);
 +      struct intel_engine_pool *pool = node->pool;
 +      struct list_head *list = bucket_for_size(pool, node->obj->base.size);
 +      unsigned long flags;
 +
 +      GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
 +
 +      i915_gem_object_unpin_pages(node->obj);
 +
 +      /* Return this object to the shrinker pool */
 +      i915_gem_object_make_purgeable(node->obj);
 +
 +      spin_lock_irqsave(&pool->lock, flags);
 +      list_add(&node->link, list);
 +      spin_unlock_irqrestore(&pool->lock, flags);
 +}
 +
 +static struct intel_engine_pool_node *
 +node_create(struct intel_engine_pool *pool, size_t sz)
 +{
 +      struct intel_engine_cs *engine = to_engine(pool);
 +      struct intel_engine_pool_node *node;
 +      struct drm_i915_gem_object *obj;
 +
 +      node = kmalloc(sizeof(*node),
 +                     GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 +      if (!node)
 +              return ERR_PTR(-ENOMEM);
 +
 +      node->pool = pool;
 +      i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
 +
 +      obj = i915_gem_object_create_internal(engine->i915, sz);
 +      if (IS_ERR(obj)) {
 +              i915_active_fini(&node->active);
 +              kfree(node);
 +              return ERR_CAST(obj);
 +      }
 +
 +      node->obj = obj;
 +      return node;
 +}
 +
 +struct intel_engine_pool_node *
 +intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
 +{
 +      struct intel_engine_pool_node *node;
 +      struct list_head *list;
 +      unsigned long flags;
 +      int ret;
 +
 +      GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
 +
 +      size = PAGE_ALIGN(size);
 +      list = bucket_for_size(pool, size);
 +
 +      spin_lock_irqsave(&pool->lock, flags);
 +      list_for_each_entry(node, list, link) {
 +              if (node->obj->base.size < size)
 +                      continue;
 +              list_del(&node->link);
 +              break;
 +      }
 +      spin_unlock_irqrestore(&pool->lock, flags);
 +
 +      if (&node->link == list) {
 +              node = node_create(pool, size);
 +              if (IS_ERR(node))
 +                      return node;
 +      }
 +
 +      ret = i915_active_acquire(&node->active);
 +      if (ret) {
 +              node_free(node);
 +              return ERR_PTR(ret);
 +      }
 +
 +      return node;
 +}
 +
 +void intel_engine_pool_init(struct intel_engine_pool *pool)
 +{
 +      int n;
 +
 +      spin_lock_init(&pool->lock);
 +      for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 +              INIT_LIST_HEAD(&pool->cache_list[n]);
 +}
 +
 +void intel_engine_pool_park(struct intel_engine_pool *pool)
 +{
 +      int n;
 +
 +      for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 +              struct list_head *list = &pool->cache_list[n];
 +              struct intel_engine_pool_node *node, *nn;
 +
 +              list_for_each_entry_safe(node, nn, list, link)
 +                      node_free(node);
 +
 +              INIT_LIST_HEAD(list);
 +      }
 +}
 +
 +void intel_engine_pool_fini(struct intel_engine_pool *pool)
 +{
 +      int n;
 +
 +      for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 +              GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
 +}
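As an aside, the bucket computation above is easy to check in
isolation. The following is a hypothetical userspace harness (not
kernel code) that mirrors bucket_for_size(), assuming 4 KiB pages and
the four buckets the comment names (1, 2, 4, 8+ pages); fls() is
open-coded since the kernel helper is not available in userspace:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT  12  /* assumed 4 KiB pages */
#define NUM_BUCKETS 4   /* 1, 2, 4, 8+ pages, per the comment above */

/* Open-coded fls(): position of the last set bit, 1-based; 0 if x == 0. */
static int fls_size(size_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

/* Mirrors bucket_for_size(), returning the bucket index instead. */
static int bucket_index(size_t sz)
{
	int n = fls_size(sz >> PAGE_SHIFT) - 1;

	if (n < 0)              /* sub-page size; kernel callers never pass one */
		n = 0;
	if (n >= NUM_BUCKETS)
		n = NUM_BUCKETS - 1;
	return n;
}

int main(void)
{
	/* Expected: buckets 0, 1, 2, 3, 3 - sizes past 8 pages all clamp. */
	static const size_t sizes[] = { 4096, 8192, 16384, 32768, 262144 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8zu bytes -> bucket %d\n",
		       sizes[i], bucket_index(sizes[i]));
	return 0;
}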
diff --combined drivers/gpu/drm/i915/i915_vma.c
index 252edef6c59e50dda8a4ffef1f70185af9e927a1,2645f4e850c203b69596cb5b456c0bbcc2ae0a56..79f9d1fb7611450c23c15ea27b4833a8f25ccbcf
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@@ -906,22 -926,14 +906,22 @@@ int i915_vma_move_to_active(struct i915
        if (unlikely(err))
                return err;
  
 -      obj->write_domain = 0;
        if (flags & EXEC_OBJECT_WRITE) {
 -              obj->write_domain = I915_GEM_DOMAIN_RENDER;
 -
 -              if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
 -                      __i915_active_request_set(&obj->frontbuffer_write, rq);
 +              if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
 +                      i915_active_ref(&obj->frontbuffer->write,
 +                                      rq->timeline,
 +                                      rq);
  
-               reservation_object_add_excl_fence(vma->resv, &rq->fence);
++              dma_resv_add_excl_fence(vma->resv, &rq->fence);
 +              obj->write_domain = I915_GEM_DOMAIN_RENDER;
                obj->read_domains = 0;
 +      } else {
-               err = reservation_object_reserve_shared(vma->resv, 1);
++              err = dma_resv_reserve_shared(vma->resv, 1);
 +              if (unlikely(err))
 +                      return err;
 +
-               reservation_object_add_shared_fence(vma->resv, &rq->fence);
++              dma_resv_add_shared_fence(vma->resv, &rq->fence);
 +              obj->write_domain = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;
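One detail worth noting in the i915_vma.c hunk: the renamed API keeps
reservation_object's ordering rule that a shared-fence slot must be
reserved, and that reservation may fail, before
dma_resv_add_shared_fence() is called; the exclusive fence needs no
reservation. A kernel-context sketch of that idiom (illustrative
function and parameter names; only the dma_resv_* calls are real, and
the caller is assumed to hold the reservation lock):

static int move_to_active_fences(struct dma_resv *resv,
				 struct dma_fence *fence, bool write)
{
	int err;

	if (write) {
		/* The exclusive slot always exists; setting it also
		 * drops the shared fences, as on the EXEC_OBJECT_WRITE
		 * path above. */
		dma_resv_add_excl_fence(resv, fence);
	} else {
		/* Reserve a shared slot first: the reservation can
		 * allocate memory and therefore fail... */
		err = dma_resv_reserve_shared(resv, 1);
		if (unlikely(err))
			return err;

		/* ...while adding into a reserved slot cannot. */
		dma_resv_add_shared_fence(resv, fence);
	}
	return 0;
}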