Merge tag 'drm-misc-next-2019-08-19' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <airlied@redhat.com>
Wed, 21 Aug 2019 05:38:43 +0000 (15:38 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 21 Aug 2019 06:44:41 +0000 (16:44 +1000)
drm-misc-next for 5.4:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - dma-buf: add reservation_object_fences helper, relax
             reservation_object_add_shared_fence, remove
             reservation_object seq number (and then
             restored)
  - dma-fence: Shrinkage of the dma_fence structure,
               Merge dma_fence_signal and dma_fence_signal_locked,
               Store the timestamp in struct dma_fence in a union with
                cb_list (see the sketch below)
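
    As a rough illustration of the timestamp/cb_list item above, the change
    overlays the two fields that are never live at the same time (a
    simplified sketch only; field order and surrounding members are not
    claimed to match the tree exactly):

        struct dma_fence {
                spinlock_t *lock;
                const struct dma_fence_ops *ops;
                /*
                 * Only one member is meaningful at a time: callbacks are
                 * tracked until the fence signals, and the timestamp only
                 * exists once it has signalled, so they can share storage.
                 */
                union {
                        struct list_head cb_list;   /* until signalled */
                        ktime_t timestamp;          /* once signalled */
                };
                u64 context;
                u64 seqno;
                unsigned long flags;
                struct kref refcount;
                int error;
        };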

Driver Changes:
  - More dt-bindings YAML conversions
  - More removal of drmP.h includes
  - dw-hdmi: Support get_eld and various i2s improvements
  - gm12u320: A few fixes
  - meson: Global cleanup
  - panfrost: A few refactors, support for GPU heap allocations
  - sun4i: Support for DDC enable GPIO
  - New panels: TI nspire, NEC NL8048HL11, LG Philips LB035Q02,
                Sharp LS037V7DW01, Sony ACX565AKM, Toppoly TD028TTEC1,
                Toppoly TD043MTEA1

Signed-off-by: Dave Airlie <airlied@redhat.com>
[airlied: fixup dma_resv rename fallout]

From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819141923.7l2adietcr2pioct@flea
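
For reference, the dma_resv rename fallout noted above ("fixup dma_resv rename
fallout") boils down to replacing the old reservation_object spellings with
their dma_resv equivalents; every pair below is taken from the '-'/'++' lines
in the conflict hunks that follow:

    struct reservation_object               ->  struct dma_resv
    reservation_object_lock()               ->  dma_resv_lock()
    reservation_object_unlock()             ->  dma_resv_unlock()
    reservation_object_fini()               ->  dma_resv_fini()
    reservation_object_add_excl_fence()     ->  dma_resv_add_excl_fence()
    reservation_object_test_signaled_rcu()  ->  dma_resv_test_signaled_rcu()
    reservation_object_assert_held()        ->  dma_resv_assert_held()
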
30 files changed:
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/drm/ttm/ttm_bo_driver.h

diff --cc MAINTAINERS
Simple merge
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2d07f16f178971f43c13736b64309e3d87a163d8,2f11ebd95528452e1624ecbcbea65084a30730a8..6ebe61e14f290ab0c925be5d5e22fb29cde357b8
@@@ -1217,42 -1211,6 +1217,42 @@@ void amdgpu_bo_move_notify(struct ttm_b
        trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
  }
  
 +/**
 + * amdgpu_bo_release_notify - notification about a BO being released
 + * @bo: pointer to a buffer object
 + *
 + * Wipes VRAM buffers whose contents should not be leaked before the
 + * memory is released.
 + */
 +void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 +{
 +      struct dma_fence *fence = NULL;
 +      struct amdgpu_bo *abo;
 +      int r;
 +
 +      if (!amdgpu_bo_is_amdgpu_bo(bo))
 +              return;
 +
 +      abo = ttm_to_amdgpu_bo(bo);
 +
 +      if (abo->kfd_bo)
 +              amdgpu_amdkfd_unreserve_memory_limit(abo);
 +
 +      if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
 +          !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 +              return;
 +
-       reservation_object_lock(bo->base.resv, NULL);
++      dma_resv_lock(bo->base.resv, NULL);
 +
 +      r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
 +      if (!WARN_ON(r)) {
 +              amdgpu_bo_fence(abo, fence, false);
 +              dma_fence_put(fence);
 +      }
 +
-       reservation_object_unlock(bo->base.resv);
++      dma_resv_unlock(bo->base.resv);
 +}
 +
  /**
   * amdgpu_bo_fault_reserve_notify - notification about a memory fault
   * @bo: pointer to a buffer object
diff --cc drivers/gpu/drm/i915/gem/i915_gem_object.c
index d5197a2a106f28b69245035b75d776a5b5db66e5,be6caccce0c5e0fd31a441e34bc89833c290dd21..afd75b85da1d9e1ba14171cf61d5d5f3bc9fcc00
@@@ -146,19 -146,6 +146,19 @@@ void i915_gem_close_object(struct drm_g
        }
  }
  
 +static void __i915_gem_free_object_rcu(struct rcu_head *head)
 +{
 +      struct drm_i915_gem_object *obj =
 +              container_of(head, typeof(*obj), rcu);
 +      struct drm_i915_private *i915 = to_i915(obj->base.dev);
 +
-       reservation_object_fini(&obj->base._resv);
++      dma_resv_fini(&obj->base._resv);
 +      i915_gem_object_free(obj);
 +
 +      GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
 +      atomic_dec(&i915->mm.free_count);
 +}
 +
  static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
  {
Simple merge
Simple merge
diff --cc drivers/gpu/drm/i915/i915_gem_batch_pool.c
index b17f239912534e021bcddbe7341433544a8a2937,5f82a763e64ca9b6d1d3bed9d3aa36165c89a625..8675a608a6fe69b6a6ddebb7a07a34bba5c9646a
@@@ -94,26 -94,34 +94,26 @@@ i915_gem_batch_pool_get(struct i915_gem
        list = &pool->cache_list[n];
  
        list_for_each_entry(obj, list, batch_pool_link) {
-               struct reservation_object *resv = obj->base.resv;
++              struct dma_resv *resv = obj->base.resv;
 +
                /* The batches are strictly LRU ordered */
-               if (!reservation_object_test_signaled_rcu(resv, true))
 -              if (i915_gem_object_is_active(obj)) {
 -                      struct dma_resv *resv = obj->base.resv;
 -
 -                      if (!dma_resv_test_signaled_rcu(resv, true))
 -                              break;
 -
 -                      i915_retire_requests(pool->engine->i915);
 -                      GEM_BUG_ON(i915_gem_object_is_active(obj));
 -
 -                      /*
 -                       * The object is now idle, clear the array of shared
 -                       * fences before we add a new request. Although, we
 -                       * remain on the same engine, we may be on a different
 -                       * timeline and so may continually grow the array,
 -                       * trapping a reference to all the old fences, rather
 -                       * than replace the existing fence.
 -                       */
 -                      if (rcu_access_pointer(resv->fence)) {
 -                              dma_resv_lock(resv, NULL);
 -                              dma_resv_add_excl_fence(resv, NULL);
 -                              dma_resv_unlock(resv);
 -                      }
++              if (!dma_resv_test_signaled_rcu(resv, true))
 +                      break;
 +
 +              /*
 +               * The object is now idle, clear the array of shared
 +               * fences before we add a new request. Although, we
 +               * remain on the same engine, we may be on a different
 +               * timeline and so may continually grow the array,
 +               * trapping a reference to all the old fences, rather
 +               * than replace the existing fence.
 +               */
 +              if (rcu_access_pointer(resv->fence)) {
-                       reservation_object_lock(resv, NULL);
-                       reservation_object_add_excl_fence(resv, NULL);
-                       reservation_object_unlock(resv);
++                      dma_resv_lock(resv, NULL);
++                      dma_resv_add_excl_fence(resv, NULL);
++                      dma_resv_unlock(resv);
                }
  
 -              GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv,
 -                                                               true));
 -
                if (obj->base.size >= size)
                        goto found;
        }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2eb3532e329147edcb52317ee2438ab087918e78,0b547245063391191234612cfe75a63bd4b654de..5581a7826b4c06fc814d9c028ff6ec68f51c5ecf
  
  #define VMW_RES_EVICT_ERR_COUNT 10
  
 +/**
 + * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 + * @res: The resource
 + */
 +void vmw_resource_mob_attach(struct vmw_resource *res)
 +{
 +      struct vmw_buffer_object *backup = res->backup;
 +
-       reservation_object_assert_held(backup->base.base.resv);
++      dma_resv_assert_held(res->backup->base.base.resv);
 +      res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
 +              res->func->prio;
 +      list_add_tail(&res->mob_head, &backup->res_list);
 +      vmw_bo_prio_add(backup, res->used_prio);
 +}
 +
 +/**
 + * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 + * @res: The resource
 + */
 +void vmw_resource_mob_detach(struct vmw_resource *res)
 +{
 +      struct vmw_buffer_object *backup = res->backup;
 +
-       reservation_object_assert_held(backup->base.base.resv);
++      dma_resv_assert_held(backup->base.base.resv);
 +      if (vmw_resource_mob_attached(res)) {
 +              list_del_init(&res->mob_head);
 +              vmw_bo_prio_del(backup, res->used_prio);
 +      }
 +}
 +
  struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  {
        kref_get(&res->kref);
Simple merge