drm-misc-next for 5.4:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
- dma-buf: add reservation_object_fences helper, relax
  reservation_object_add_shared_fence, remove the reservation_object
  seq number (and then restore it)
- dma-fence: shrink the dma_fence structure, merge dma_fence_signal
  and dma_fence_signal_locked, store the timestamp in struct
  dma_fence in a union with cb_list (a sketch of the resulting
  layout follows the tag message below)
Driver Changes:
- More dt-bindings YAML conversions
- More removal of drmP.h includes
- dw-hdmi: Support get_eld and various i2s improvements
- gm12u320: A few fixes
- meson: Global cleanup
- panfrost: A few refactors, support for GPU heap allocations
- sun4i: Support for DDC enable GPIO
- New panels: TI nspire, NEC NL8048HL11, LG Philips LB035Q02,
  Sharp LS037V7DW01, Sony ACX565AKM, Toppoly TD028TTEC1,
  Toppoly TD043MTEA1
Signed-off-by: Dave Airlie <airlied@redhat.com>
[airlied: fixup dma_resv rename fallout]
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819141923.7l2adietcr2pioct@flea
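
For context, the dma_fence shrinkage noted under Core Changes works by
overlaying the signal timestamp on the callback list: the callbacks are
only meaningful while the fence is unsignalled, and the timestamp only
exists afterwards. A minimal sketch of the resulting layout (members
abridged; not the verbatim 5.4 definition):

	struct dma_fence {
		spinlock_t *lock;
		const struct dma_fence_ops *ops;
		/*
		 * cb_list is only walked before the fence signals, so
		 * it can share storage with the timestamp recorded by
		 * dma_fence_signal().
		 */
		union {
			struct list_head cb_list;
			ktime_t timestamp;
		};
		u64 context;
		u64 seqno;
		unsigned long flags;
		struct kref refcount;
		int error;
	};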
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers whose contents should not be leaked before the
+ * memory is released.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+{
+	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *abo;
+	int r;
+
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
+		return;
+
+	abo = ttm_to_amdgpu_bo(bo);
+
+	if (abo->kfd_bo)
+		amdgpu_amdkfd_unreserve_memory_limit(abo);
+
+	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+		return;
+
-	reservation_object_lock(bo->base.resv, NULL);
++	dma_resv_lock(bo->base.resv, NULL);
+
+	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
+	if (!WARN_ON(r)) {
+		amdgpu_bo_fence(abo, fence, false);
+		dma_fence_put(fence);
+	}
+
-	reservation_object_unlock(bo->base.resv);
++	dma_resv_unlock(bo->base.resv);
+}
+
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
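
The wipe added above is opted into per buffer at creation time. A hedged
sketch of a kernel-internal caller requesting it; the creation path
(amdgpu_bo_param/amdgpu_bo_create) is the existing amdgpu API, while the
surrounding variables (adev, size) are assumed:

	struct amdgpu_bo_param bp = {
		.size	= size,
		.byte_align = PAGE_SIZE,
		.domain	= AMDGPU_GEM_DOMAIN_VRAM,
		.flags	= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE,
		.type	= ttm_bo_type_kernel,
	};
	struct amdgpu_bo *bo;
	int r;

	/* Allocate a VRAM BO whose contents are wiped on release. */
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r)
		return r;
	/*
	 * When the last reference goes away, TTM calls
	 * amdgpu_bo_release_notify() above, which fills the VRAM
	 * backing store with AMDGPU_POISON before it is reused.
	 */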
}
}
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
-	reservation_object_fini(&obj->base._resv);
++	dma_resv_fini(&obj->base._resv);
+	i915_gem_object_free(obj);
+
+	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+	atomic_dec(&i915->mm.free_count);
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	list = &pool->cache_list[n];
	list_for_each_entry(obj, list, batch_pool_link) {
-		struct reservation_object *resv = obj->base.resv;
++		struct dma_resv *resv = obj->base.resv;
+
		/* The batches are strictly LRU ordered */
-		if (!reservation_object_test_signaled_rcu(resv, true))
-		if (i915_gem_object_is_active(obj)) {
-			struct dma_resv *resv = obj->base.resv;
-
-			if (!dma_resv_test_signaled_rcu(resv, true))
-				break;
-
-			i915_retire_requests(pool->engine->i915);
-			GEM_BUG_ON(i915_gem_object_is_active(obj));
-
-			/*
-			 * The object is now idle, clear the array of shared
-			 * fences before we add a new request. Although, we
-			 * remain on the same engine, we may be on a different
-			 * timeline and so may continually grow the array,
-			 * trapping a reference to all the old fences, rather
-			 * than replace the existing fence.
-			 */
-			if (rcu_access_pointer(resv->fence)) {
-				dma_resv_lock(resv, NULL);
-				dma_resv_add_excl_fence(resv, NULL);
-				dma_resv_unlock(resv);
-			}
++		if (!dma_resv_test_signaled_rcu(resv, true))
+			break;
+
+		/*
+		 * The object is now idle, clear the array of shared
+		 * fences before we add a new request. Although, we
+		 * remain on the same engine, we may be on a different
+		 * timeline and so may continually grow the array,
+		 * trapping a reference to all the old fences, rather
+		 * than replace the existing fence.
+		 */
+		if (rcu_access_pointer(resv->fence)) {
-			reservation_object_lock(resv, NULL);
-			reservation_object_add_excl_fence(resv, NULL);
-			reservation_object_unlock(resv);
++			dma_resv_lock(resv, NULL);
++			dma_resv_add_excl_fence(resv, NULL);
++			dma_resv_unlock(resv);
		}
-		GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv,
-						       true));
-
		if (obj->base.size >= size)
			goto found;
	}
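
The idiom in this hunk is worth spelling out: adding a NULL exclusive
fence drops the object's entire shared-fence array in one step. A
hypothetical standalone helper (not part of the patch) built on the
renamed API:

	/* Drop every stale fence tracked by an idle object's dma_resv. */
	static void drop_stale_fences(struct dma_resv *resv)
	{
		/* Nothing to release if no shared fences are attached. */
		if (!rcu_access_pointer(resv->fence))
			return;

		dma_resv_lock(resv, NULL);
		/* A NULL exclusive fence also releases the shared array. */
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}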
#define VMW_RES_EVICT_ERR_COUNT 10
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+
-	reservation_object_assert_held(backup->base.base.resv);
++	dma_resv_assert_held(res->backup->base.base.resv);
+	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+			 res->func->prio;
+	list_add_tail(&res->mob_head, &backup->res_list);
+	vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+	struct vmw_buffer_object *backup = res->backup;
+
-	reservation_object_assert_held(backup->base.base.resv);
++	dma_resv_assert_held(backup->base.base.resv);
+	if (vmw_resource_mob_attached(res)) {
+		list_del_init(&res->mob_head);
+		vmw_bo_prio_del(backup, res->used_prio);
+	}
+}
+
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
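
The asserts above encode the locking contract rather than taking the
lock: callers of vmw_resource_mob_attach()/vmw_resource_mob_detach()
must already hold the backup BO's reservation. Under lockdep, the
5.4-era helpers in <linux/dma-resv.h> expand to roughly:

	#define dma_resv_held(obj)	   lockdep_is_held(&(obj)->lock.base)
	#define dma_resv_assert_held(obj)  lockdep_assert_held(&(obj)->lock.base)

so a mismatched caller trips a lockdep splat instead of silently
corrupting the mob_head list.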