void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj);
+ i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
if (!intel_fb->frontbuffer)
return -ENOMEM;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
tiling = i915_gem_object_get_tiling(obj);
stride = i915_gem_object_get_stride(obj);
i915_gem_object_unlock(obj);
dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_sw_fence_await_reservation(&work->wait,
obj->base.resv, NULL, true, 0,
I915_FENCE_GFP);
if (err)
return err;
- err = i915_gem_object_lock_interruptible(obj);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
if (err)
return err;
- err = i915_gem_object_lock_interruptible(obj);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
if (!i915_gem_object_is_framebuffer(obj))
return;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
__i915_gem_object_flush_for_display(obj);
i915_gem_object_unlock(obj);
}
if (ret)
return ret;
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
if (err)
goto out;
- err = i915_gem_object_lock_interruptible(obj);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_unpin;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
if (use_cpu_reloc(cache, obj))
return NULL;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
-static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ bool intr)
{
- dma_resv_lock(obj->base.resv, NULL);
+ int ret;
+
+ if (intr)
+ ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
+ else
+ ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
+
+ if (!ret && ww)
+ list_add_tail(&obj->obj_link, &ww->obj_list);
+
+ /* -EALREADY means this ctx already holds the object's lock: treat it
+  * as success so callers may lock the same object twice (exercised by
+  * igt_gem_ww_ctx below). */
+ if (ret == -EALREADY)
+ ret = 0;
+
+ /* Record the contended object for i915_gem_ww_ctx_backoff(). */
+ if (ret == -EDEADLK)
+ ww->contended = obj;
+
+ return ret;
}
-static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_trylock(obj->base.resv);
+ return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}
-static inline int
-i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
+static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_lock_interruptible(obj->base.resv, NULL);
+ WARN_ON(ww && !ww->intr);
+ return __i915_gem_object_lock(obj, ww, true);
+}
+
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
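With these helpers the ww context is optional: a NULL ww preserves the old
single-object locking behaviour (hence the mechanical
i915_gem_object_lock(obj, NULL) call-site changes elsewhere in this patch),
while a non-NULL ww threads the acquire context into dma_resv_lock() and
tracks the object for later backoff. A minimal sketch of the resulting caller
contract; lock_pair() is a hypothetical helper, not part of this patch:

static int lock_pair(struct i915_gem_ww_ctx *ww,
                     struct drm_i915_gem_object *a,
                     struct drm_i915_gem_object *b)
{
        int err;

        /* May return -EDEADLK with ww->contended set; caller must back off. */
        err = i915_gem_object_lock(a, ww);
        if (err)
                return err;

        /*
         * If a == b, dma_resv_lock() reports -EALREADY, which
         * __i915_gem_object_lock() filters to 0, so double-locking within
         * one ctx is harmless.
         */
        return i915_gem_object_lock(b, ww);
}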
struct list_head lut_list;
spinlock_t lut_lock; /* guards lut_list */
+ /**
+ * @obj_link: Link into @i915_gem_ww_ctx.obj_list
+ *
+ * When we lock this object through i915_gem_object_lock() with a
+ * context, we add it to the list to ensure we can unlock everything
+ * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() is called.
+ */
+ struct list_head obj_link;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
union {
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
drm_WARN_ON(&i915->drm,
i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
* whilst executing a fenced command for an untiled object.
*/
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
{
int err;
- i915_gem_object_lock(vma->obj);
+ i915_gem_object_lock(vma->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
i915_gem_object_unlock(vma->obj);
if (err)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
u32 __iomem *map;
int err = 0;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
i915_gem_object_unlock(ctx->obj);
if (err)
u32 __iomem *map;
int err = 0;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
i915_gem_object_unlock(ctx->obj);
if (err)
u32 *map;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
i915_gem_object_unlock(ctx->obj);
if (err)
u32 *map;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
i915_gem_object_unlock(ctx->obj);
if (err)
u32 *cs;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
i915_gem_object_unlock(ctx->obj);
if (err)
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
i915_request_add(rq);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
}
/* Make the object dirty so that put_pages must do copy back the data */
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
return PTR_ERR(results);
err = 0;
- i915_gem_object_lock(results);
+ i915_gem_object_lock(results, NULL);
intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
goto put_obj;
}
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (ret) {
GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
goto out_unpin;
GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
goto out_unpin;
i915_gem_drain_freed_objects(i915);
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
drm_WARN_ON(&i915->drm,
i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
return ret;
}
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
+{
+ ww_acquire_init(&ww->ctx, &reservation_ww_class);
+ INIT_LIST_HEAD(&ww->obj_list);
+ ww->intr = intr;
+ ww->contended = NULL;
+}
+
+static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
+{
+ struct drm_i915_gem_object *obj;
+
+ while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
+ list_del(&obj->obj_link);
+ i915_gem_object_unlock(obj);
+ }
+}
+
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
+{
+ i915_gem_ww_ctx_unlock_all(ww);
+ WARN_ON(ww->contended);
+ ww_acquire_fini(&ww->ctx);
+}
+
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
+{
+ int ret = 0;
+
+ if (WARN_ON(!ww->contended))
+ return -EINVAL;
+
+ /* Drop every lock held in this ctx, then sleep-acquire the lock we
+  * contended on; the caller is expected to retry its locking sequence. */
+ i915_gem_ww_ctx_unlock_all(ww);
+ if (ww->intr)
+ ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
+ else
+ dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
+
+ if (!ret)
+ list_add_tail(&ww->contended->obj_link, &ww->obj_list);
+
+ ww->contended = NULL;
+
+ return ret;
+}
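Taken together, init/lock/backoff/fini form the usual wound/wait retry loop.
A minimal usage sketch, assuming a hypothetical do_work() performed under the
lock (the igt_gem_ww_ctx selftest added below exercises the same pattern):

static int do_locked(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = do_work(obj); /* hypothetical; may itself return -EDEADLK */
        if (err == -EDEADLK) {
                /* All locks were dropped; slow-lock the contended object. */
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        /* fini unlocks anything still held on the success path too. */
        i915_gem_ww_ctx_fini(&ww);
        return err;
}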
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
return test_bit(TASKLET_STATE_SCHED, &t->state);
}
+struct i915_gem_ww_ctx {
+ struct ww_acquire_ctx ctx;
+ struct list_head obj_list;
+ bool intr;
+ struct drm_i915_gem_object *contended;
+};
+
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
+
#endif /* __I915_GEM_H__ */
return err;
}
+static int igt_gem_ww_ctx(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *obj2;
+ struct i915_gem_ww_ctx ww;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
+ goto put1;
+ }
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ /* Lock the objects, twice for good measure (-EALREADY handling) */
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj2, &ww);
+ if (!err)
+ err = i915_gem_object_lock(obj2, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ i915_gem_object_put(obj2);
+put1:
+ i915_gem_object_put(obj);
+ return err;
+}
+
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_gem_suspend),
SUBTEST(igt_gem_hibernate),
+ SUBTEST(igt_gem_ww_ctx),
};
if (intel_gt_is_wedged(&i915->gt))
unsigned int x, y;
int err;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
if (err)
goto out_unpin;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_wc_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)