*/
if (needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- old_obj->resv, NULL,
+ old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
if (ret < 0)
	return ret;
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- obj->resv, NULL,
+ obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
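
For reference, the get/put discipline this hunk relies on, sketched against the pre-v5.4 reservation API; wait_for_render() is a hypothetical helper, not part of this patch:

	#include <linux/dma-fence.h>
	#include <linux/reservation.h>

	static long wait_for_render(struct drm_i915_gem_object *obj)
	{
		struct dma_fence *fence;
		long ret = 0;

		/* Returns a *referenced* fence, or NULL if the object is idle. */
		fence = reservation_object_get_excl_rcu(obj->base.resv);
		if (fence) {
			ret = dma_fence_wait(fence, true); /* interruptible */
			dma_fence_put(fence); /* always balance the _get */
		}
		return ret;
	}

Every reservation_object_get_excl_rcu() must be balanced by a dma_fence_put(), exactly as the hunk does once the fence has been handed to add_rps_boost_after_vblank().
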
*
*/
retry:
- seq = raw_read_seqcount(&obj->resv->seq);
+ seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
+ args->busy =
+ busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
/* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->resv->fence);
+ list = rcu_dereference(obj->base.resv->fence);
if (list) {
unsigned int shared_count = list->shared_count, i;
}
}
- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+ if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
goto retry;
err = 0;
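
The busy ioctl keeps its lockless scheme across the rename: snapshot the reservation's seqcount, dereference the fence pointers under RCU, and retry only when the answer would be reported as busy (an idle answer is presumably always acceptable for an inherently racy query). The generic shape, as a sketch:

	unsigned int seq;

	rcu_read_lock();
	do {
		seq = raw_read_seqcount(&obj->base.resv->seq);
		/* ... read fence_excl and the shared list here ... */
	} while (read_seqcount_retry(&obj->base.resv->seq, seq));
	rcu_read_unlock();
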
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
- obj->resv, NULL,
+ obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
- reservation_object_add_excl_fence(obj->resv, &clflush->dma);
+ reservation_object_add_excl_fence(obj->base.resv,
+ &clflush->dma);
i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) {
i915_gem_object_lock(obj);
err = i915_sw_fence_await_reservation(&work->wait,
- obj->resv, NULL,
+ obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->resv, &work->dma);
+ reservation_object_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);
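
The clflush and clear-pages hunks above share one pattern: allocate a proxy dma_fence for the pending operation, have its i915_sw_fence await everything already in the reservation object (the bool argument appears to select write semantics: true waits on shared and exclusive fences, false on the exclusive fence only), then publish the proxy via reservation_object_add_excl_fence() so later users queue behind the operation. Publishing requires the reservation lock, which is why this hunk brackets it with i915_gem_object_lock()/unlock().
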
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
- exp_info.resv = obj->resv;
+ exp_info.resv = obj->base.resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
- obj->resv = dma_buf->resv;
+ obj->base.resv = dma_buf->resv;
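
Export and import thus meet on the same reservation object: the exporter advertises obj->base.resv through exp_info.resv, while the importer adopts dma_buf->resv after drm_gem_private_object_init() (which otherwise points resv at the object's embedded default), so every device sharing the buffer serialises on a single set of fences.
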
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
0);
if (i915_sw_fence_await_reservation(&stub->chain,
- obj->resv, NULL,
+ obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP) < 0)
goto err;
- reservation_object_add_excl_fence(obj->resv, &stub->dma);
+ reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;
obj->ops = ops;
- reservation_object_init(&obj->__builtin_resv);
- obj->resv = &obj->__builtin_resv;
-
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
i915_active_request_init(&obj->frontbuffer_write,
NULL, frontbuffer_retire);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
- reservation_object_fini(&obj->__builtin_resv);
drm_gem_object_release(&obj->base);
bitmap_free(obj->bit_17);
__drm_gem_object_put(&obj->base);
}
-#define assert_object_held(obj) reservation_object_assert_held((obj)->resv)
+#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
- reservation_object_lock(obj->resv, NULL);
+ reservation_object_lock(obj->base.resv, NULL);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
- return reservation_object_lock_interruptible(obj->resv, NULL);
+ return reservation_object_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- reservation_object_unlock(obj->resv);
+ reservation_object_unlock(obj->base.resv);
}
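
With the object lock now being the ww_mutex embedded in obj->base.resv, a typical caller looks roughly like this (publish_fence() is a hypothetical example, not part of this patch):

	static int publish_fence(struct drm_i915_gem_object *obj,
				 struct dma_fence *fence)
	{
		int err;

		err = i915_gem_object_lock_interruptible(obj);
		if (err)
			return err;

		assert_object_held(obj); /* lockdep check via the macro above */
		reservation_object_add_excl_fence(obj->base.resv, fence);

		i915_gem_object_unlock(obj);
		return 0;
	}

reservation_object_add_excl_fence() must be called with the reservation lock held, which is exactly what these wrappers provide.
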
struct dma_fence *
struct dma_fence *fence;
rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__
-#include <linux/reservation.h>
-
#include <drm/drm_gem.h>
#include "i915_active.h"
bool quirked:1;
} mm;
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct reservation_object *resv;
-
/** References from framebuffers, locks out tiling changes. */
unsigned int framebuffer_references;
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
-
- struct reservation_object __builtin_resv;
};
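
The two reservation fields removed here moved into the core GEM object; paraphrasing include/drm/drm_gem.h of the same era:

	struct drm_gem_object {
		/* ... */
		struct reservation_object *resv; /* what i915 reads as obj->base.resv */
		struct reservation_object _resv; /* embedded default backing */
		/* ... */
	};

drm_gem_object_init() and drm_gem_private_object_init() initialise _resv and point resv at it, and drm_gem_object_release() tears it down, which is why i915's own reservation_object_init()/_fini() calls disappear in the hunks above.
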
static inline struct drm_i915_gem_object *
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->resv);
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
}
if (excl) {
might_sleep();
GEM_BUG_ON(timeout < 0);
- timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
+ timeout = i915_gem_object_wait_reservation(obj->base.resv,
+ flags, timeout);
return timeout < 0 ? timeout : 0;
}
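
i915_gem_object_wait_reservation() is an i915-internal helper; for comparison, the core API of the same era would be used roughly like this (a sketch, not the patch's code):

	long remaining;

	/* true, true: wait on all fences, shared and exclusive, interruptibly */
	remaining = reservation_object_wait_timeout_rcu(obj->base.resv,
							true, true,
							MAX_SCHEDULE_TIMEOUT);
	if (remaining < 0)
		return remaining; /* e.g. -ERESTARTSYS */

The i915 helper layers its own wait-flag handling on top of the same reservation walk.
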
list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->resv;
+ struct reservation_object *resv = obj->base.resv;
if (!reservation_object_test_signaled_rcu(resv, true))
break;
}
}
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
true));
if (obj->base.size >= size)
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->resv);
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
}
if (excl) {
return;
/* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->resv)) {
- if (reservation_object_test_signaled_rcu(obj->resv, true))
- reservation_object_add_excl_fence(obj->resv, NULL);
- reservation_object_unlock(obj->resv);
+ if (reservation_object_trylock(obj->base.resv)) {
+ if (reservation_object_test_signaled_rcu(obj->base.resv, true))
+ reservation_object_add_excl_fence(obj->base.resv, NULL);
+ reservation_object_unlock(obj->base.resv);
}
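
The prune works because reservation_object_add_excl_fence() with a NULL fence also releases every shared fence (the shared count is reset), so a completely signaled object drops all of its request references in one step; the trylock keeps the optimisation opportunistic and free of lock-ordering concerns.
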
/*
vma->vm = vm;
vma->ops = &vm->vma_ops;
vma->obj = obj;
- vma->resv = obj->resv;
+ vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;