If we inherit an error along the fence chain, we skip the main work
callback and go straight to the error. In the case of the vma bind
worker, the pinned pages were only dropped from that work callback, so
on an inherited error they were left pinned. Move the unpin into the
worker's release callback, which runs on success and error alike.
In the process, make sure we call the release callback earlier, at the
point of completion, rather than waiting until the final reference to
the fence is dropped (a reference is kept while the fence is being
listened upon).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191216161717.2688274-1-chris@chris-wilson.co.uk
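
[Editor's note: the following is a minimal, self-contained user-space C
sketch of the completion ordering this patch adopts. All names here
(example_work, example_run, and so on) are hypothetical stand-ins, not
the i915 dma_fence_work API; it only illustrates that the release
callback runs before the fence is signalled, on both the normal path
and the inherited-error path.]

#include <assert.h>
#include <stdbool.h>

struct example_work;

struct example_ops {
	int (*work)(struct example_work *w);      /* main callback; skipped on inherited error */
	void (*release)(struct example_work *w);  /* cleanup; runs on both paths */
};

struct example_work {
	const struct example_ops *ops;
	int error;        /* error inherited along the fence chain, 0 if none */
	bool signalled;   /* stand-in for the underlying dma_fence */
	bool released;
};

/* Run the release callback before signalling, not at the final fence put. */
static void example_complete(struct example_work *w)
{
	if (w->ops->release)
		w->ops->release(w);
	w->signalled = true;          /* stand-in for dma_fence_signal() */
}

static void example_run(struct example_work *w)
{
	if (!w->error) {              /* no inherited error: do the main work */
		int err = w->ops->work(w);
		if (err)
			w->error = err;
	}
	example_complete(w);          /* release + signal on success and error alike */
}

static int do_work(struct example_work *w) { (void)w; return 0; }
static void do_release(struct example_work *w) { w->released = true; }

static const struct example_ops ops = { .work = do_work, .release = do_release };

int main(void)
{
	struct example_work ok = { .ops = &ops };
	struct example_work bad = { .ops = &ops, .error = -5 };

	example_run(&ok);
	example_run(&bad);            /* work skipped, but release still ran */
	assert(ok.released && bad.released);
	return 0;
}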
static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+ struct drm_i915_gem_object *obj = clflush->obj;
int err;
err = i915_gem_object_pin_pages(obj);
if (err)
- goto put;
+ return err;
__do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-put:
- i915_gem_object_put(obj);
- return err;
+ return 0;
}
static void clflush_release(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- if (clflush->obj)
- i915_gem_object_put(clflush->obj);
+ i915_gem_object_put(clflush->obj);
}
static const struct dma_fence_work_ops clflush_ops = {
#include "i915_sw_fence_work.h"
+static void fence_complete(struct dma_fence_work *f)
+{
+ if (f->ops->release)
+ f->ops->release(f);
+ dma_fence_signal(&f->dma);
+}
+
static void fence_work(struct work_struct *work)
{
struct dma_fence_work *f = container_of(work, typeof(*f), work);
err = f->ops->work(f);
if (err)
dma_fence_set_error(&f->dma, err);
- dma_fence_signal(&f->dma);
+
+ fence_complete(f);
dma_fence_put(&f->dma);
}
dma_fence_get(&f->dma);
queue_work(system_unbound_wq, &f->work);
} else {
- dma_fence_signal(&f->dma);
+ fence_complete(f);
}
break;
{
struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
- if (f->ops->release)
- f->ops->release(f);
-
i915_sw_fence_fini(&f->chain);
BUILD_BUG_ON(offsetof(typeof(*f), dma));
struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
enum i915_cache_level cache_level;
unsigned int flags;
};
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
- if (vma->obj)
- __i915_gem_object_unpin_pages(vma->obj);
-
return err;
}
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
+ .release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
i915_active_set_exclusive(&vma->active, &work->base.dma);
work->base.dma.error = 0; /* enable the queue_work() */
- if (vma->obj)
+ if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
} else {
GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
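
[Editor's note: the pin hand-off used in the vma path above can be
illustrated outside the driver. The sketch below is hypothetical
user-space C; object_pin_pages(), pin_work_queue() and friends are
made-up stand-ins, not i915 functions. The caller takes the pin and
records the object on the work item, and the release callback drops it
regardless of whether the main work ran.]

#include <assert.h>
#include <stddef.h>

struct object {
	int pin_count;
};

static int object_pin_pages(struct object *obj)
{
	obj->pin_count++;                 /* trivial stand-in for the real pin */
	return 0;
}

static void object_unpin_pages(struct object *obj)
{
	assert(obj->pin_count > 0);
	obj->pin_count--;
}

struct pin_work {
	struct object *pinned;            /* non-NULL once a pin has been handed over */
};

/* Release callback: drops the pin whether or not the main work ran. */
static void pin_work_release(struct pin_work *w)
{
	if (w->pinned)
		object_unpin_pages(w->pinned);
}

/* Caller side: take the pin, then transfer ownership to the work item. */
static int pin_work_queue(struct pin_work *w, struct object *obj)
{
	int err = object_pin_pages(obj);
	if (err)
		return err;

	w->pinned = obj;
	return 0;
}

int main(void)
{
	struct object obj = { .pin_count = 0 };
	struct pin_work w = { .pinned = NULL };

	if (pin_work_queue(&w, &obj) == 0)
		pin_work_release(&w);     /* in the driver this runs from the fence worker */

	assert(obj.pin_count == 0);
	return 0;
}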