 		swap(p->vm->last_unlocked, tmp);
 		dma_fence_put(tmp);
 	} else {
-		amdgpu_bo_fence(p->vm->root.bo, f, true);
+		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
+				   DMA_RESV_USAGE_BOOKKEEP);
 	}
-	if (fence && !p->immediate)
+	if (fence && !p->immediate) {
+		/*
+		 * Most hw generations now have a separate queue for page table
+		 * updates, but when the queue is shared with userspace we need
+		 * the extra CPU round trip to correctly flush the TLB.
+		 */
+		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
 		swap(*fence, f);
+	}
 	dma_fence_put(f);
 	return 0;
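The change above switches amdgpu's page-table update path from amdgpu_bo_fence() to dma_resv_add_fence() with DMA_RESV_USAGE_BOOKKEEP. For context, a minimal sketch of the usual lock/reserve/add pattern around that call follows; only the dma_resv_*() calls and the usage enum are the kernel API, while the helper name, the drm_gem_object parameter and the NULL ww context are illustrative assumptions, not part of the patch.

	/* Illustrative sketch only; not taken from the patch above. */
	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>
	#include <drm/drm_gem.h>

	static int attach_bookkeep_fence(struct drm_gem_object *obj,
					 struct dma_fence *fence)
	{
		int ret;

		/* The reservation object must be held while fences are added. */
		dma_resv_lock(obj->resv, NULL);

		/* A fence slot has to be reserved before dma_resv_add_fence(). */
		ret = dma_resv_reserve_fences(obj->resv, 1);
		if (!ret)
			/*
			 * BOOKKEEP fences only track outstanding work (e.g. page
			 * table updates); implicit sync does not wait on them.
			 */
			dma_resv_add_fence(obj->resv, fence,
					   DMA_RESV_USAGE_BOOKKEEP);

		dma_resv_unlock(obj->resv);
		return ret;
	}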
 #endif
 	return 0;
+
+err_disable_clk:
+	clk_disable_unprepare(vc4_hdmi->hsm_clock);
+	return ret;
 }
+static void vc4_hdmi_put_ddc_device(void *ptr)
+{
+	struct vc4_hdmi *vc4_hdmi = ptr;
+
+	put_device(&vc4_hdmi->ddc->dev);
+}
+
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);