drm/virtio: implement context init: add virtio_gpu_fence_event
author Gurchetan Singh <gurchetansingh@chromium.org>
Tue, 21 Sep 2021 23:20:23 +0000 (16:20 -0700)
committer Gerd Hoffmann <kraxel@redhat.com>
Wed, 29 Sep 2021 07:22:31 +0000 (09:22 +0200)
Similar to DRM_VMW_EVENT_FENCE_SIGNALED, this sends a pollable event
to the DRM file descriptor when a fence on a specific ring is
signaled.

One difference is that the event is not exposed via the UAPI -- this is
because host responses arrive on a shared memory buffer of type
BLOB_MEM_GUEST [this is the common way to receive responses with
virtgpu].  As such, there is no context-specific read(..)
implementation either -- just a poll(..) implementation.
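
For context, a rough userspace-side sketch (not part of this patch, using
hypothetical names) of how a client might consume this: assuming a context
whose ring_idx_mask is non-zero (set up by the context-init changes earlier
in this series) and some submitted work, the client simply poll()s the DRM
fd.  The kernel's poll() dequeues and frees the pending event itself, so
POLLIN is the entire notification and no read() follows.

  #include <poll.h>
  #include <stdio.h>

  /* Returns 1 if a fence on one of this context's rings signaled,
   * 0 on timeout, -1 on error.  drm_fd is an already-open render node
   * whose virtgpu context opted into per-ring fence events
   * (hypothetical helper, for illustration only).
   */
  static int wait_for_fence_event(int drm_fd, int timeout_ms)
  {
          struct pollfd pfd = {
                  .fd = drm_fd,
                  .events = POLLIN,
          };
          int ret = poll(&pfd, 1, timeout_ms);

          if (ret < 0) {
                  perror("poll");
                  return -1;
          }
          if (ret == 0)
                  return 0;       /* no fence signaled yet */

          /* POLLIN set: a per-ring fence signaled; nothing to read. */
          return 1;
  }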

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Nicholas Verne <nverne@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-12-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 9d963f1fda8f59d92582d7240edfb79f3e8e8116..749db18dcfa21227472cae71be08b82e33965770 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
 #include <linux/module.h>
 #include <linux/console.h>
 #include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
 
 #include <drm/drm.h>
 #include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
        schedule_work(&vgdev->config_changed_work);
 }
 
+static __poll_t virtio_gpu_poll(struct file *filp,
+                               struct poll_table_struct *wait)
+{
+       struct drm_file *drm_file = filp->private_data;
+       struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+       struct drm_device *dev = drm_file->minor->dev;
+       struct drm_pending_event *e = NULL;
+       __poll_t mask = 0;
+
+       if (!vfpriv->ring_idx_mask)
+               return drm_poll(filp, wait);
+
+       poll_wait(filp, &drm_file->event_wait, wait);
+
+       if (!list_empty(&drm_file->event_list)) {
+               spin_lock_irq(&dev->event_lock);
+               e = list_first_entry(&drm_file->event_list,
+                                    struct drm_pending_event, link);
+               drm_file->event_space += e->event->length;
+               list_del(&e->link);
+               spin_unlock_irq(&dev->event_lock);
+
+               kfree(e);
+               mask |= EPOLLIN | EPOLLRDNORM;
+       }
+
+       return mask;
+}
+
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -194,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .release        = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .compat_ioctl   = drm_compat_ioctl,
+       .poll           = virtio_gpu_poll,
+       .read           = drm_read,
+       .llseek         = noop_llseek,
+       .mmap           = drm_gem_mmap
+};
 
 static const struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index cb60d52c2bd14f1c60b69e43cc29990c469b6384..e0265fe74aa565f639127042f7071faf80c5b50c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -138,11 +138,18 @@ struct virtio_gpu_fence_driver {
        spinlock_t       lock;
 };
 
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+       struct drm_pending_event base;
+       struct drm_event event;
+};
+
 struct virtio_gpu_fence {
        struct dma_fence f;
        uint32_t ring_idx;
        uint64_t fence_id;
        bool emit_fence_info;
+       struct virtio_gpu_fence_event *e;
        struct virtio_gpu_fence_driver *drv;
        struct list_head node;
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 98a00c1e654dc404d56800b34e70b81baa754c80..f28357dbde3522f55382723084206618794e1d32 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -152,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
                                continue;
 
                        dma_fence_signal_locked(&curr->f);
+                       if (curr->e) {
+                               drm_send_event(vgdev->ddev, &curr->e->base);
+                               curr->e = NULL;
+                       }
+
                        list_del(&curr->node);
                        dma_fence_put(&curr->f);
                }
 
                dma_fence_signal_locked(&signaled->f);
+               if (signaled->e) {
+                       drm_send_event(vgdev->ddev, &signaled->e->base);
+                       signaled->e = NULL;
+               }
+
                list_del(&signaled->node);
                dma_fence_put(&signaled->f);
                break;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index be7b22a03884a23eae6463191c06013a7cf6d866..fdaa7f3d9eeb7542aeb78df94501191784f112ad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
                                    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
                                    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
 
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+                                        struct drm_file *file,
+                                        struct virtio_gpu_fence *fence,
+                                        uint32_t ring_idx)
+{
+       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+       struct virtio_gpu_fence_event *e = NULL;
+       int ret;
+
+       if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+               return 0;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (!e)
+               return -ENOMEM;
+
+       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+       e->event.length = sizeof(e->event);
+
+       ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+       if (ret)
+               goto free;
+
+       fence->e = e;
+       return 0;
+free:
+       kfree(e);
+       return ret;
+}
+
 /* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
 static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
                                             struct virtio_gpu_fpriv *vfpriv)
@@ -195,6 +225,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                goto out_unresv;
        }
 
+       ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+       if (ret)
+               goto out_unresv;
+
        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {