			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);
-bool virtio_gpu_is_shmem(struct drm_gem_object *obj);
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
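With the new signature, a caller that holds only the core struct drm_gem_object has to convert to the driver wrapper first. A minimal sketch of such an adapter, assuming the driver's existing gem_to_virtio_gpu_obj() container_of() helper; the wrapper function name here is hypothetical:

static bool virtio_gpu_gem_is_shmem(struct drm_gem_object *obj)
{
	/* gem_to_virtio_gpu_obj() recovers the driver object embedding obj. */
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	return virtio_gpu_is_shmem(bo);
}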
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
-	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

-	if (shmem->pages) {
-		if (shmem->mapped) {
-			dma_unmap_sg(vgdev->vdev->dev.parent,
-				     shmem->pages->sgl, shmem->mapped,
-				     DMA_TO_DEVICE);
-			shmem->mapped = 0;
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+	if (virtio_gpu_is_shmem(bo)) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+		if (shmem->pages) {
+			if (shmem->mapped) {
+				dma_unmap_sg(vgdev->vdev->dev.parent,
+					     shmem->pages->sgl, shmem->mapped,
+					     DMA_TO_DEVICE);
+				shmem->mapped = 0;
+			}
+
+			sg_free_table(shmem->pages);
+			shmem->pages = NULL;
+			drm_gem_shmem_unpin(&bo->base.base);
		}
-		sg_free_table(shmem->pages);
-		shmem->pages = NULL;
-		drm_gem_shmem_unpin(&bo->base.base);
+
+		drm_gem_shmem_free_object(&bo->base.base);
	}
-	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-	drm_gem_shmem_free_object(&bo->base.base);
}
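For reference, this is how virtio_gpu_cleanup_object() reads once the hunk above is applied (reconstructed from the diff): the hardware resource handle is now returned unconditionally, while page teardown and the final free run only for shmem-backed objects.

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	/* Every object type owns a hardware resource id; release it first. */
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				/* Undo the streaming DMA mapping of the sg list. */
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		/* Only shmem objects are freed here; other backing types are
		 * expected to bring their own free path.
		 */
		drm_gem_shmem_free_object(&bo->base.base);
	}
}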
static void virtio_gpu_free_object(struct drm_gem_object *obj)
	.mmap = drm_gem_shmem_mmap,
};

-bool virtio_gpu_is_shmem(struct drm_gem_object *obj)
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
-	return obj->funcs == &virtio_gpu_shmem_funcs;
+	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
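The bo->base.base.funcs chain works because of how the driver object nests the helper objects; a trimmed sketch of the layering (unrelated fields omitted):

/* DRM shmem helper object, embeds the core GEM object. */
struct drm_gem_shmem_object {
	struct drm_gem_object base;	/* base.funcs identifies the object type */
	/* ... */
};

/* virtio-gpu driver object, embeds the shmem helper object. */
struct virtio_gpu_object {
	struct drm_gem_shmem_object base;	/* so bo->base.base is the GEM object */
	uint32_t hw_res_handle;
	/* ... */
};

Comparing the funcs pointer is a reliable type check as long as each backing type installs its own GEM funcs table; shmem-backed objects get virtio_gpu_shmem_funcs from virtio_gpu_create_object() below.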
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,