faults can implement their own mmap file operation handler.
For platforms without MMU the GEM core provides a helper method
-drm_gem_cma_get_unmapped_area(). The mmap() routines will call this to get a
+drm_gem_dma_get_unmapped_area(). The mmap() routines will call this to get a
proposed address for the mapping.
-To use drm_gem_cma_get_unmapped_area(), drivers must fill the struct
+To use drm_gem_dma_get_unmapped_area(), drivers must fill the struct
:c:type:`struct file_operations <file_operations>` get_unmapped_area field with
-a pointer on drm_gem_cma_get_unmapped_area().
+a pointer to drm_gem_dma_get_unmapped_area().
More detailed information about get_unmapped_area can be found in
Documentation/admin-guide/mm/nommu-mmap.rst
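As a minimal sketch (the foo_ prefix names a hypothetical driver, not an
existing one), the noMMU hookup looks roughly like this::

    static const struct file_operations foo_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .mmap           = drm_gem_mmap,
    #ifndef CONFIG_MMU
            .get_unmapped_area = drm_gem_dma_get_unmapped_area,
    #endif
    };

Drivers that generate their file operations with the DEFINE_DRM_GEM_DMA_FOPS()
macro should get this hookup for free on !MMU configurations.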
.. kernel-doc:: drivers/gpu/drm/drm_gem.c
:export:
-GEM CMA Helper Functions Reference
+GEM DMA Helper Functions Reference
----------------------------------
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
- :doc: cma helpers
+.. kernel-doc:: drivers/gpu/drm/drm_gem_dma_helper.c
+ :doc: dma helpers
-.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+.. kernel-doc:: include/drm/drm_gem_dma_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_gem_dma_helper.c
:export:
GEM SHMEM Helper Function Reference
help
Helpers for ttm-based gem objects
-config DRM_GEM_CMA_HELPER
+config DRM_GEM_DMA_HELPER
tristate
depends on DRM
help
- Choose this if you need the GEM CMA helper functions
+ Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
-drm_cma_helper-y := drm_gem_cma_helper.o
-drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
-obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
+drm_dma_helper-y := drm_gem_dma_helper.o
+drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
+obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Mali Display
depends on DRM && OF
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Komeda display
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "komeda_framebuffer.h"
}
min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- - to_drm_gem_cma_obj(obj)->paddr;
+ - to_drm_gem_dma_obj(obj)->paddr;
if (obj->size < min_size) {
DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
i, obj->size, min_size);
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
{
struct drm_framebuffer *fb = &kfb->base;
- const struct drm_gem_cma_object *obj;
+ const struct drm_gem_dma_object *obj;
u32 offset, plane_x, plane_y, block_w, block_sz;
if (plane >= fb->format->num_planes) {
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "komeda_framebuffer.h"
#include "komeda_kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);
-static int komeda_gem_cma_dumb_create(struct drm_file *file,
+static int komeda_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = ALIGN(pitch, mdev->chip.bus_width);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = hdlcd_debugfs_init,
#endif
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static int malidp_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
#ifdef CONFIG_DEBUG_FS
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
n_planes = fb->format->num_planes;
for (i = 0; i < n_planes; i++) {
- struct drm_gem_cma_object *obj = drm_fb_dma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, i);
/* memory write buffers are never rotated */
u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
for (i = 0; i < ms->n_planes; i++) {
struct drm_gem_object *obj;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sg_table *sgt;
struct scatterlist *sgl;
obj = drm_gem_fb_get_obj(ms->base.fb, i);
- cma_obj = to_drm_gem_cma_obj(obj);
+ dma_obj = to_drm_gem_dma_obj(obj);
- if (cma_obj->sgt)
- sgt = cma_obj->sgt;
+ if (dma_obj->sgt)
+ sgt = dma_obj->sgt;
else
sgt = obj->funcs->get_sg_table(obj);
while (sgl) {
if (sgl->length < pgsize) {
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
return false;
}
sgl = sg_next(sgl);
}
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
}
paddr = drm_fb_dma_get_gem_addr(fb, plane->state,
plane_index);
} else {
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
obj = drm_fb_dma_get_gem_obj(fb, plane_index);
}
/*
- * We could grab something from CMA if it's enabled, but that
+ * We could grab something from the DMA helpers if they are enabled, but that
* involves building in a problem:
*
- * CMA's interface uses dma_alloc_coherent(), which provides us
- * with an CPU virtual address and a device address.
+ * GEM DMA helper interface uses dma_alloc_coherent(), which provides
+ * us with a CPU virtual address and a device address.
*
* The CPU virtual address may be either an address in the kernel
* direct mapped region (for example, as it would be on x86) or
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select CMA if HAVE_DMA_CONTIGUOUS
select MFD_SYSCON
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
struct drm_crtc *crtc = &pipe->crtc;
struct drm_framebuffer *fb = pipe->plane.state->fb;
struct drm_pending_vblank_event *event;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
spin_lock_irq(&crtc->dev->event_lock);
event = crtc->state->event;
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
drm_kms_helper_poll_fini(drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
help
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
clk_disable_unprepare(dc->hlcdc->periph_clk);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "atmel_hlcdc_dc.h"
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
for (i = 0; i < state->nplanes; i++) {
- struct drm_gem_cma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
state->dscrs[i]->addr = gem->paddr + state->offsets[i];
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
*/
/**
- * drm_fb_dma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * drm_fb_dma_get_gem_obj() - Get DMA GEM object for framebuffer
* @fb: The framebuffer
* @plane: Which plane
*
- * Return the CMA GEM object for given framebuffer.
+ * Return the DMA GEM object for given framebuffer.
*
* This function will usually be called from the CRTC callback functions.
*/
-struct drm_gem_cma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct drm_gem_object *gem;
if (!gem)
return NULL;
- return to_drm_gem_cma_obj(gem);
+ return to_drm_gem_dma_obj(gem);
}
EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_obj);
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
dma_addr_t paddr;
u8 h_div = 1, v_div = 1;
u32 block_w = drm_format_info_block_width(fb->format, plane);
* @state: New plane state
*
* This function can be used by drivers that use damage clips and have
- * CMA GEM objects backed by non-coherent memory. Calling this function
+ * DMA GEM objects backed by non-coherent memory. Calling this function
* in a plane's .atomic_update ensures that all the data in the backing
* memory have been written to RAM.
*/
{
const struct drm_format_info *finfo = state->fb->format;
struct drm_atomic_helper_damage_iter iter;
- const struct drm_gem_cma_object *cma_obj;
+ const struct drm_gem_dma_object *dma_obj;
unsigned int offset, i;
struct drm_rect clip;
dma_addr_t daddr;
size_t nb_bytes;
for (i = 0; i < finfo->num_planes; i++) {
- cma_obj = drm_fb_dma_get_gem_obj(state->fb, i);
- if (!cma_obj->map_noncoherent)
+ dma_obj = drm_fb_dma_get_gem_obj(state->fb, i);
+ if (!dma_obj->map_noncoherent)
continue;
daddr = drm_fb_dma_get_gem_addr(state->fb, state, i);
* };
*
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
- * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
+ * DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
* simpler.
*
* The driver's &file_operations must be stored in &drm_driver.fops.
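As a hedged illustration of that wiring (the foo_ names are hypothetical, not
an existing driver), a GEM DMA helper based driver typically ends up with::

    DEFINE_DRM_GEM_DMA_FOPS(foo_fops);

    static const struct drm_driver foo_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            DRM_GEM_DMA_DRIVER_OPS,
            .fops = &foo_fops,  /* the file_operations defined by the macro above */
            .name = "foo",
            .desc = "Hypothetical GEM DMA helper based driver",
    };

This is the same pattern the hdlcd, atmel-hlcdc and fsl-dcu hunks in this
series follow.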
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * drm gem CMA (contiguous memory allocator) helper functions
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/dma-buf.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vma_manager.h>
-
-/**
- * DOC: cma helpers
- *
- * The DRM GEM/CMA helpers are a means to provide buffer objects that are
- * presented to the device as a contiguous chunk of memory. This is useful
- * for devices that do not support scatter-gather DMA (either directly or
- * by using an intimately attached IOMMU).
- *
- * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
- * Contiguous Memory Allocator (CMA).
- *
- * For devices that access the memory bus through an (external) IOMMU then
- * the buffer objects are allocated using a traditional page-based
- * allocator and may be scattered through physical memory. However they
- * are contiguous in the IOVA space so appear contiguous to devices using
- * them.
- *
- * For other devices then the helpers rely on CMA to provide buffer
- * objects that are physically contiguous in memory.
- *
- * For GEM callback helpers in struct &drm_gem_object functions, see likewise
- * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
- * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
- */
-
-static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
-};
-
-/**
- * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
- * @drm: DRM device
- * @size: size of the object to allocate
- * @private: true if used for internal purposes
- *
- * This function creates and initializes a GEM CMA object of the given size,
- * but doesn't allocate any memory to back the object.
- *
- * Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *gem_obj;
- int ret = 0;
-
- if (drm->driver->gem_create_object) {
- gem_obj = drm->driver->gem_create_object(drm, size);
- if (IS_ERR(gem_obj))
- return ERR_CAST(gem_obj);
- cma_obj = to_drm_gem_cma_obj(gem_obj);
- } else {
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
- return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
- }
-
- if (!gem_obj->funcs)
- gem_obj->funcs = &drm_gem_cma_default_funcs;
-
- if (private) {
- drm_gem_private_object_init(drm, gem_obj, size);
-
- /* Always use writecombine for dma-buf mappings */
- cma_obj->map_noncoherent = false;
- } else {
- ret = drm_gem_object_init(drm, gem_obj, size);
- }
- if (ret)
- goto error;
-
- ret = drm_gem_create_mmap_offset(gem_obj);
- if (ret) {
- drm_gem_object_release(gem_obj);
- goto error;
- }
-
- return cma_obj;
-
-error:
- kfree(cma_obj);
- return ERR_PTR(ret);
-}
-
-/**
- * drm_gem_cma_create - allocate an object with the given size
- * @drm: DRM device
- * @size: size of the object to allocate
- *
- * This function creates a CMA GEM object and allocates memory as backing store.
- * The allocated memory will occupy a contiguous chunk of bus address space.
- *
- * For devices that are directly connected to the memory bus then the allocated
- * memory will be physically contiguous. For devices that access through an
- * IOMMU, then the allocated memory is not expected to be physically contiguous
- * because having contiguous IOVAs is sufficient to meet a devices DMA
- * requirements.
- *
- * Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size)
-{
- struct drm_gem_cma_object *cma_obj;
- int ret;
-
- size = round_up(size, PAGE_SIZE);
-
- cma_obj = __drm_gem_cma_create(drm, size, false);
- if (IS_ERR(cma_obj))
- return cma_obj;
-
- if (cma_obj->map_noncoherent) {
- cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
- &cma_obj->paddr,
- DMA_TO_DEVICE,
- GFP_KERNEL | __GFP_NOWARN);
- } else {
- cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- }
- if (!cma_obj->vaddr) {
- drm_dbg(drm, "failed to allocate buffer with size %zu\n",
- size);
- ret = -ENOMEM;
- goto error;
- }
-
- return cma_obj;
-
-error:
- drm_gem_object_put(&cma_obj->base);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_create);
-
-/**
- * drm_gem_cma_create_with_handle - allocate an object with the given size and
- * return a GEM handle to it
- * @file_priv: DRM file-private structure to register the handle for
- * @drm: DRM device
- * @size: size of the object to allocate
- * @handle: return location for the GEM handle
- *
- * This function creates a CMA GEM object, allocating a chunk of memory as
- * backing store. The GEM object is then added to the list of object associated
- * with the given file and a handle to it is returned.
- *
- * The allocated memory will occupy a contiguous chunk of bus address space.
- * See drm_gem_cma_create() for more details.
- *
- * Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-static struct drm_gem_cma_object *
-drm_gem_cma_create_with_handle(struct drm_file *file_priv,
- struct drm_device *drm, size_t size,
- uint32_t *handle)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *gem_obj;
- int ret;
-
- cma_obj = drm_gem_cma_create(drm, size);
- if (IS_ERR(cma_obj))
- return cma_obj;
-
- gem_obj = &cma_obj->base;
-
- /*
- * allocate a id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, gem_obj, handle);
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_put(gem_obj);
- if (ret)
- return ERR_PTR(ret);
-
- return cma_obj;
-}
-
-/**
- * drm_gem_cma_free - free resources associated with a CMA GEM object
- * @cma_obj: CMA GEM object to free
- *
- * This function frees the backing memory of the CMA GEM object, cleans up the
- * GEM object state and frees the memory used to store the object itself.
- * If the buffer is imported and the virtual address is set, it is released.
- */
-void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
-{
- struct drm_gem_object *gem_obj = &cma_obj->base;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);
-
- if (gem_obj->import_attach) {
- if (cma_obj->vaddr)
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
- drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
- } else if (cma_obj->vaddr) {
- if (cma_obj->map_noncoherent)
- dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr,
- DMA_TO_DEVICE);
- else
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
- }
-
- drm_gem_object_release(gem_obj);
-
- kfree(cma_obj);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free);
-
-/**
- * drm_gem_cma_dumb_create_internal - create a dumb buffer object
- * @file_priv: DRM file-private structure to create the dumb buffer for
- * @drm: DRM device
- * @args: IOCTL data
- *
- * This aligns the pitch and size arguments to the minimum required. This is
- * an internal helper that can be wrapped by a driver to account for hardware
- * with more specific alignment requirements. It should not be used directly
- * as their &drm_driver.dumb_create callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args)
-{
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_cma_object *cma_obj;
-
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
-
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
-
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
- &args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
-
-/**
- * drm_gem_cma_dumb_create - create a dumb buffer object
- * @file_priv: DRM file-private structure to create the dumb buffer for
- * @drm: DRM device
- * @args: IOCTL data
- *
- * This function computes the pitch of the dumb buffer and rounds it up to an
- * integer number of bytes per pixel. Drivers for hardware that doesn't have
- * any additional restrictions on the pitch can directly use this function as
- * their &drm_driver.dumb_create callback.
- *
- * For hardware with additional restrictions, drivers can adjust the fields
- * set up by userspace and pass the IOCTL data along to the
- * drm_gem_cma_dumb_create_internal() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_cma_object *cma_obj;
-
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->size = args->pitch * args->height;
-
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
- &args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-
-const struct vm_operations_struct drm_gem_cma_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
-
-#ifndef CONFIG_MMU
-/**
- * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
- * @filp: file object
- * @addr: memory address
- * @len: buffer size
- * @pgoff: page offset
- * @flags: memory flags
- *
- * This function is used in noMMU platforms to propose address mapping
- * for a given buffer.
- * It's intended to be used as a direct handler for the struct
- * &file_operations.get_unmapped_area operation.
- *
- * Returns:
- * mapping address on success or a negative error code on failure.
- */
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *obj = NULL;
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_offset_node *node;
-
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- pgoff,
- len >> PAGE_SHIFT);
- if (likely(node)) {
- obj = container_of(node, struct drm_gem_object, vma_node);
- /*
- * When the object is being freed, after it hits 0-refcnt it
- * proceeds to tear down the object. In the process it will
- * attempt to remove the VMA offset and so acquire this
- * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
- * that matches our range, we know it is in the process of being
- * destroyed and will be freed as soon as we release the lock -
- * so we have to check for the 0-refcnted object and treat it as
- * invalid.
- */
- if (!kref_get_unless_zero(&obj->refcount))
- obj = NULL;
- }
-
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-
- if (!obj)
- return -EINVAL;
-
- if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put(obj);
- return -EACCES;
- }
-
- cma_obj = to_drm_gem_cma_obj(obj);
-
- drm_gem_object_put(obj);
-
- return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
-#endif
-
-/**
- * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
- * @cma_obj: CMA GEM object
- * @p: DRM printer
- * @indent: Tab indentation level
- *
- * This function prints paddr and vaddr for use in e.g. debugfs output.
- */
-void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
- struct drm_printer *p, unsigned int indent)
-{
- drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
- drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
-}
-EXPORT_SYMBOL(drm_gem_cma_print_info);
-
-/**
- * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
- * pages for a CMA GEM object
- * @cma_obj: CMA GEM object
- *
- * This function exports a scatter/gather table by calling the standard
- * DMA mapping API.
- *
- * Returns:
- * A pointer to the scatter/gather table of pinned pages or NULL on failure.
- */
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
-{
- struct drm_gem_object *obj = &cma_obj->base;
- struct sg_table *sgt;
- int ret;
-
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- return ERR_PTR(-ENOMEM);
-
- ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
- cma_obj->paddr, obj->size);
- if (ret < 0)
- goto out;
-
- return sgt;
-
-out:
- kfree(sgt);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
-
-/**
- * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
- * driver's scatter/gather table of pinned pages
- * @dev: device to import into
- * @attach: DMA-BUF attachment
- * @sgt: scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table exported via DMA-BUF by
- * another driver. Imported buffers must be physically contiguous in memory
- * (i.e. the scatter/gather table must contain a single entry). Drivers that
- * use the CMA helpers should set this as their
- * &drm_driver.gem_prime_import_sg_table callback.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_cma_object *cma_obj;
-
- /* check if the entries in the sg_table are contiguous */
- if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
- return ERR_PTR(-EINVAL);
-
- /* Create a CMA GEM buffer. */
- cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
- if (IS_ERR(cma_obj))
- return ERR_CAST(cma_obj);
-
- cma_obj->paddr = sg_dma_address(sgt->sgl);
- cma_obj->sgt = sgt;
-
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
-
- return &cma_obj->base;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
-
-/**
- * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
- * address space
- * @cma_obj: CMA GEM object
- * @map: Returns the kernel virtual address of the CMA GEM object's backing
- * store.
- *
- * This function maps a buffer into the kernel's virtual address space.
- * Since the CMA buffers are already mapped into the kernel virtual address
- * space this simply returns the cached virtual address.
- *
- * Returns:
- * 0 on success, or a negative error code otherwise.
- */
-int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
- struct iosys_map *map)
-{
- iosys_map_set_vaddr(map, cma_obj->vaddr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
-
-/**
- * drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @cma_obj: CMA GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function maps a buffer into a userspace process's address space.
- * In addition to the usual GEM VMA setup it immediately faults in the entire
- * object instead of using on-demand faulting.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = &cma_obj->base;
- int ret;
-
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_DONTEXPAND;
-
- if (cma_obj->map_noncoherent) {
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
- ret = dma_mmap_pages(cma_obj->base.dev->dev,
- vma, vma->vm_end - vma->vm_start,
- virt_to_page(cma_obj->vaddr));
- } else {
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
- }
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-
-/**
- * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
- * scatter/gather table and get the virtual address of the buffer
- * @dev: DRM device
- * @attach: DMA-BUF attachment
- * @sgt: Scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table using
- * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
- * virtual address. This ensures that a CMA GEM object always has its virtual
- * address set. This address is released when the object is freed.
- *
- * This function can be used as the &drm_driver.gem_prime_import_sg_table
- * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
- * the necessary DRM driver operations.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *obj;
- struct iosys_map map;
- int ret;
-
- ret = dma_buf_vmap(attach->dmabuf, &map);
- if (ret) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(ret);
- }
-
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
- if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, &map);
- return obj;
- }
-
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = map.vaddr;
-
- return obj;
-}
-EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
-
-MODULE_DESCRIPTION("DRM CMA memory-management helpers");
-MODULE_IMPORT_NS(DMA_BUF);
-MODULE_LICENSE("GPL");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drm gem DMA helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_vma_manager.h>
+
+/**
+ * DOC: dma helpers
+ *
+ * The DRM GEM/DMA helpers are a means to provide buffer objects that are
+ * presented to the device as a contiguous chunk of memory. This is useful
+ * for devices that do not support scatter-gather DMA (either directly or
+ * by using an intimately attached IOMMU).
+ *
+ * For devices that access the memory bus through an (external) IOMMU,
+ * the buffer objects are allocated using a traditional page-based
+ * allocator and may be scattered through physical memory. However they
+ * are contiguous in the IOVA space so appear contiguous to devices using
+ * them.
+ *
+ * For other devices, the helpers rely on CMA to provide buffer
+ * objects that are physically contiguous in memory.
+ *
+ * For GEM callback helpers in struct &drm_gem_object functions, see likewise
+ * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
+ * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
+ */
+
+static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
+};
+
+/**
+ * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @private: true if used for internal purposes
+ *
+ * This function creates and initializes a GEM DMA object of the given size,
+ * but doesn't allocate any memory to back the object.
+ *
+ * Returns:
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+static struct drm_gem_dma_object *
+__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret = 0;
+
+ if (drm->driver->gem_create_object) {
+ gem_obj = drm->driver->gem_create_object(drm, size);
+ if (IS_ERR(gem_obj))
+ return ERR_CAST(gem_obj);
+ dma_obj = to_drm_gem_dma_obj(gem_obj);
+ } else {
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
+ return ERR_PTR(-ENOMEM);
+ gem_obj = &dma_obj->base;
+ }
+
+ if (!gem_obj->funcs)
+ gem_obj->funcs = &drm_gem_dma_default_funcs;
+
+ if (private) {
+ drm_gem_private_object_init(drm, gem_obj, size);
+
+ /* Always use writecombine for dma-buf mappings */
+ dma_obj->map_noncoherent = false;
+ } else {
+ ret = drm_gem_object_init(drm, gem_obj, size);
+ }
+ if (ret)
+ goto error;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret) {
+ drm_gem_object_release(gem_obj);
+ goto error;
+ }
+
+ return dma_obj;
+
+error:
+ kfree(dma_obj);
+ return ERR_PTR(ret);
+}
+
+/**
+ * drm_gem_dma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a DMA GEM object and allocates memory as backing store.
+ * The allocated memory will occupy a contiguous chunk of bus address space.
+ *
+ * For devices that are directly connected to the memory bus then the allocated
+ * memory will be physically contiguous. For devices that access through an
+ * IOMMU, then the allocated memory is not expected to be physically contiguous
+ * because having contiguous IOVAs is sufficient to meet a device's DMA
+ * requirements.
+ *
+ * Returns:
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+ size_t size)
+{
+ struct drm_gem_dma_object *dma_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ dma_obj = __drm_gem_dma_create(drm, size, false);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
+
+ if (dma_obj->map_noncoherent) {
+ dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
+ &dma_obj->paddr,
+ DMA_TO_DEVICE,
+ GFP_KERNEL | __GFP_NOWARN);
+ } else {
+ dma_obj->vaddr = dma_alloc_wc(drm->dev, size, &dma_obj->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ }
+ if (!dma_obj->vaddr) {
+ drm_dbg(drm, "failed to allocate buffer with size %zu\n",
+ size);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ return dma_obj;
+
+error:
+ drm_gem_object_put(&dma_obj->base);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_create);
+
+/**
+ * drm_gem_dma_create_with_handle - allocate an object with the given size and
+ * return a GEM handle to it
+ * @file_priv: DRM file-private structure to register the handle for
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @handle: return location for the GEM handle
+ *
+ * This function creates a DMA GEM object, allocating a chunk of memory as
+ * backing store. The GEM object is then added to the list of objects associated
+ * with the given file and a handle to it is returned.
+ *
+ * The allocated memory will occupy a contiguous chunk of bus address space.
+ * See drm_gem_dma_create() for more details.
+ *
+ * Returns:
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+static struct drm_gem_dma_object *
+drm_gem_dma_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, size_t size,
+ uint32_t *handle)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ dma_obj = drm_gem_dma_create(drm, size);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
+
+ gem_obj = &dma_obj->base;
+
+ /*
+	 * allocate an id in the idr table where the obj is registered,
+	 * and the handle holds the id that userspace can see.
+ */
+ ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dma_obj;
+}
+
+/**
+ * drm_gem_dma_free - free resources associated with a DMA GEM object
+ * @dma_obj: DMA GEM object to free
+ *
+ * This function frees the backing memory of the DMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself.
+ * If the buffer is imported and the virtual address is set, it is released.
+ */
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
+{
+ struct drm_gem_object *gem_obj = &dma_obj->base;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
+
+ if (gem_obj->import_attach) {
+ if (dma_obj->vaddr)
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
+ drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
+ } else if (dma_obj->vaddr) {
+ if (dma_obj->map_noncoherent)
+ dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->paddr,
+ DMA_TO_DEVICE);
+ else
+ dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->paddr);
+ }
+
+ drm_gem_object_release(gem_obj);
+
+ kfree(dma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_free);
+
+/**
+ * drm_gem_dma_dumb_create_internal - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This aligns the pitch and size arguments to the minimum required. This is
+ * an internal helper that can be wrapped by a driver to account for hardware
+ * with more specific alignment requirements. It should not be used directly
+ * as the driver's &drm_driver.dumb_create callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct drm_gem_dma_object *dma_obj;
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
+ return PTR_ERR_OR_ZERO(dma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
+
+/**
+ * drm_gem_dma_dumb_create - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace and pass the IOCTL data along to the
+ * drm_gem_dma_dumb_create_internal() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_dma_object *dma_obj;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ args->size = args->pitch * args->height;
+
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
+ return PTR_ERR_OR_ZERO(dma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
+
+const struct vm_operations_struct drm_gem_dma_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);
+
+#ifndef CONFIG_MMU
+/**
+ * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
+ * @filp: file object
+ * @addr: memory address
+ * @len: buffer size
+ * @pgoff: page offset
+ * @flags: memory flags
+ *
+ * This function is used on noMMU platforms to propose an address mapping
+ * for a given buffer.
+ * It's intended to be used as a direct handler for the struct
+ * &file_operations.get_unmapped_area operation.
+ *
+ * Returns:
+ * mapping address on success or a negative error code on failure.
+ */
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_gem_object *obj = NULL;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ pgoff,
+ len >> PAGE_SHIFT);
+ if (likely(node)) {
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ /*
+ * When the object is being freed, after it hits 0-refcnt it
+ * proceeds to tear down the object. In the process it will
+ * attempt to remove the VMA offset and so acquire this
+ * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+ * that matches our range, we know it is in the process of being
+ * destroyed and will be freed as soon as we release the lock -
+ * so we have to check for the 0-refcnted object and treat it as
+ * invalid.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!drm_vma_node_is_allowed(node, priv)) {
+ drm_gem_object_put(obj);
+ return -EACCES;
+ }
+
+ dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_object_put(obj);
+
+ return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
+#endif
+
+/**
+ * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @dma_obj: DMA GEM object
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ *
+ * This function prints paddr and vaddr for use in e.g. debugfs output.
+ */
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
+ struct drm_printer *p, unsigned int indent)
+{
+ drm_printf_indent(p, indent, "paddr=%pad\n", &dma_obj->paddr);
+ drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
+}
+EXPORT_SYMBOL(drm_gem_dma_print_info);
+
+/**
+ * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
+ * pages for a DMA GEM object
+ * @dma_obj: DMA GEM object
+ *
+ * This function exports a scatter/gather table by calling the standard
+ * DMA mapping API.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an
+ * ERR_PTR()-encoded negative error code on failure.
+ */
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
+{
+ struct drm_gem_object *obj = &dma_obj->base;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
+ dma_obj->paddr, obj->size);
+ if (ret < 0)
+ goto out;
+
+ return sgt;
+
+out:
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);
+
+/**
+ * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
+ * driver's scatter/gather table of pinned pages
+ * @dev: device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Imported buffers must be physically contiguous in memory
+ * (i.e. the scatter/gather table must contain a single entry). Drivers that
+ * use the DMA helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_dma_object *dma_obj;
+
+ /* check if the entries in the sg_table are contiguous */
+ if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
+ return ERR_PTR(-EINVAL);
+
+ /* Create a DMA GEM buffer. */
+ dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
+ if (IS_ERR(dma_obj))
+ return ERR_CAST(dma_obj);
+
+ dma_obj->paddr = sg_dma_address(sgt->sgl);
+ dma_obj->sgt = sgt;
+
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->paddr, attach->dmabuf->size);
+
+ return &dma_obj->base;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
+
+/**
+ * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
+ * address space
+ * @dma_obj: DMA GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing
+ * store.
+ *
+ * This function maps a buffer into the kernel's virtual address space.
+ * Since the DMA buffers are already mapped into the kernel virtual address
+ * space this simply returns the cached virtual address.
+ *
+ * Returns:
+ * 0 on success, or a negative error code otherwise.
+ */
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
+ struct iosys_map *map)
+{
+ iosys_map_set_vaddr(map, dma_obj->vaddr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
+
+/**
+ * drm_gem_dma_mmap - memory-map an exported DMA GEM object
+ * @dma_obj: DMA GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer into a userspace process's address space.
+ * In addition to the usual GEM VMA setup it immediately faults in the entire
+ * object instead of using on-demand faulting.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = &dma_obj->base;
+ int ret;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ if (dma_obj->map_noncoherent) {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ ret = dma_mmap_pages(dma_obj->base.dev->dev,
+ vma, vma->vm_end - vma->vm_start,
+ virt_to_page(dma_obj->vaddr));
+ } else {
+ ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
+ dma_obj->paddr, vma->vm_end - vma->vm_start);
+ }
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
+
+/**
+ * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
+ * scatter/gather table and get the virtual address of the buffer
+ * @dev: DRM device
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table using
+ * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a DMA GEM object always has its virtual
+ * address set. This address is released when the object is freed.
+ *
+ * This function can be used as the &drm_driver.gem_prime_import_sg_table
+ * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
+ * the necessary DRM driver operations.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_gem_object *obj;
+ struct iosys_map map;
+ int ret;
+
+ ret = dma_buf_vmap(attach->dmabuf, &map);
+ if (ret) {
+ DRM_ERROR("Failed to vmap PRIME buffer\n");
+ return ERR_PTR(ret);
+ }
+
+ obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ dma_buf_vunmap(attach->dmabuf, &map);
+ return obj;
+ }
+
+ dma_obj = to_drm_gem_dma_obj(obj);
+ dma_obj->vaddr = map.vaddr;
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
+
+MODULE_DESCRIPTION("DRM DMA memory-management helpers");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL");
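To show how the renamed internal helper is meant to be wrapped (a sketch only;
the foo_ name and the 64-byte pitch constraint are hypothetical), a driver with
extra pitch alignment requirements fixes up the arguments and then forwards to
drm_gem_dma_dumb_create_internal()::

    static int foo_dumb_create(struct drm_file *file_priv,
                               struct drm_device *drm,
                               struct drm_mode_create_dumb *args)
    {
            /* Hypothetical hardware constraint: pitches must be 64-byte aligned. */
            unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

            args->pitch = ALIGN(pitch, 64);

            return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
    }

The wrapper is then hooked up with
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(foo_dumb_create), exactly as the komeda
and malidp hunks above do.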
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in the GEM code
- * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
+ * (e.g., drm_gem_dma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
dev->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(fsl_dcu_drm_fops);
static const struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
int index;
tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
depends on DRM && OF && ARM64
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have a hisilicon Kirin chipsets(hi6220).
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
- struct drm_gem_cma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
};
-DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ade_fops);
static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
.date = "20150718",
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);
static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
static const struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = new_plane_state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
if (!fb || !new_plane_state->crtc)
return 0;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
- WARN_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ WARN_ON(!dma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
format->char_per_block[0] * (state->src.x1 >> 16);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
2 * format->char_per_block[0] * (state->src.x1 >> 17);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p2_ba = cma_obj->paddr + fb->offsets[1] +
+ p2_ba = dma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
(state->src.x1 >> 17)) << 1);
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
-DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops);
void imx_drm_connector_destroy(struct drm_connector *connector)
{
args->width = ALIGN(width, 8);
- ret = drm_gem_cma_dumb_create(file_priv, drm, args);
+ ret = drm_gem_dma_dumb_create(file_priv, drm, args);
if (ret)
return ret;
static const struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
.ioctls = imx_drm_ioctls,
.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
.fops = &imx_drm_driver_fops,
void imx_drm_mode_config_init(struct drm_device *drm);
-struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
+struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <video/imx-ipu-v3.h>
drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_dma_get_gem_obj(fb, plane);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, plane);
+ BUG_ON(!dma_obj);
- return cma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
+ return dma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
fb->format->cpp[plane] * x;
}
drm_plane_state_to_ubo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 1);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 1);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ return dma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
fb->format->cpp[1] * x - eba;
}
drm_plane_state_to_vbo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 2);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 2);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ return dma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
fb->format->cpp[2] * x - eba;
}
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
kfree(priv_state);
}
-DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &ingenic_drm_fops,
.gem_create_object = ingenic_drm_gem_create_object,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have Intel's KeemBay SOC which integrates
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
free_irq(kmb->irq_lcd, drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
/* GEM Operations */
.fops = &fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
.date = DRIVER_DATE,
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include "kmb_drv.h"
depends on OF || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_DMA_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM display driver for the logiCVC programmable logic block from Xylon
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
#include "logicvc_of.h"
#include "logicvc_regs.h"
-DEFINE_DRM_GEM_CMA_FOPS(logicvc_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(logicvc_drm_fops);
-static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+static int logicvc_drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
{
/* Stride is always fixed to its configuration value. */
args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
- return drm_gem_cma_dumb_create_internal(file_priv, drm_dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm_dev, args);
}
static struct drm_driver logicvc_drm_driver = {
.major = 1,
.minor = 0,
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create),
};
static struct regmap_config logicvc_drm_regmap_config = {
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_panel.h>
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the ST-Ericsson MCDE
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
.driver_features =
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
depends on HAVE_ARM_SMCCC
depends on OF
depends on MTK_MMSYS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
.mmap = mtk_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
select REGMAP_MMIO
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
args->size = PAGE_ALIGN(args->pitch * args->height);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver meson_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- /* CMA Ops */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
+ /* DMA Ops */
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
/* Misc */
.fops = &fops,
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_overlay.h"
#include "meson_registers.h"
plane);
struct drm_framebuffer *fb = new_state->fb;
struct meson_drm *priv = meson_overlay->priv;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
bool interlace_mode;
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_plane.h"
#include "meson_registers.h"
struct drm_rect dest = drm_plane_state_dest(new_state);
struct meson_drm *priv = meson_plane->priv;
struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire CMA chunk carved out in early startup in
+ * Grab the entire DMA chunk carved out in early startup in
* mach-msm:
*/
} else if (!msm_use_mmu(dev)) {
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
drm->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver lcdif_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
pm_runtime_disable(drm->dev);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver mxsfb_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank.h>
tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI
depends on DRM_KMS_HELPER
- depends on DRM_GEM_CMA_HELPER
+ depends on DRM_GEM_DMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops);
static struct drm_driver ili9341_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
if (priv->use_device_memory)
return ERR_PTR(-EINVAL);
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver pl111_drm_driver = {
.driver_features =
.major = 1,
.minor = 0,
.patchlevel = 0,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_gem_dma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "rcar_cmm.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
* DRM operations
*/
-DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
+DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
- /* Create a CMA GEM buffer. */
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ /* Create a DMA GEM buffer. */
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
gem_obj->funcs = &rcar_du_gem_funcs;
drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
- cma_obj->map_noncoherent = false;
+ dma_obj->map_noncoherent = false;
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret) {
drm_gem_object_release(gem_obj);
- kfree(cma_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
- cma_obj->paddr = 0;
- cma_obj->sgt = sgt;
+ dma_obj->paddr = 0;
+ dma_obj->sgt = sgt;
return gem_obj;
}
args->pitch = roundup(min_pitch, align);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_group.h"
if (state->source == RCAR_DU_PLANE_MEMORY) {
struct drm_framebuffer *fb = state->state.fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int i;
if (state->format->planes == 2)
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
- struct drm_gem_cma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
if (gem->sgt) {
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
.vmap = rockchip_gem_prime_vmap,
.vunmap = rockchip_gem_prime_vunmap,
.mmap = rockchip_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct rockchip_gem_object *
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an SH Mobile chipset.
If M is selected the module will be called shmob-drm.
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
return IRQ_HANDLED;
}
-DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/types.h>
-struct drm_gem_cma_object;
+struct drm_gem_dma_object;
struct shmob_drm_device;
struct shmob_drm_format_info {
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = splane->format->yuv ? 8 : splane->format->bpp;
tristate "DRM Support for Unisoc SoCs Platform"
depends on ARCH_SPRD || COMPILE_TEST
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "sprd_drm.h"
static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
{
struct dpu_context *ctx = &dpu->ctx;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb = state->fb;
u32 addr, size, offset, pitch, blend, format, rotation;
u32 src_x = state->src_x >> 16;
size = (src_w & 0xffff) | (src_h << 16);
for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_dma_get_gem_obj(fb, i);
- addr = cma_obj->paddr + fb->offsets[i];
+ dma_obj = drm_fb_dma_get_gem_obj(fb, i);
+ addr = dma_obj->paddr + fb->offsets[i];
if (i == 0)
layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
}
-DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sprd_drm_fops);
static struct drm_driver sprd_drm_drv = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &sprd_drm_fops,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
select RESET_CONTROLLER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
u32 y, x;
u32 val;
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Convert ARGB8888 to CLUT8 */
- sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
+ sti_cursor_argb8888_to_clut8(cursor, (u32 *)dma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
dev->mode_config.normalize_zpos = true;
}
-DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sti_driver_fops);
static const struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &sti_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long)dma_obj->paddr);
/* pixel memory location */
bpp = fb->format->cpp[0];
- top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml = (u32)dma_obj->paddr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)cma_obj->paddr,
+ (unsigned long)dma_obj->paddr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
- cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long)dma_obj->paddr);
/* Buffer planes address */
- cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
- cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+ cmd->top.current_luma = (u32)dma_obj->paddr + fb->offsets[0];
+ cmd->top.current_chroma = (u32)dma_obj->paddr + fb->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = fb->pitches[0];
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
.atomic_commit = drm_atomic_helper_commit,
};
-static int stm_gem_cma_dumb_create(struct drm_file *file,
+static int stm_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = roundup(min_pitch, 128);
args->height = roundup(args->height, 4);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drv_driver_fops);
static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create),
};
static int drv_load(struct drm_device *ddev)
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
tristate "DRM Support for Allwinner A10 Display Engine"
depends on DRM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_backend.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
/* The hardware only allows even pitches for YUV buffers. */
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sun4i_drv_fops);
static const struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.minor = 0,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include "sun4i_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_drv.h"
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_mixer.h"
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
dma_addr_t paddr;
u32 ch_base;
int bpp;
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_csc.h"
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 dx, dy, src_x, src_y;
dma_addr_t paddr;
u32 ch_base;
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
 Display SubSystem. There are currently three Keystone family
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "tidss_crtc.h"
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include "tidss_crtc.h"
dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
drm_kms_helper_poll_fini(ddev);
}
-DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+DEFINE_DRM_GEM_DMA_FOPS(tidss_fops);
static const struct drm_driver tidss_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &tidss_fops,
.release = tidss_release,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "tidss",
.desc = "TI Keystone DSS",
.date = "20180215",
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
{
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver tilcdc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
#endif
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DBI
help
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
tristate "DRM support for MI0283QT"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
struct drm_plane_state *state)
{
struct arcpgu_drm_private *arcpgu;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
if (!pipe->plane.state->fb)
return;
.atomic_commit = drm_atomic_helper_commit,
};
-DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
+DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
{
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
DRM_SIMPLE_MODE(320, 480, 60, 75),
};
-DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+DEFINE_DRM_GEM_DMA_FOPS(hx8357d_fops);
static const struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
DRM_SIMPLE_MODE(128, 160, 28, 35),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9163_fops);
static struct drm_driver ili9163_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9163_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
if (ret)
goto err_msg;
} else {
- tr = cma_obj->vaddr;
+ tr = dma_obj->vaddr;
}
switch (dbidev->rotation) {
DRM_SIMPLE_MODE(176, 220, 35, 44),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9225_fops);
static const struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_fops);
static const struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
DRM_SIMPLE_MODE(480, 320, 73, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9486_fops);
static const struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
DRM_SIMPLE_MODE(320, 240, 58, 43),
};
-DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
+DEFINE_DRM_GEM_DMA_FOPS(mi0283qt_fops);
static const struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
.update = mipi_dbi_pipe_update,
};
-DEFINE_DRM_GEM_CMA_FOPS(panel_mipi_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
struct drm_rect clip;
int idx, ret = 0;
if (ret)
goto out_free;
- drm_fb_xrgb8888_to_mono(buf, 0, cma_obj->vaddr, fb, &clip);
+ drm_fb_xrgb8888_to_mono(buf, 0, dma_obj->vaddr, fb, &clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
0xff, 0xfe, 0x00, 0x00 };
-DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
+DEFINE_DRM_GEM_DMA_FOPS(repaper_fops);
static const struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_dma_get_gem_obj(fb, 0);
- void *src = cma_obj->vaddr;
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ void *src = dma_obj->vaddr;
int ret = 0;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
-DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7586_fops);
static const struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
.rgb = true,
};
-DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7735r_fops);
static const struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Faraday TV Encoder
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
return ret;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver tve200_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
}
job->bo = kvmalloc_array(job->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!job->bo) {
DRM_DEBUG("Failed to allocate validated BO pointers\n");
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have DMA enabled.\n");
return -ENOMEM;
}
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
*
* The VC4 GPU architecture (both scanout and rendering) has direct
* access to system memory with no MMU in between. To support it, we
- * use the GEM CMA helper functions to allocate contiguous ranges of
+ * use the GEM DMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
- * Since the CMA allocator is very slow, we keep a cache of recently
+ * Since the DMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/
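The comment above summarizes what the renamed helpers provide: buffer objects backed by physically contiguous memory obtained through the DMA mapping API. As a rough, hypothetical sketch (the function name below is invented for illustration and is not part of this patch), a driver-side allocation built on the renamed helpers looks roughly like this:

#include <linux/err.h>
#include <drm/drm_gem_dma_helper.h>

/*
 * Illustrative only: allocate a contiguous buffer through the renamed
 * GEM DMA helpers. drm_gem_dma_create() returns an ERR_PTR on failure.
 */
static struct drm_gem_dma_object *
example_create_contiguous_bo(struct drm_device *drm, size_t size)
{
	struct drm_gem_dma_object *dma_obj;

	dma_obj = drm_gem_dma_create(drm, size);
	if (IS_ERR(dma_obj))
		return dma_obj;

	/*
	 * dma_obj->vaddr is the kernel mapping of the buffer and
	 * dma_obj->paddr is the DMA address to program into the hardware.
	 */
	return dma_obj;
}

A driver that needs the caching behaviour described in the comment above would wrap such a call with its own cache, as vc4 does in the hunks that follow, rather than call the helper directly on every allocation.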
bo->validated_shader = NULL;
}
- drm_gem_cma_free(&bo->base);
+ drm_gem_dma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
- * This lets the CMA helpers allocate object structs for us, and keep
+ * This lets the DMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
return bo;
}
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
+ dma_obj = drm_gem_dma_create(dev, size);
+ if (IS_ERR(dma_obj)) {
/*
- * If we've run out of CMA memory, kill the cache of
- * CMA allocations we've got laying around and try again.
+ * If we've run out of DMA memory, kill the cache of
+ * DMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
/*
- * Still not enough CMA memory, purge the userspace BO
+ * Still not enough DMA memory, purge the userspace BO
* cache and retry.
* This is sub-optimal since we purge the whole userspace
 * BO cache which forces users that want to re-use the BO to
* restore its initial content.
* Ideally, we should purge entries one by one and retry
- * after each to see if CMA allocation succeeds. Or even
+ * after each to see if DMA allocation succeeds. Or even
* better, try to find an entry with at least the same
* size.
*/
vc4_bo_userspace_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from CMA:\n");
+ DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
- bo = to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled
* only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
bo->madv = __VC4_MADV_NOTSUPP;
mutex_lock(&vc4->bo_lock);
- vc4_bo_set_label(&cma_obj->base, type);
+ vc4_bo_set_label(&dma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
goto out;
}
- /* If this object was partially constructed but CMA allocation
+ /* If this object was partially constructed but DMA allocation
* had failed, just free it. Can also happen when the BO has been
* purged.
*/
return -EINVAL;
}
- return drm_gem_cma_mmap(&bo->base, vma);
+ return drm_gem_dma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo =
+ struct drm_gem_dma_object *dma_bo =
drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
- struct drm_gem_cma_object *cma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
- ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+ ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_bo = drm_fb_dma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
if (ret)
return ret;
- return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
.debugfs_init = vc4_debugfs_init,
#endif
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
.fops = &vc4_drm_fops,
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
}
struct vc4_bo {
- struct drm_gem_cma_object base;
+ struct drm_gem_dma_object base;
/* seqno of the last job to render using this BO. */
uint64_t seqno;
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
+ return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
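to_vc4_bo() illustrates the conversion pattern after the rename: the driver embeds struct drm_gem_dma_object as the first member of its own buffer object and upcasts with to_drm_gem_dma_obj() plus container_of(). A generic sketch of the same pattern for a hypothetical driver:

	/* Hypothetical driver BO built on the GEM DMA helper object,
	 * mirroring the vc4_bo layout above.
	 */
	struct my_bo {
		struct drm_gem_dma_object base;
		/* driver-private state follows */
	};

	static inline struct my_bo *to_my_bo(struct drm_gem_object *obj)
	{
		return container_of(to_drm_gem_dma_obj(obj), struct my_bo, base);
	}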
struct vc4_fence {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_cma_object **bo;
+ struct drm_gem_dma_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
* the binner temporary storage, this is all the BOs written
* by the job.
*/
- struct drm_gem_cma_object *rcl_write_bo[4];
+ struct drm_gem_dma_object *rcl_write_bo[4];
uint32_t rcl_write_bo_count;
/* Pointers for our position in vc4->job_list */
/* This is the BO where we store the validated command lists, shader
* records, and uniforms.
*/
- struct drm_gem_cma_object *exec_bo;
+ struct drm_gem_dma_object *exec_bo;
/**
* This tracks the per-shader-record state (packet 64) that
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
-struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
uint32_t hindex);
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *fbo,
+ struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
}
exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- struct drm_gem_cma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
/* We're skipping the address adjustment for negative origin,
#include "vc4_packet.h"
struct vc4_rcl_setup {
- struct drm_gem_cma_object *color_read;
- struct drm_gem_cma_object *color_write;
- struct drm_gem_cma_object *zs_read;
- struct drm_gem_cma_object *zs_write;
- struct drm_gem_cma_object *msaa_color_write;
- struct drm_gem_cma_object *msaa_zs_write;
-
- struct drm_gem_cma_object *rcl;
+ struct drm_gem_dma_object *color_read;
+ struct drm_gem_dma_object *color_write;
+ struct drm_gem_dma_object *zs_read;
+ struct drm_gem_dma_object *zs_write;
+ struct drm_gem_dma_object *msaa_color_write;
+ struct drm_gem_dma_object *msaa_zs_write;
+
+ struct drm_gem_dma_object *rcl;
u32 next_offset;
u32 next_write_bo_index;
* coordinates packet, and instead just store to the address given.
*/
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *bo,
+ struct drm_gem_dma_object *bo,
struct drm_vc4_submit_rcl_surface *surf,
uint8_t x, uint8_t y)
{
}
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *obj,
+ struct drm_gem_dma_object *obj,
struct drm_vc4_submit_rcl_surface *surf)
{
struct drm_vc4_submit_cl *args = exec->args;
}
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
}
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf,
bool is_write)
{
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
struct vc4_rcl_setup *setup,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
* if it doesn't fit within the buffer that we allocated up front.
* However, it turns out that 16MB is "enough for anybody", and
* real-world applications run into allocation failures from the
- * overall CMA pool before they make scenes complicated enough to run
+ * overall DMA pool before they make scenes complicated enough to run
* out of bin space.
*/
static int bin_bo_alloc(struct vc4_dev *vc4)
dev_err(&v3d->pdev->dev,
"Failed to allocate memory for tile binning: "
- "%d. You may need to enable CMA or give it "
+ "%d. You may need to enable DMA or give it "
"more memory.",
ret);
break;
height <= 4 * utile_height(cpp));
}
-struct drm_gem_cma_object *
+struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
return obj;
}
-static struct drm_gem_cma_object *
+static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
}
bool
-vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
- struct drm_gem_cma_object *ib;
+ struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
- struct drm_gem_cma_object *tex;
+ struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
+ struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
}
for (i = 0; i < nr_attributes; i++) {
- struct drm_gem_cma_object *vbo =
+ struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
}
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
select DMA_ENGINE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
help
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
/* Enforce the alignment constraints of the DMA engine. */
args->pitch = ALIGN(pitch, dpsub->dma_align);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
static struct drm_framebuffer *
* DRM/KMS Driver
*/
-DEFINE_DRM_GEM_CMA_FOPS(zynqmp_dpsub_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
static const struct drm_driver zynqmp_dpsub_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
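zynqmp-dpsub only overrides dumb_create; a driver that can live with the default dumb-buffer implementation needs even less glue. A minimal sketch, assuming a hypothetical driver (names are placeholders), of the boilerplate the renamed macros generate:

	/* DEFINE_DRM_GEM_DMA_FOPS() provides the file_operations and
	 * DRM_GEM_DMA_DRIVER_OPS fills in the GEM/PRIME callbacks,
	 * including the default drm_gem_dma_dumb_create().
	 */
	DEFINE_DRM_GEM_DMA_FOPS(my_drm_fops);

	static const struct drm_driver my_drm_driver = {
		.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
		DRM_GEM_DMA_DRIVER_OPS,
		.fops = &my_drm_fops,
		.name = "my-drm",
		.desc = "hypothetical GEM DMA helper user",
	};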
struct drm_framebuffer;
struct drm_plane_state;
-struct drm_gem_cma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
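The framebuffer-side helpers keep their drm_fb_dma_ prefix but now hand back struct drm_gem_dma_object, as the vc4 plane code above shows. A sketch of how an atomic plane update might look up the buffer behind a framebuffer (the hardware write and its function name are illustrative only):

	/* Illustrative only: fetch the GEM DMA object backing plane 0 of a
	 * framebuffer and program its DMA address into the scanout engine.
	 */
	static void my_plane_atomic_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
	{
		struct drm_plane_state *new_state =
			drm_atomic_get_new_plane_state(state, plane);
		struct drm_framebuffer *fb = new_state->fb;
		struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

		/* paddr is the buffer's DMA address; offsets[0] is the start
		 * of the first plane within it.
		 */
		my_hw_set_scanout_address(dma_obj->paddr + fb->offsets[0]);
	}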
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
- * storage (contiguous CMA memory, special reserved blocks). In this
+ * storage (contiguous DMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
struct file *filp;
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_GEM_CMA_HELPER_H__
-#define __DRM_GEM_CMA_HELPER_H__
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_gem.h>
-
-struct drm_mode_create_dumb;
-
-/**
- * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
- * @base: base GEM object
- * @paddr: physical address of the backing memory
- * @sgt: scatter/gather table for imported PRIME buffers. The table can have
- * more than one entry but they are guaranteed to have contiguous
- * DMA addresses.
- * @vaddr: kernel virtual address of the backing memory
- * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
- */
-struct drm_gem_cma_object {
- struct drm_gem_object base;
- dma_addr_t paddr;
- struct sg_table *sgt;
-
- /* For objects with DMA memory allocated by GEM CMA */
- void *vaddr;
-
- bool map_noncoherent;
-};
-
-#define to_drm_gem_cma_obj(gem_obj) \
- container_of(gem_obj, struct drm_gem_cma_object, base)
-
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size);
-void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
-void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
- struct drm_printer *p, unsigned int indent);
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
-int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
- struct iosys_map *map);
-int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-/*
- * GEM object functions
- */
-
-/**
- * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
- * @obj: GEM object to free
- *
- * This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers
- * should use it as their &drm_gem_object_funcs.free handler.
- */
-static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
-{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
- drm_gem_cma_free(cma_obj);
-}
-
-/**
- * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs
- * @p: DRM printer
- * @indent: Tab indentation level
- * @obj: GEM object
- *
- * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers
- * should use this function as their &drm_gem_object_funcs.print_info handler.
- */
-static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj)
-{
- const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
- drm_gem_cma_print_info(cma_obj, p, indent);
-}
-
-/**
- * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table()
- * @obj: GEM object
- *
- * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should
- * use it as their &drm_gem_object_funcs.get_sg_table handler.
- *
- * Returns:
- * A pointer to the scatter/gather table of pinned pages or NULL on failure.
- */
-static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
-{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
- return drm_gem_cma_get_sg_table(cma_obj);
-}
-
-/*
- * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap()
- * @obj: GEM object
- * @map: Returns the kernel virtual address of the CMA GEM object's backing store.
- *
- * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj,
- struct iosys_map *map)
-{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
- return drm_gem_cma_vmap(cma_obj, map);
-}
-
-/**
- * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap()
- * @obj: GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function wraps drm_gem_cma_mmap(). Drivers that employ the cma helpers should
- * use it as their &drm_gem_object_funcs.mmap handler.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
- return drm_gem_cma_mmap(cma_obj, vma);
-}
-
-/*
- * Driver ops
- */
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that
- * override the default implementation of &struct rm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS \
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
- * ensuring a virtual address
- * on the buffer
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that
- * override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
- * address on the buffer
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS
- * instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP \
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
-/*
- * File ops
- */
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
-
-/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- }
-
-#endif /* __DRM_GEM_CMA_HELPER_H__ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_GEM_DMA_HELPER_H__
+#define __DRM_GEM_DMA_HELPER_H__
+
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_gem.h>
+
+struct drm_mode_create_dumb;
+
+/**
+ * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
+ * @base: base GEM object
+ * @paddr: DMA address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers. The table can have
+ * more than one entry but they are guaranteed to have contiguous
+ * DMA addresses.
+ * @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
+ */
+struct drm_gem_dma_object {
+ struct drm_gem_object base;
+ dma_addr_t paddr;
+ struct sg_table *sgt;
+
+ /* For objects with DMA memory allocated by GEM DMA */
+ void *vaddr;
+
+ bool map_noncoherent;
+};
+
+#define to_drm_gem_dma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_dma_object, base)
+
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+ size_t size);
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj);
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
+ struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj);
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
+ struct iosys_map *map);
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_dma_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_dma_free(). Drivers that employ the DMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_dma_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_free(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_print_info(dma_obj, p, indent);
+}
+
+/**
+ * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_get_sg_table(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_vmap(dma_obj, map);
+}
+
+/**
+ * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_dma_mmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_mmap(dma_obj, vma);
+}
+
+/*
+ * Driver ops
+ */
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS \
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations
+ * ensuring a virtual address
+ * on the buffer
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = dumb_create_func, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS
+ * instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP \
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_dma_get_unmapped_area,
+#else
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for DMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_DMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ }
+
+#endif /* __DRM_GEM_DMA_HELPER_H__ */
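The one override pattern that survives the rename unchanged is the custom dumb_create seen in vc4 and zynqmp-dpsub above: adjust the pitch for a hardware constraint, then delegate the allocation to drm_gem_dma_dumb_create_internal(). A hedged sketch of that pattern (the 64-byte alignment and the function name are made up for illustration):

	/* Hypothetical dumb_create override in the style of the vc4 and
	 * zynqmp-dpsub callbacks: enforce a driver-specific pitch alignment,
	 * then let the GEM DMA helper perform the allocation.
	 */
	static int my_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
				  struct drm_mode_create_dumb *args)
	{
		unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

		args->pitch = ALIGN(pitch, 64);

		return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
	}

	/* Wired into the driver with:
	 *	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(my_dumb_create),
	 */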