}
return domain;
}
+
+ #if defined(CONFIG_DEBUG_FS)
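+ /* Emit " FLAG" when the BO was created with the AMDGPU_GEM_CREATE_FLAG bit set */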
+ #define amdgpu_bo_print_flag(m, bo, flag) \
+ do { \
+ if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
+ seq_printf((m), " " #flag); \
+ } \
+ } while (0)
+
+ /**
+ * amdgpu_bo_print_info - print BO info in debugfs file
+ *
+ * @id: Index or ID of the BO
+ * @bo: Requested BO for printing info
+ * @m: debugfs file
+ *
+ * Print BO information to the debugfs file.
+ *
+ * Returns:
+ * Size of the BO in bytes.
+ */
+ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
+ {
+ struct dma_buf_attachment *attachment;
+ struct dma_buf *dma_buf;
+ unsigned int domain;
+ const char *placement;
+ unsigned int pin_count;
+ u64 size;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ placement = "VRAM";
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ placement = " GTT";
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ placement = " CPU";
+ break;
+ }
+
+ size = amdgpu_bo_size(bo);
+ seq_printf(m, "\t\t0x%08x: %12lld byte %s",
+ id, size, placement);
+
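+ /* The BO may be pinned or unpinned concurrently; sample the count once */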
+ pin_count = READ_ONCE(bo->tbo.pin_count);
+ if (pin_count)
+ seq_printf(m, " pin count %d", pin_count);
+
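+ /* DMA-buf state can change while the debugfs file is generated; sample it once */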
+ dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+ attachment = READ_ONCE(bo->tbo.base.import_attach);
+
+ if (attachment)
+ seq_printf(m, " imported from %p", dma_buf);
+ else if (dma_buf)
+ seq_printf(m, " exported as %p", dma_buf);
+
+ amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
+ amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
+ amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
+ amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
+ amdgpu_bo_print_flag(m, bo, SHADOW);
+ amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
+ amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
+ amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
+
+ seq_puts(m, "\n");
+
+ return size;
+ }
+ #endif
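For orientation, the new helper is meant to be driven from a debugfs show() callback that walks buffer objects and sums the returned sizes. The sketch below is illustrative only and is not part of the patch; the callback, list and lock names (amdgpu_debugfs_bo_info_show, bo_list, bo_list_lock, bo_list_node) are assumptions, not existing amdgpu APIs:

static int amdgpu_debugfs_bo_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;	/* assumed to be set when the file is created */
	struct amdgpu_bo *bo;
	u64 total = 0;
	int id = 0;

	/* hypothetical per-device BO list and lock, not part of this patch */
	mutex_lock(&adev->bo_list_lock);
	list_for_each_entry(bo, &adev->bo_list, bo_list_node)
		total += amdgpu_bo_print_info(id++, bo, m);
	mutex_unlock(&adev->bo_list_lock);

	seq_printf(m, "Total:\t\t%llu bytes\n", total);
	return 0;
}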
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
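+/* Forward declarations: these static helpers are referenced earlier in the file than they are defined */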
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
+
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
- uint64_t size)
+ uint64_t size_in_page)
{
return ttm_range_man_init(&adev->mman.bdev, type,
- TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
- false, size >> PAGE_SHIFT);
+ false, size_in_page);
}
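With the parameter now taken in pages rather than bytes, the byte-to-page conversion moves to the call sites. A hedged example of a matching caller adjustment (the gds_size_bytes value is illustrative, not taken from this patch):

/* hypothetical caller: the byte size is converted to pages at the call site now */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, gds_size_bytes >> PAGE_SHIFT);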
/**
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc *dc = adev->dm.dc;
- struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
int ret = -EINVAL;
- trace_amdgpu_dm_crtc_atomic_check(state);
+ trace_amdgpu_dm_crtc_atomic_check(crtc_state);
+
- dm_update_crtc_active_planes(crtc, state);
+ dm_update_crtc_active_planes(crtc, crtc_state);
if (unlikely(!dm_crtc_state->stream &&
- modeset_required(state, NULL, dm_crtc_state->stream))) {
+ modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
WARN_ON(1);
return ret;
}
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {