*/
flags = I915_BO_ALLOC_USER;
- ret = mr->ops->init_object(mr, obj, size, flags);
+ ret = mr->ops->init_object(mr, obj, size, 0, flags);
if (ret)
return ret;
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
+/**
+ * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
+ * minimum page size for the backing pages.
+ * @i915: The i915 instance.
+ * @size: The size in bytes for the object. Note that we need to round the size
+ * up depending on the @page_size. The final object size can be fished out from
+ * the drm GEM object.
+ * @page_size: The requested minimum page size in bytes for this object. This is
+ * useful if we need something bigger than the region's min_page_size due to some
+ * hw restriction, or in some very specialised cases where it needs to be
+ * smaller, because the internal fragmentation cost of rounding up the object
+ * size would otherwise be too great.
+ * @flags: The optional BO allocation flags.
+ *
+ * Note that this interface assumes you know what you are doing when forcing the
+ * @page_size. If this is smaller than the region's min_page_size then the
+ * resulting object must never be inserted into any GTT, since that might lead
+ * to undefined behaviour.
+ *
+ * Return: The object pointer, which might be an ERR_PTR in the case of failure.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ size, page_size, flags);
+}
+
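A minimal usage sketch (editor's illustration, not part of the patch): the wrapper example_alloc_64k_lmem and the 1M/64K sizes are assumptions; only __i915_gem_object_create_lmem_with_ps, i915_gem_object_put and the usual helpers come from the driver.

static int example_alloc_64k_lmem(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;

        /* Ask for 1M of lmem backed by pages of at least 64K. */
        obj = __i915_gem_object_create_lmem_with_ps(i915, SZ_1M, SZ_64K, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* @size may have been rounded up; the final size lives in the GEM object. */
        GEM_BUG_ON(!IS_ALIGNED(obj->base.size, SZ_64K));

        i915_gem_object_put(obj);
        return 0;
}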
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags)
{
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
- size, flags);
+ size, 0, flags);
}
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags);
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ resource_size_t default_page_size;
int err;
/*
if (!mem)
return ERR_PTR(-ENODEV);
- size = round_up(size, mem->min_page_size);
+ default_page_size = mem->min_page_size;
+ if (page_size)
+ default_page_size = page_size;
+
+ GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
+ GEM_BUG_ON(default_page_size < PAGE_SIZE);
+
+ size = round_up(size, default_page_size);
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
if (!obj)
return ERR_PTR(-ENOMEM);
- err = mem->ops->init_object(mem, obj, size, flags);
+ err = mem->ops->init_object(mem, obj, size, page_size, flags);
if (err)
goto err_object_free;
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags);
#endif
static int shmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags)
{
static struct lock_class_key lock_class;
resource_size_t size)
{
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
- size, 0);
+ size, 0, 0);
}
/* Allocate a new GEM object and fill it with the supplied data */
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags)
{
struct drm_i915_private *i915 = mem->i915;
i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
+ return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
static int init_stolen_smem(struct intel_memory_region *mem)
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags)
{
static struct lock_class_key lock_class;
obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
+ /* Forcing the page size is kernel internal only */
+ GEM_BUG_ON(page_size && obj->mm.n_placements);
+
/*
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
bo_type, &i915_sys_placement,
- mem->min_page_size >> PAGE_SHIFT,
+ page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
if (ret)
return i915_ttm_err_to_gem(ret);
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags);
#endif
int i;
for (i = 0; i < ARRAY_SIZE(flags); ++i) {
- obj = i915_gem_object_create_region(mem, page_size,
+ obj = i915_gem_object_create_region(mem,
+ page_size, page_size,
flags[i]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
GEM_BUG_ON(!src_mr);
/* Switch object backing-store on create */
- obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0);
+ obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
struct drm_i915_gem_object *obj;
int err;
- obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
+ obj = i915_gem_object_create_region(mr, sizes[i], 0, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
struct drm_i915_gem_object *obj;
int err;
- obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
struct drm_i915_gem_object *obj;
int err;
- obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
struct drm_i915_gem_object *obj;
int err;
- obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
struct i915_buddy_mm mm;
struct list_head reserved;
struct mutex lock;
+ u64 default_page_size;
};
static struct i915_ttm_buddy_manager *
GEM_BUG_ON(!bman_res->base.num_pages);
size = bman_res->base.num_pages << PAGE_SHIFT;
- min_page_size = bo->page_alignment << PAGE_SHIFT;
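+ /* Use the manager default unless the BO requests a specific alignment. */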
+ min_page_size = bman->default_page_size;
+ if (bo->page_alignment)
+ min_page_size = bo->page_alignment << PAGE_SHIFT;
+
GEM_BUG_ON(min_page_size < mm->chunk_size);
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
* @type: Memory type we want to manage
* @use_tt: Set use_tt for the manager
* @size: The size in bytes to manage
+ * @default_page_size: The default minimum page size in bytes for allocations.
+ * This must be at least as large as @chunk_size, and can be overridden on a
+ * per-BO basis by setting the BO page_alignment, making it larger or smaller
+ * as needed.
* @chunk_size: The minimum page size in bytes for our allocations i.e
* order-zero
*
*/
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
unsigned int type, bool use_tt,
- u64 size, u64 chunk_size)
+ u64 size, u64 default_page_size,
+ u64 chunk_size)
{
struct ttm_resource_manager *man;
struct i915_ttm_buddy_manager *bman;
mutex_init(&bman->lock);
INIT_LIST_HEAD(&bman->reserved);
+ GEM_BUG_ON(default_page_size < chunk_size);
+ bman->default_page_size = default_page_size;
man = &bman->manager;
man->use_tt = use_tt;
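For illustration only (bdev, lmem_size and the 64K/2M figures below are placeholder assumptions, not values from the patch), the new parameter pairs with the existing per-BO override roughly like this:

        /* Manager whose allocations default to a 64K minimum page. */
        err = i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM, false,
                                      lmem_size, SZ_64K, PAGE_SIZE);

        /*
         * An individual BO can still override that default via page_alignment
         * (in units of PAGE_SIZE), e.g. forcing 2M pages for one allocation.
         */
        bo->page_alignment = SZ_2M >> PAGE_SHIFT;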
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
- u64 size, u64 chunk_size);
+ u64 size, u64 default_page_size, u64 chunk_size);
int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
unsigned int type);
int (*init_object)(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags);
};
int ret;
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
- resource_size(&mem->region), PAGE_SIZE);
+ resource_size(&mem->region),
+ mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;
int ret;
mock_bo.base.size = size;
- mock_bo.page_alignment = mem->min_page_size >> PAGE_SHIFT;
place.flags = flags;
ret = man->func->alloc(man, &mock_bo, &place, &res);
resource_size_t size = page_num * page_size;
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_create_region(mem, size, 0);
+ obj = i915_gem_object_create_region(mem, size, 0, 0);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
struct drm_i915_gem_object *obj;
int err;
- obj = i915_gem_object_create_region(mem, size, flags);
+ obj = i915_gem_object_create_region(mem, size, 0, flags);
if (IS_ERR(obj))
return obj;
return err;
}
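+/*
+ * Check that forcing the per-object minimum page size is honoured: both the
+ * resulting object size and the alignment of the backing store should respect
+ * the requested page size.
+ */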
+static int igt_lmem_create_with_ps(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ int err = 0;
+ u32 ps;
+
+ for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
+ struct drm_i915_gem_object *obj;
+ dma_addr_t daddr;
+
+ obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENXIO || err == -E2BIG) {
+ pr_info("%s not enough lmem for ps(%u) err=%d\n",
+ __func__, ps, err);
+ err = 0;
+ }
+
+ break;
+ }
+
+ if (obj->base.size != ps) {
+ pr_err("%s size(%zu) != ps(%u)\n",
+ __func__, obj->base.size, ps);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
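+ /* The start of the backing store should be aligned to the forced page size. */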
+ daddr = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(daddr, ps)) {
+ pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
+ __func__, &daddr, ps);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+out_put:
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
static int igt_lmem_create_cleared_cpu(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
void *addr;
- obj = i915_gem_object_create_region(mr, size, 0);
+ obj = i915_gem_object_create_region(mr, size, 0, 0);
if (IS_ERR(obj)) {
if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
return ERR_PTR(-ENODEV);
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_create_with_ps),
SUBTEST(igt_lmem_create_cleared_cpu),
SUBTEST(igt_lmem_write_cpu),
SUBTEST(igt_lmem_write_gpu),
static int mock_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
+ resource_size_t page_size,
unsigned int flags)
{
static struct lock_class_key lock_class;