struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
- if (!i915_gem_object_evictable(obj))
- return false;
-
- /* This isn't valid with a buddy allocator */
- return ttm_bo_eviction_valuable(bo, place);
+ return i915_gem_object_evictable(obj);
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
enum ttm_bo_type bo_type;
- size_t alignment = 0;
int ret;
- /* Adjust alignment to GPU- and CPU huge page sizes. */
-
- if (mem->is_range_manager) {
- if (size >= SZ_1G)
- alignment = SZ_1G >> PAGE_SHIFT;
- else if (size >= SZ_2M)
- alignment = SZ_2M >> PAGE_SHIFT;
- else if (size >= SZ_64K)
- alignment = SZ_64K >> PAGE_SHIFT;
- }
-
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
i915_gem_object_init_memory_region(obj, mem);
*/
obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement, alignment,
+ bo_type, &i915_sys_placement, 1,
true, NULL, NULL, i915_ttm_bo_destroy);
if (!ret)
#include "intel_memory_region.h"
#include "i915_drv.h"
+#include "i915_ttm_buddy_manager.h"
static const struct {
u16 class;
},
};
-struct intel_region_reserve {
- struct list_head link;
- struct ttm_resource *res;
-};
-
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
return NULL;
}
-/**
- * intel_memory_region_unreserve - Unreserve all previously reserved
- * ranges
- * @mem: The region containing the reserved ranges.
- */
-void intel_memory_region_unreserve(struct intel_memory_region *mem)
-{
- struct intel_region_reserve *reserve, *next;
-
- if (!mem->priv_ops || !mem->priv_ops->free)
- return;
-
- mutex_lock(&mem->mm_lock);
- list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
- list_del(&reserve->link);
- mem->priv_ops->free(mem, reserve->res);
- kfree(reserve);
- }
- mutex_unlock(&mem->mm_lock);
-}
-
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
resource_size_t offset,
resource_size_t size)
{
- int ret;
- struct intel_region_reserve *reserve;
-
- if (!mem->priv_ops || !mem->priv_ops->reserve)
- return -EINVAL;
-
- reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
- if (!reserve)
- return -ENOMEM;
+ struct ttm_resource_manager *man = mem->region_private;
- reserve->res = mem->priv_ops->reserve(mem, offset, size);
- if (IS_ERR(reserve->res)) {
- ret = PTR_ERR(reserve->res);
- kfree(reserve);
- return ret;
- }
-
- mutex_lock(&mem->mm_lock);
- list_add_tail(&reserve->link, &mem->reserved);
- mutex_unlock(&mem->mm_lock);
+ GEM_BUG_ON(mem->is_range_manager);
- return 0;
+ return i915_ttm_buddy_man_reserve(man, offset, size);
}
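With the buddy manager wired in, reserving a range from a region is now a direct call into i915_ttm_buddy_man_reserve(). A minimal caller sketch (the helper name below is invented for illustration; the igt_mock_reserve selftest further down exercises the same path):

	/* Illustrative only: carve the first 1 MiB out of a region so that
	 * subsequent allocations can no longer land there.
	 */
	static int example_reserve_head(struct intel_memory_region *mem)
	{
		return intel_memory_region_reserve(mem, 0, SZ_1M);
	}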
struct intel_memory_region *
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
- INIT_LIST_HEAD(&mem->reserved);
-
- mutex_init(&mem->mm_lock);
if (ops->init) {
err = ops->init(mem);
struct intel_memory_region *mem =
container_of(kref, typeof(*mem), kref);
- intel_memory_region_unreserve(mem);
if (mem->ops->release)
mem->ops->release(mem);
- mutex_destroy(&mem->mm_lock);
mutex_destroy(&mem->objects.lock);
kfree(mem);
}
unsigned int flags);
};
-struct intel_memory_region_private_ops {
- struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
- resource_size_t offset,
- resource_size_t size);
- void (*free)(struct intel_memory_region *mem,
- struct ttm_resource *res);
-};
-
struct intel_memory_region {
struct drm_i915_private *i915;
const struct intel_memory_region_ops *ops;
- const struct intel_memory_region_private_ops *priv_ops;
struct io_mapping iomap;
struct resource region;
/* For fake LMEM */
struct drm_mm_node fake_mappable;
- struct mutex mm_lock;
-
struct kref kref;
resource_size_t io_start;
char name[16];
bool private; /* not for userspace */
- struct list_head reserved;
-
dma_addr_t remap_addr;
struct {
struct list_head list;
} objects;
- size_t chunk_size;
- unsigned int max_order;
bool is_range_manager;
void *region_private;
intel_memory_region_set_name(struct intel_memory_region *mem,
const char *fmt, ...);
-void intel_memory_region_unreserve(struct intel_memory_region *mem);
-
int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_ttm_buddy_manager.h"
#include "intel_region_ttm.h"
return type;
}
-static struct ttm_resource *
-intel_region_ttm_resource_reserve(struct intel_memory_region *mem,
- resource_size_t offset,
- resource_size_t size)
-{
- struct ttm_resource_manager *man = mem->region_private;
- struct ttm_place place = {};
- struct ttm_buffer_object mock_bo = {};
- struct ttm_resource *res;
- int ret;
-
- /*
- * Having to use a mock_bo is unfortunate but stems from some
- * drivers having private managers that insist to know what the
- * allocate memory is intended for, using it to send private
- * data to the manager. Also recently the bo has been used to send
- * alignment info to the manager. Assume that apart from the latter,
- * none of the managers we use will ever access the buffer object
- * members, hoping we can pass the alignment info in the
- * struct ttm_place in the future.
- */
-
- place.fpfn = offset >> PAGE_SHIFT;
- place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
- mock_bo.base.size = size;
- ret = man->func->alloc(man, &mock_bo, &place, &res);
- if (ret == -ENOSPC)
- ret = -ENXIO;
-
- return ret ? ERR_PTR(ret) : res;
-}
-
/**
- * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
- * @mem: The region the resource was allocated from.
- * @res: The opaque resource representing an allocation.
+ * intel_region_ttm_init - Initialize a memory region for TTM.
+ * @mem: The region to initialize.
+ *
+ * This function initializes a suitable TTM resource manager for the
+ * region, and if it's a LMEM region type, attaches it to the TTM
+ * device. MOCK regions are NOT attached to the TTM device, since we don't
+ * have one for the mock selftests.
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-void intel_region_ttm_resource_free(struct intel_memory_region *mem,
- struct ttm_resource *res)
-{
- struct ttm_resource_manager *man = mem->region_private;
-
- man->func->free(man, res);
-}
-
-static const struct intel_memory_region_private_ops priv_ops = {
- .reserve = intel_region_ttm_resource_reserve,
- .free = intel_region_ttm_resource_free,
-};
-
int intel_region_ttm_init(struct intel_memory_region *mem)
{
struct ttm_device *bdev = &mem->i915->bdev;
int mem_type = intel_region_to_ttm_type(mem);
int ret;
- ret = ttm_range_man_init(bdev, mem_type, false,
- resource_size(&mem->region) >> PAGE_SHIFT);
+ ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
+ resource_size(&mem->region), PAGE_SIZE);
if (ret)
return ret;
- mem->chunk_size = PAGE_SIZE;
- mem->max_order =
- get_order(rounddown_pow_of_two(resource_size(&mem->region)));
- mem->is_range_manager = true;
- mem->priv_ops = &priv_ops;
mem->region_private = ttm_manager_type(bdev, mem_type);
return 0;
{
int ret;
- ret = ttm_range_man_fini(&mem->i915->bdev,
- intel_region_to_ttm_type(mem));
+ ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
+ intel_region_to_ttm_type(mem));
GEM_WARN_ON(ret);
mem->region_private = NULL;
}
struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
struct ttm_resource *res)
{
- struct ttm_range_mgr_node *range_node =
- container_of(res, typeof(*range_node), base);
+ if (mem->is_range_manager) {
+ struct ttm_range_mgr_node *range_node =
+ to_ttm_range_mgr_node(res);
- GEM_WARN_ON(!mem->is_range_manager);
- return i915_sg_from_mm_node(&range_node->mm_nodes[0],
- mem->region.start);
+ return i915_sg_from_mm_node(&range_node->mm_nodes[0],
+ mem->region.start);
+ } else {
+ return i915_sg_from_buddy_resource(res, mem->region.start);
+ }
}
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *res;
int ret;
- /*
- * We ignore the flags for now since we're using the range
- * manager and contigous and min page size would be fulfilled
- * by default if size is min page size aligned.
- */
mock_bo.base.size = size;
-
- if (mem->is_range_manager) {
- if (size >= SZ_1G)
- mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
- else if (size >= SZ_2M)
- mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
- else if (size >= SZ_64K)
- mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
- }
+ mock_bo.page_alignment = 1;
+ place.flags = flags;
ret = man->func->alloc(man, &mock_bo, &place, &res);
if (ret == -ENOSPC)
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}
+
#endif
+
+/**
+ * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
+ * @mem: The region the resource was allocated from.
+ * @res: The opaque resource representing an allocation.
+ */
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+ struct ttm_resource *res)
+{
+ struct ttm_resource_manager *man = mem->region_private;
+
+ man->func->free(man, res);
+}
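A minimal sketch of how the three region helpers pair up, modeled on mock_region_get_pages() shown at the end of this patch (the function name is invented; this is a selftest-only path, since intel_region_ttm_resource_alloc() lives under CONFIG_DRM_I915_SELFTEST): allocate a resource from the region, translate it to an sg_table, and free the resource again if that translation fails.

	static struct sg_table *example_region_backing(struct intel_memory_region *mem,
						       resource_size_t size)
	{
		struct ttm_resource *res;
		struct sg_table *st;

		res = intel_region_ttm_resource_alloc(mem, size, I915_ALLOC_MIN_PAGE_SIZE);
		if (IS_ERR(res))
			return ERR_CAST(res);

		st = intel_region_ttm_resource_to_st(mem, res);
		if (IS_ERR(st))
			intel_region_ttm_resource_free(mem, res);

		return st;
	}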
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "i915_buddy.h"
#include "i915_memcpy.h"
+#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
LIST_HEAD(objects);
int err = 0;
- page_size = mem->chunk_size;
+ page_size = PAGE_SIZE;
+ max_pages = div64_u64(total, page_size);
rem = total;
-retry:
- max_pages = div64_u64(rem, page_size);
for_each_prime_number_from(page_num, 1, max_pages) {
resource_size_t size = page_num * page_size;
err = 0;
if (err == -ENXIO) {
if (page_num * page_size <= rem) {
- if (mem->is_range_manager && max_pages > 1) {
- max_pages >>= 1;
- goto retry;
- }
-
pr_err("%s failed, space still left in region\n",
__func__);
err = -EINVAL;
static int igt_mock_reserve(void *arg)
{
struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
resource_size_t avail = resource_size(&mem->region);
struct drm_i915_gem_object *obj;
const u32 chunk_size = SZ_32M;
LIST_HEAD(objects);
int err = 0;
- if (!list_empty(&mem->reserved)) {
- pr_err("%s region reserved list is not empty\n", __func__);
- return -EINVAL;
- }
-
count = avail / chunk_size;
order = i915_random_order(count, &prng);
if (!order)
return 0;
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_close;
+ }
+
/* Reserve a bunch of ranges within the region */
for (i = 0; i < count; ++i) {
u64 start = order[i] * chunk_size;
do {
u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
-retry:
size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
obj = igt_object_create(mem, &objects, size, 0);
if (IS_ERR(obj)) {
- if (PTR_ERR(obj) == -ENXIO) {
- if (mem->is_range_manager &&
- size > mem->chunk_size) {
- size >>= 1;
- goto retry;
- }
+ if (PTR_ERR(obj) == -ENXIO)
break;
- }
+
err = PTR_ERR(obj);
goto out_close;
}
out_close:
kfree(order);
close_objects(mem, &objects);
- intel_memory_region_unreserve(mem);
+ intel_memory_region_put(mem);
return err;
}
total = resource_size(&mem->region);
/* Min size */
- obj = igt_object_create(mem, &objects, mem->chunk_size,
+ obj = igt_object_create(mem, &objects, PAGE_SIZE,
I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
min = target;
target = total >> 1;
- if (!mem->is_range_manager) {
- /* Make sure we can still allocate all the fragmented space */
- obj = igt_object_create(mem, &objects, target, 0);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_close_objects;
- }
-
- igt_object_release(obj);
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
}
+ igt_object_release(obj);
+
/*
* Even though we have enough free space, we don't have a big enough
* contiguous block. Make sure that holds true.
}
target >>= 1;
- } while (target >= mem->chunk_size);
+ } while (target >= PAGE_SIZE);
err_close_objects:
list_splice_tail(&holes, &objects);
{
struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915;
+ struct i915_ttm_buddy_resource *res;
struct drm_i915_gem_object *obj;
+ struct i915_buddy_mm *mm;
unsigned int expected_order;
LIST_HEAD(objects);
u64 size;
/*
* Sanity check we can still allocate everything even if the
- * max_order != mm.size. i.e our starting address space size is not a
+ * mm.max_order != mm.size. i.e. our starting address space size is not a
* power-of-two.
*/
if (IS_ERR(mem))
return PTR_ERR(mem);
- expected_order = get_order(rounddown_pow_of_two(size));
- if (mem->max_order != expected_order) {
- pr_err("%s order mismatch(%u != %u)\n",
- __func__, mem->max_order, expected_order);
- err = -EINVAL;
- goto out_put;
- }
-
obj = igt_object_create(mem, &objects, size, 0);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_close;
}
+ res = to_ttm_buddy_resource(obj->mm.res);
+ mm = res->mm;
+ if (mm->size != size) {
+ pr_err("%s size mismatch(%llu != %llu)\n",
+ __func__, mm->size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ expected_order = get_order(rounddown_pow_of_two(size));
+ if (mm->max_order != expected_order) {
+ pr_err("%s order mismatch(%u != %u)\n",
+ __func__, mm->max_order, expected_order);
+ err = -EINVAL;
+ goto out_put;
+ }
+
close_objects(mem, &objects);
/*
* sure that does indeed hold true.
*/
- if (!mem->is_range_manager) {
- obj = igt_object_create(mem, &objects, size,
- I915_BO_ALLOC_CONTIGUOUS);
- if (!IS_ERR(obj)) {
- pr_err("%s too large contiguous allocation was not rejected\n",
- __func__);
- err = -EINVAL;
- goto out_close;
- }
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (!IS_ERR(obj)) {
+ pr_err("%s too large contiguous allocation was not rejected\n",
+ __func__);
+ err = -EINVAL;
+ goto out_close;
}
obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
return err;
}
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ttm_buddy_resource *res;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm *mm;
+ struct list_head *blocks;
+ struct scatterlist *sg;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapping with scatterlist elements has an implicit limit of
+ * UINT_MAX on each element.
+ */
+
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ res = to_ttm_buddy_resource(obj->mm.res);
+ blocks = &res->blocks;
+ mm = res->mm;
+ size = 0;
+ list_for_each_entry(block, blocks, link) {
+ if (i915_buddy_block_size(mm, block) > size)
+ size = i915_buddy_block_size(mm, block);
+ }
+ if (size < max_segment) {
+ pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+ __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
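For reference, with 4 KiB pages the cap above works out to rounddown(UINT_MAX, PAGE_SIZE) = rounddown(4294967295, 4096) = 4294963200 bytes, i.e. 4 KiB short of 4 GiB. Since the test also asserts that no scatterlist entry exceeds that cap, covering the 8 GiB object takes at least three entries, while the largest buddy block backing it must still be at least the cap's size for the test to pass.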
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
* Copyright © 2019-2021 Intel Corporation
*/
+#include <drm/ttm/ttm_placement.h>
#include <linux/scatterlist.h>
{
unsigned int flags;
struct sg_table *pages;
+ int err;
flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
- flags |= I915_ALLOC_CONTIGUOUS;
+ flags |= TTM_PL_FLAG_CONTIGUOUS;
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
obj->base.size,
pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
if (IS_ERR(pages)) {
- intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
- return PTR_ERR(pages);
+ err = PTR_ERR(pages);
+ goto err_free_resource;
}
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
return 0;
+
+err_free_resource:
+ intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
+ return err;
}
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {