}
static struct ttm_resource *
-intel_region_ttm_node_reserve(struct intel_memory_region *mem,
+intel_region_ttm_resource_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{
}
/**
- * intel_region_ttm_node_free - Free a node allocated from a resource manager
- * @mem: The region the node was allocated from.
- * @node: The opaque node representing an allocation.
+ * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
+ * @mem: The region the resource was allocated from.
+ * @res: The opaque resource representing an allocation.
*/
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
- struct ttm_resource *res)
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+ struct ttm_resource *res)
{
struct ttm_resource_manager *man = mem->region_private;
}
static const struct intel_memory_region_private_ops priv_ops = {
- .reserve = intel_region_ttm_node_reserve,
- .free = intel_region_ttm_node_free,
+ .reserve = intel_region_ttm_resource_reserve,
+ .free = intel_region_ttm_resource_free,
};
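
Neither helper above is exported directly; they are reached through the priv_ops table. A minimal caller-side sketch follows, assuming struct intel_memory_region carries this table in a priv_ops pointer wired up by intel_region_ttm_init(); the field name and the example_ helpers are assumptions for illustration, not part of this patch.

/* Sketch only: would live next to the region code inside the i915 tree. */
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

static struct ttm_resource *
example_reserve_range(struct intel_memory_region *mem,
		      resource_size_t offset, resource_size_t size)
{
	/* Carve [offset, offset + size) out of the region up front. */
	return mem->priv_ops->reserve(mem, offset, size);
}

static void example_release_range(struct intel_memory_region *mem,
				  struct ttm_resource *res)
{
	/* Hand the reserved range back to the TTM resource manager. */
	mem->priv_ops->free(mem, res);
}
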
int intel_region_ttm_init(struct intel_memory_region *mem)
}
/**
- * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+ * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource
* to an sg_table.
* @mem: The memory region.
- * @node: The resource manager node obtained from the TTM resource manager.
+ * @res: The resource manager resource obtained from the TTM resource manager.
*
* The gem backends typically use sg-tables for operations on the underlying
* io_memory. So provide a way for the backends to translate the
* resources they are handed to sg-tables.
*
* Return: A malloced sg_table on success, an error pointer on failure.
*/
-struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res)
+struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
+ struct ttm_resource *res)
{
struct ttm_range_mgr_node *range_node =
container_of(res, typeof(*range_node), base);
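
As a rough usage sketch for the conversion: given a resource obtained from intel_region_ttm_resource_alloc(), a backend can build an sg_table and walk its segments, then release the table the same way mock_region_put_pages() below does. The example_ helper name is hypothetical.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "intel_region_ttm.h"

static int example_walk_segments(struct intel_memory_region *mem,
				 struct ttm_resource *res)
{
	struct scatterlist *sg;
	struct sg_table *st;
	unsigned int i;

	st = intel_region_ttm_resource_to_st(mem, res);
	if (IS_ERR(st))
		return PTR_ERR(st);

	/* Each entry covers one physically contiguous chunk of the allocation. */
	for_each_sgtable_sg(st, sg, i)
		pr_debug("segment %u: %u bytes\n", i, sg->length);

	/* The helper mallocs the sg_table, so free both the table and the struct. */
	sg_free_table(st);
	kfree(st);
	return 0;
}
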
* Return: A valid pointer on success, an error pointer on failure.
*/
struct ttm_resource *
-intel_region_ttm_node_alloc(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
struct ttm_resource_manager *man = mem->region_private;
struct ttm_place place = {};
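
And a minimal allocation sketch to go with it, mirroring the error handling in mock_region_get_pages() further down; the example_ helper is hypothetical and flags of 0 simply request no special placement.

#include <linux/err.h>
#include "intel_region_ttm.h"

static int example_alloc_and_free(struct intel_memory_region *mem,
				  resource_size_t size)
{
	struct ttm_resource *res;

	/* Ask the region's TTM resource manager for size bytes of backing store. */
	res = intel_region_ttm_resource_alloc(mem, size, 0);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... use the allocation, e.g. via intel_region_ttm_resource_to_st() ... */

	/* Return the backing store to the manager when done. */
	intel_region_ttm_resource_free(mem, res);
	return 0;
}
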
void intel_region_ttm_fini(struct intel_memory_region *mem);
-struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res);
+struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
+ struct ttm_resource *res);
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
- struct ttm_resource *node);
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+ struct ttm_resource *res);
int intel_region_to_ttm_type(const struct intel_memory_region *mem);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
-intel_region_ttm_node_alloc(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
#endif
#endif /* _INTEL_REGION_TTM_H_ */
static void mock_region_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
sg_free_table(pages);
kfree(pages);
}
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
- obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
- obj->base.size,
- flags);
- if (IS_ERR(obj->mm.st_mm_node))
- return PTR_ERR(obj->mm.st_mm_node);
+ obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+ obj->base.size,
+ flags);
+ if (IS_ERR(obj->mm.res))
+ return PTR_ERR(obj->mm.res);
- pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+ pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
if (IS_ERR(pages)) {
- intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
return PTR_ERR(pages);
}