static inline struct amdgpu_gtt_node *
to_amdgpu_gtt_node(struct ttm_resource *res)
{
- return container_of(res->mm_node, struct amdgpu_gtt_node,
- base.mm_nodes[0]);
+ return container_of(res, struct amdgpu_gtt_node, base.base);
}
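This one-step downcast is the heart of the series: every driver-private resource type now embeds struct ttm_resource (here via struct ttm_range_mgr_node) and recovers its wrapper with container_of() on the resource pointer itself, instead of chasing the old void *mm_node. A minimal sketch of the pattern, using a hypothetical my_res wrapper that is not part of the patch:

	/* Hypothetical wrapper, for illustration only. */
	struct my_res {
		struct ttm_resource base;	/* embedded, never reached via mm_node */
		unsigned long driver_private;	/* whatever the driver needs */
	};

	static inline struct my_res *to_my_res(struct ttm_resource *res)
	{
		return container_of(res, struct my_res, base);
	}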
/**
* amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
- * @mem: the mem object to check
+ * @res: the mem object to check
*
* Check if a mem object has already address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
return drm_mm_node_allocated(&node->base.mm_nodes[0]);
}
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ uint32_t num_pages = PFN_UP(tbo->base.size);
struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
- if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
- atomic64_read(&mgr->available) < mem->num_pages) {
+ if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+ atomic64_read(&mgr->available) < num_pages) {
spin_unlock(&mgr->lock);
return -ENOSPC;
}
- atomic64_sub(mem->num_pages, &mgr->available);
+ atomic64_sub(num_pages, &mgr->available);
spin_unlock(&mgr->lock);
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
if (!node) {
r = -ENOMEM;
goto err_out;
}
ttm_resource_init(tbo, place, &node->base.base);
if (place->lpfn) {
spin_lock(&mgr->lock);
r = drm_mm_insert_node_in_range(&mgr->mm,
&node->base.mm_nodes[0],
- mem->num_pages,
- tbo->page_alignment, 0,
- place->fpfn, place->lpfn,
+ num_pages, tbo->page_alignment,
+ 0, place->fpfn, place->lpfn,
DRM_MM_INSERT_BEST);
spin_unlock(&mgr->lock);
if (unlikely(r))
goto err_free;
- mem->start = node->base.mm_nodes[0].start;
+ node->base.base.start = node->base.mm_nodes[0].start;
} else {
node->base.mm_nodes[0].start = 0;
- node->base.mm_nodes[0].size = mem->num_pages;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
+ node->base.mm_nodes[0].size = node->base.base.num_pages;
+ node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
}
- mem->mm_node = &node->base.mm_nodes[0];
+ *res = &node->base.base;
return 0;
err_free:
kfree(node);
err_out:
- atomic64_add(mem->num_pages, &mgr->available);
+ atomic64_add(num_pages, &mgr->available);
return r;
}
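All converted alloc hooks follow the lifecycle visible above: allocate the wrapper, initialize the embedded resource with ttm_resource_init(), fill in start, and publish the object through *res only on success; error paths free the wrapper and leave *res untouched. A condensed sketch of that contract, reusing the hypothetical my_res wrapper from the earlier sketch:

	static int my_man_alloc(struct ttm_resource_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource **res)
	{
		struct my_res *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;

		ttm_resource_init(bo, place, &node->base);
		node->base.start = 0;	/* or the offset actually allocated */

		*res = &node->base;	/* published only on success */
		return 0;
	}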
* Free the allocated GTT again.
*/
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_gtt_node *node;
-
- if (!mem->mm_node)
- return;
-
- node = to_amdgpu_gtt_node(mem);
spin_lock(&mgr->lock);
if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
drm_mm_remove_node(&node->base.mm_nodes[0]);
spin_unlock(&mgr->lock);
- atomic64_add(mem->num_pages, &mgr->available);
+ atomic64_add(res->num_pages, &mgr->available);
kfree(node);
}
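The free side is the mirror image, and because the core now guarantees a valid resource pointer, the old "if (!mem->mm_node) return;" guards disappear: downcast unconditionally, release the backing range if one was ever inserted, then kfree() the wrapper, exactly as amdgpu_gtt_mgr_del() does above. In sketch form, again with the hypothetical my_res:

	static void my_man_free(struct ttm_resource_manager *man,
				struct ttm_resource *res)
	{
		struct my_res *node = to_my_res(res);	/* always valid now */

		/* release any backing allocation here, then: */
		kfree(node);
	}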
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
+ if (bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
{
struct drm_mm_node *node;
- if (!res || !res->mm_node) {
+ if (!res) {
cur->start = start;
cur->size = size;
cur->remaining = size;
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = res->mm_node;
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *mem = bo->tbo.resource;
- struct drm_mm_node *nodes = mem->mm_node;
- unsigned pages = mem->num_pages;
+ struct ttm_resource *res = bo->tbo.resource;
+ unsigned pages = res->num_pages;
+ struct drm_mm_node *mm;
u64 usage;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
- if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
- usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ mm = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ for (usage = 0; pages; pages -= mm->size, mm++)
+ usage += amdgpu_vram_mgr_vis_size(adev, mm);
return usage;
}
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
/* bail out quickly if there's likely not enough VRAM for this BO */
- mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+ mem_bytes = tbo->base.size;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
r = -ENOSPC;
goto error_sub;
#endif
pages_per_node = max_t(uint32_t, pages_per_node,
tbo->page_alignment);
- num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
}
node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
GFP_KERNEL | __GFP_ZERO);
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
- mem->start = 0;
- pages_left = mem->num_pages;
+ pages_left = node->base.num_pages;
/* Limit maximum size to 2GB due to SG table limitations */
pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
}
vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
- amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
+ amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
pages_left -= pages;
++i;
spin_unlock(&mgr->lock);
if (i == 1)
- mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+ node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
atomic64_add(vis_usage, &mgr->vis_usage);
- mem->mm_node = &node->mm_nodes[0];
+ *res = &node->base;
return 0;
error_free:
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct ttm_range_mgr_node *node;
uint64_t usage = 0, vis_usage = 0;
- unsigned pages = mem->num_pages;
- struct drm_mm_node *nodes;
-
- if (!mem->mm_node)
- return;
-
- node = to_ttm_range_mgr_node(mem);
- nodes = &node->mm_nodes[0];
+ unsigned i, pages;
spin_lock(&mgr->lock);
- while (pages) {
- pages -= nodes->size;
- drm_mm_remove_node(nodes);
- usage += nodes->size << PAGE_SHIFT;
- vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
- ++nodes;
+ for (i = 0, pages = res->num_pages; pages;
+ pages -= node->mm_nodes[i].size, ++i) {
+ struct drm_mm_node *mm = &node->mm_nodes[i];
+
+ drm_mm_remove_node(mm);
+ usage += mm->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
}
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_resource *mem,
+ struct ttm_resource *res,
u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
return -ENOMEM;
/* Determine the number of DRM_MM nodes to export */
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
amdgpu_res_next(&cursor, cursor.size);
* and the number of bytes from it. Access the following
* DRM_MM node(s) if more buffer needs to be exported
*/
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + adev->gmc.aper_base;
size_t size = cursor.size;
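The amdgpu_res_first()/amdgpu_res_next() cursor pair used here hides whether a resource is backed by one drm_mm node or many. A sketch of the idiom, assuming only the cursor API shown above:

	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, 0, res->num_pages << PAGE_SHIFT, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe one contiguous chunk;
		 * consume it here (placeholder for the real work) */
		amdgpu_res_next(&cursor, cursor.size);
	}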
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
/* Keep TTM behavior for now, remove when drivers are audited */
- if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
+ if (WARN_ON_ONCE(!gbo->bo.resource ||
+ gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
return 0;
return gbo->bo.resource->start;
}
}
- if (new_reg) {
- if (new_reg->mm_node)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
- else
- nvbo->offset = 0;
- }
+ if (new_reg)
+ nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
nouveau_mem_del(struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- if (!mem)
- return;
+
nouveau_mem_fini(mem);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
+ kfree(mem);
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
+
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
- reg->mm_node = mem;
+ *res = &mem->base;
return 0;
}
#include <nvif/mem.h>
#include <nvif/vmm.h>
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
- return reg->mm_node;
-}
-
struct nouveau_mem {
struct ttm_resource base;
struct nouveau_cli *cli;
struct nvif_vma vma[2];
};
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+ return container_of(reg, struct nouveau_mem, base);
+}
+
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
- struct ttm_resource *);
+ struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ttm_resource_init(bo, place, reg->mm_node);
+ ttm_resource_init(bo, place, *res);
- ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+ ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ttm_resource_init(bo, place, reg->mm_node);
- reg->start = 0;
+ ttm_resource_init(bo, place, *res);
+ (*res)->start = 0;
return 0;
}
nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ttm_resource_init(bo, place, reg->mm_node);
+ mem = nouveau_mem(*res);
+ ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
- reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+ (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct ttm_range_manager *rman = to_range_manager(man);
struct ttm_range_mgr_node *node;
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- mem->num_pages, bo->page_alignment, 0,
+ node->base.num_pages,
+ bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
kfree(node);
} else {
- mem->mm_node = &node->mm_nodes[0];
- mem->start = node->mm_nodes[0].start;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
}
return ret;
}
static void ttm_range_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct ttm_range_manager *rman = to_range_manager(man);
- struct ttm_range_mgr_node *node;
-
- if (!mem->mm_node)
- return;
-
- node = to_ttm_range_mgr_node(mem);
spin_lock(&rman->lock);
drm_mm_remove_node(&node->mm_nodes[0]);
spin_unlock(&rman->lock);
kfree(node);
- mem->mm_node = NULL;
}
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
{
- res->mm_node = NULL;
res->start = 0;
res->num_pages = PFN_UP(bo->base.size);
res->mem_type = place->mem_type;
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
- struct ttm_resource *res;
- int r;
-
- res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- ttm_resource_init(bo, place, res);
- r = man->func->alloc(man, bo, place, res);
- if (r) {
- kfree(res);
- return r;
- }
- *res_ptr = res;
- return 0;
+ return man->func->alloc(man, bo, place, res_ptr);
}
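For callers nothing changes: ttm_resource_alloc() still hands back a struct ttm_resource * and ttm_resource_free() still takes it away; only ownership of the object moved from the core into the manager. A caller-side sketch, assuming bo and place are already set up:

	struct ttm_resource *res;
	int r;

	r = ttm_resource_alloc(bo, place, &res);
	if (r)
		return r;

	/* ... use res->start, res->num_pages ... */

	ttm_resource_free(bo, &res);	/* sets res to NULL */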
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
- kfree(*res);
*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
- mem->mm_node = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem->mm_node)
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
return -ENOMEM;
- ttm_resource_init(bo, place, mem->mm_node);
+ ttm_resource_init(bo, place, *res);
return 0;
}
static void ttm_sys_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
- kfree(mem->mm_node);
+ kfree(res);
}
static const struct ttm_resource_manager_func ttm_sys_manager_func = {
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
int id;
- mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem->mm_node)
+ *res = kmalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
return -ENOMEM;
- ttm_resource_init(bo, place, mem->mm_node);
+ ttm_resource_init(bo, place, *res);
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += mem->num_pages;
+ gman->used_gmr_pages += (*res)->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
- mem->mm_node = gman;
- mem->start = id;
+ (*res)->start = id;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= mem->num_pages;
+ gman->used_gmr_pages -= (*res)->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
+ kfree(*res);
return -ENOSPC;
}
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
- ida_free(&gman->gmr_ida, mem->start);
+ ida_free(&gman->gmr_ida, res->start);
spin_lock(&gman->lock);
- gman->used_gmr_pages -= mem->num_pages;
+ gman->used_gmr_pages -= res->num_pages;
spin_unlock(&gman->lock);
- kfree(mem->mm_node);
+ kfree(res);
}
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
static int vmw_thp_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
spin_lock(&rman->lock);
if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
+ if (node->base.num_pages >= align_pages) {
ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place, mem,
- lpfn, mode);
+ align_pages, place,
+ &node->base, lpfn, mode);
if (!ret)
goto found_unlock;
}
}
align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
+ if (node->base.num_pages >= align_pages) {
ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place, mem, lpfn,
- mode);
+ align_pages, place, &node->base,
+ lpfn, mode);
if (!ret)
goto found_unlock;
}
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- mem->num_pages, bo->page_alignment, 0,
+ node->base.num_pages,
+ bo->page_alignment, 0,
place->fpfn, lpfn, mode);
found_unlock:
spin_unlock(&rman->lock);
if (unlikely(ret)) {
kfree(node);
} else {
- mem->mm_node = &node->mm_nodes[0];
- mem->start = node->mm_nodes[0].start;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
}
return ret;
}
-
-
static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct vmw_thp_manager *rman = to_thp_manager(man);
- struct ttm_range_mgr_node * node = mem->mm_node;
spin_lock(&rman->lock);
drm_mm_remove_node(&node->mm_nodes[0]);
static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
- return container_of(res->mm_node, struct ttm_range_mgr_node,
- mm_nodes[0]);
+ return container_of(res, struct ttm_range_mgr_node, base);
}
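Because mm_nodes is a flexible array member, wrappers are sized with struct_size() for however many drm_mm nodes the allocation needs: one in the GTT and range managers above, num_nodes in the VRAM manager. A sketch for an arbitrary count:

	struct ttm_range_mgr_node *node;

	node = kzalloc(struct_size(node, mm_nodes, count), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);
	/* node->mm_nodes[0..count-1] are now ready for drm_mm insertion */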
int ttm_range_man_init(struct ttm_device *bdev,
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
*
* This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
+ * If the manager can't fulfill the request, -ENOSPC should be returned.
* If a system error occurred, preventing the request to be fulfilled,
* the function should return a negative error code.
*
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_resource_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem);
+ struct ttm_resource **res);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @res: Pointer to a struct ttm_resource to be freed.
*
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
- struct ttm_resource *mem);
+ struct ttm_resource *res);
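The contract documented above is easy to satisfy even for managers without a drm_mm, as ttm_sys_man earlier in the patch shows. A sketch that also exercises the -ENOSPC case (my_try_reserve() is a placeholder for the manager's own bookkeeping, not a real kernel symbol):

	static int my_alloc(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
	{
		*res = kzalloc(sizeof(**res), GFP_KERNEL);
		if (!*res)
			return -ENOMEM;

		ttm_resource_init(bo, place, *res);
		if (!my_try_reserve(man, *res)) {	/* placeholder */
			kfree(*res);
			return -ENOSPC;	/* manager can't fulfill the request */
		}
		return 0;
	}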
/**
* struct ttm_resource_manager_func member debug
/**
* struct ttm_resource
*
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
* buffer object.
*/
struct ttm_resource {
- void *mm_node;
unsigned long start;
unsigned long num_pages;
uint32_t mem_type;