return (struct amdgpu_device *)kgd;
}
-static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
struct kgd_mem *mem)
{
- struct kfd_bo_va_list *entry;
+ struct kfd_mem_attachment *entry;
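+ /* a BO can be attached to several VMs; check whether this one is among them */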
- list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+ list_for_each_entry(entry, &mem->attachments, list)
if (entry->bo_va->base.vm == avm)
- return false;
+ return true;
- return true;
+ return false;
}
/* Set memory usage limits. Currently, limits are
return pte_flags;
}
-/* add_bo_to_vm - Add a BO to a VM
+/* kfd_mem_attach - Add a BO to a VM
*
* Everything that needs to be done only once when a BO is first added
* to a VM. It can later be mapped and unmapped many times without
* 4. Alloc page tables and directories if needed
* 4a. Validate new page tables and directories
*/
-static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_vm *vm, bool is_aql,
- struct kfd_bo_va_list **p_bo_va_entry)
+ struct kfd_mem_attachment **p_attachment)
{
int ret;
- struct kfd_bo_va_list *bo_va_entry;
+ struct kfd_mem_attachment *attachment;
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
- struct list_head *list_bo_va = &mem->bo_va_list;
unsigned long bo_size = bo->tbo.base.size;
if (!va) {
if (is_aql)
va += bo_size;
- bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
- if (!bo_va_entry)
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment)
return -ENOMEM;
pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
va + bo_size, vm);
/* Add BO to VM internal data structures */
- bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
- if (!bo_va_entry->bo_va) {
+ attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!attachment->bo_va) {
ret = -EINVAL;
pr_err("Failed to add BO object to VM. ret == %d\n",
ret);
goto err_vmadd;
}
- bo_va_entry->va = va;
- bo_va_entry->pte_flags = get_pte_flags(adev, mem);
- bo_va_entry->kgd_dev = (void *)adev;
- list_add(&bo_va_entry->bo_list, list_bo_va);
+ attachment->va = va;
+ attachment->pte_flags = get_pte_flags(adev, mem);
+ attachment->adev = adev;
+ list_add(&attachment->list, &mem->attachments);
- if (p_bo_va_entry)
- *p_bo_va_entry = bo_va_entry;
+ if (p_attachment)
+ *p_attachment = attachment;
/* Allocate and validate page tables if needed */
ret = vm_validate_pt_pd_bos(vm);
return 0;
err_alloc_pts:
- amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
- list_del(&bo_va_entry->bo_list);
+ amdgpu_vm_bo_rmv(adev, attachment->bo_va);
+ list_del(&attachment->list);
err_vmadd:
- kfree(bo_va_entry);
+ kfree(attachment);
return ret;
}
-static void remove_bo_from_vm(struct amdgpu_device *adev,
- struct kfd_bo_va_list *entry, unsigned long size)
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
- pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
- entry->va,
- entry->va + size, entry);
- amdgpu_vm_bo_rmv(adev, entry->bo_va);
- list_del(&entry->bo_list);
- kfree(entry);
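+ /* remove the BO from the VM and free the attachment */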
+ pr_debug("\t remove VA 0x%llx in entry %p\n",
+ attachment->va, attachment);
+ amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+ list_del(&attachment->list);
+ kfree(attachment);
}
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
struct amdgpu_vm *vm, enum bo_vm_match map_type,
struct bo_vm_reservation_context *ctx)
{
struct amdgpu_bo *bo = mem->bo;
- struct kfd_bo_va_list *entry;
+ struct kfd_mem_attachment *entry;
unsigned int i;
int ret;
INIT_LIST_HEAD(&ctx->list);
INIT_LIST_HEAD(&ctx->duplicates);
- list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ list_for_each_entry(entry, &mem->attachments, list) {
if ((vm && vm != entry->bo_va->base.vm) ||
(entry->is_mapped != map_type
&& map_type != BO_VM_ALL))
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
i = 0;
- list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ list_for_each_entry(entry, &mem->attachments, list) {
if ((vm && vm != entry->bo_va->base.vm) ||
(entry->is_mapped != map_type
&& map_type != BO_VM_ALL))
}
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
- struct kfd_bo_va_list *entry,
+ struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
}
static int update_gpuvm_pte(struct amdgpu_device *adev,
- struct kfd_bo_va_list *entry,
+ struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
int ret;
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
- struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
+ struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
bool no_update_pte)
{
int ret;
ret = -ENOMEM;
goto err;
}
- INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ INIT_LIST_HEAD(&(*mem)->attachments);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
- struct kfd_bo_va_list *entry, *tmp;
+ struct kfd_mem_attachment *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
unsigned int mapped_to_gpu_memory;
mem->va + bo_size * (1 + mem->aql_queue));
/* Remove from VM internal data structures */
- list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
- remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
- entry, bo_size);
+ list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+ kfd_mem_detach(entry);
ret = unreserve_bo_and_vms(&ctx, false, false);
int ret;
struct amdgpu_bo *bo;
uint32_t domain;
- struct kfd_bo_va_list *entry;
+ struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx;
- struct kfd_bo_va_list *bo_va_entry = NULL;
- struct kfd_bo_va_list *bo_va_entry_aql = NULL;
+ struct kfd_mem_attachment *attachment = NULL;
+ struct kfd_mem_attachment *attachment_aql = NULL;
unsigned long bo_size;
bool is_invalid_userptr = false;
bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
- if (check_if_add_bo_to_vm(avm, mem)) {
- ret = add_bo_to_vm(adev, mem, avm, false,
- &bo_va_entry);
+ if (!kfd_mem_is_attached(avm, mem)) {
+ ret = kfd_mem_attach(adev, mem, avm, false, &attachment);
if (ret)
- goto add_bo_to_vm_failed;
+ goto attach_failed;
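+ /* an AQL queue needs a second attachment, mapped at va + bo_size */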
if (mem->aql_queue) {
- ret = add_bo_to_vm(adev, mem, avm,
- true, &bo_va_entry_aql);
+ ret = kfd_mem_attach(adev, mem, avm, true,
+ &attachment_aql);
if (ret)
- goto add_bo_to_vm_failed_aql;
+ goto attach_failed_aql;
}
} else {
ret = vm_validate_pt_pd_bos(avm);
if (unlikely(ret))
- goto add_bo_to_vm_failed;
+ goto attach_failed;
}
if (mem->mapped_to_gpu_memory == 0 &&
}
}
- list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
- if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
- pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
- entry->va, entry->va + bo_size,
- entry);
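+ /* map only attachments belonging to this VM that are not mapped yet */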
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm != avm || entry->is_mapped)
+ continue;
- ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
- is_invalid_userptr);
- if (ret) {
- pr_err("Failed to map bo to gpuvm\n");
- goto map_bo_to_gpuvm_failed;
- }
+ pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
+ entry->va, entry->va + bo_size, entry);
- ret = vm_update_pds(avm, ctx.sync);
- if (ret) {
- pr_err("Failed to update page directories\n");
- goto map_bo_to_gpuvm_failed;
- }
+ ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+ is_invalid_userptr);
+ if (ret) {
+ pr_err("Failed to map bo to gpuvm\n");
+ goto map_bo_to_gpuvm_failed;
+ }
- entry->is_mapped = true;
- mem->mapped_to_gpu_memory++;
- pr_debug("\t INC mapping count %d\n",
- mem->mapped_to_gpu_memory);
+ ret = vm_update_pds(avm, ctx.sync);
+ if (ret) {
+ pr_err("Failed to update page directories\n");
+ goto map_bo_to_gpuvm_failed;
}
+
+ entry->is_mapped = true;
+ mem->mapped_to_gpu_memory++;
+ pr_debug("\t INC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
goto out;
map_bo_to_gpuvm_failed:
- if (bo_va_entry_aql)
- remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
-add_bo_to_vm_failed_aql:
- if (bo_va_entry)
- remove_bo_from_vm(adev, bo_va_entry, bo_size);
-add_bo_to_vm_failed:
+ if (attachment_aql)
+ kfd_mem_detach(attachment_aql);
+attach_failed_aql:
+ if (attachment)
+ kfd_mem_detach(attachment);
+attach_failed:
unreserve_bo_and_vms(&ctx, false, false);
out:
mutex_unlock(&mem->process_info->lock);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdkfd_process_info *process_info = avm->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
- struct kfd_bo_va_list *entry;
+ struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx;
int ret;
mem->va + bo_size * (1 + mem->aql_queue),
avm);
- list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
- if (entry->bo_va->base.vm == avm && entry->is_mapped) {
- pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
- entry->va,
- entry->va + bo_size,
- entry);
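+ /* unmap only attachments belonging to this VM that are currently mapped */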
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+ continue;
- ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
- if (ret == 0) {
- entry->is_mapped = false;
- } else {
- pr_err("failed to unmap VA 0x%llx\n",
- mem->va);
- goto unreserve_out;
- }
+ pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
+ entry->va, entry->va + bo_size, entry);
- mem->mapped_to_gpu_memory--;
- pr_debug("\t DEC mapping count %d\n",
- mem->mapped_to_gpu_memory);
+ ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
+ if (ret == 0) {
+ entry->is_mapped = false;
+ } else {
+ pr_err("failed to unmap VA 0x%llx\n", mem->va);
+ goto unreserve_out;
}
+
+ mem->mapped_to_gpu_memory--;
+ pr_debug("\t DEC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
}
/* If BO is unmapped from all VMs, unfence it. It can be evicted if
if (mmap_offset)
*mmap_offset = amdgpu_bo_mmap_offset(bo);
- INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ INIT_LIST_HEAD(&(*mem)->attachments);
mutex_init(&(*mem)->lock);
(*mem)->alloc_flags =
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
validate_list.head) {
- struct kfd_bo_va_list *bo_va_entry;
+ struct kfd_mem_attachment *attachment;
bo = mem->bo;
* VM faults if the GPU tries to access the invalid
* memory.
*/
- list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
- if (!bo_va_entry->is_mapped)
+ list_for_each_entry(attachment, &mem->attachments, list) {
+ if (!attachment->is_mapped)
continue;
- ret = update_gpuvm_pte((struct amdgpu_device *)
- bo_va_entry->kgd_dev,
- bo_va_entry, &sync);
+ ret = update_gpuvm_pte(attachment->adev, attachment,
+ &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_bo_va_list *bo_va_entry;
+ struct kfd_mem_attachment *attachment;
total_size += amdgpu_bo_size(bo);
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
}
- list_for_each_entry(bo_va_entry, &mem->bo_va_list,
- bo_list) {
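+ /* update the mapping in every VM the BO is attached to */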
+ list_for_each_entry(attachment, &mem->attachments, list) {
- ret = update_gpuvm_pte((struct amdgpu_device *)
- bo_va_entry->kgd_dev,
- bo_va_entry,
+ ret = update_gpuvm_pte(attachment->adev, attachment,
&sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
return -ENOMEM;
mutex_init(&(*mem)->lock);
- INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ INIT_LIST_HEAD(&(*mem)->attachments);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;