atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+ uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
-
- if (data->last_reserved <= i)
+ ret = amdgpu_vram_mgr_query_page_status(
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ data->bps[i].retired_page);
+ if (ret == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (data->bps_bo[i] == NULL)
+ else if (ret == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
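For readers following the new flow: the badpages_read hunk above now derives each record's flag from the VRAM manager's answer instead of from `bps_bo`. A hypothetical helper (`ras_flag_from_status` is illustrative only, not part of the patch) spells out the mapping:

```c
/* Illustrative only: how the query result maps onto the retire-page
 * states used above; entries default to RESERVED and are only
 * downgraded when the VRAM manager reports otherwise. */
static uint32_t ras_flag_from_status(int status)
{
        switch (status) {
        case -EBUSY:    /* still on mgr->reservations_pending */
                return AMDGPU_RAS_RETIRE_PAGE_PENDING;
        case -ENOENT:   /* the VRAM manager has no record of the page */
                return AMDGPU_RAS_RETIRE_PAGE_FAULT;
        default:        /* 0: drm_mm node inserted, page fenced off */
                return AMDGPU_RAS_RETIRE_PAGE_RESERVED;
        }
}
```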
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
- struct amdgpu_bo **bps_bo =
- kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
- if (!bps || !bps_bo) {
- kfree(bps);
- kfree(bps_bo);
+ if (!bps)
return -ENOMEM;
- }
data->count * sizeof(*data->bps));
kfree(data->bps);
}
- if (data->bps_bo) {
- memcpy(bps_bo, data->bps_bo,
- data->count * sizeof(*data->bps_bo));
- kfree(data->bps_bo);
- }
data->bps = bps;
- data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
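The capacity bookkeeping in `amdgpu_ras_realloc_eh_data_space()` is terse, so a worked example may help. A standalone sketch in plain C, with the kernel's `ALIGN()` expanded by hand and made-up numbers:

```c
#include <stdio.h>

/* Equivalent of the kernel's ALIGN(x, a) for power-of-two a (512 is). */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int count = 500, space_left = 12, pages = 256;

        unsigned int old_space = count + space_left;            /* 512 */
        unsigned int new_space = old_space + pages;             /* 768 */
        unsigned int align_space = ALIGN_POW2(new_space, 512u); /* 1024 */

        /* space_left grows by the whole rounded-up slack, not just by
         * 'pages', so later single-page additions stay allocation-free. */
        printf("capacity %u, space_left %u\n",
               align_space, space_left + (align_space - old_space));
        return 0;
}
```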
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int ret = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
if (!data)
goto out;
- if (data->space_left <= pages)
- if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
+ for (i = 0; i < pages; i++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
ret = -ENOMEM;
goto out;
}
- memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
- data->count += pages;
- data->space_left -= pages;
+ amdgpu_vram_mgr_reserve_range(
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE);
+ memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
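As a usage sketch: this is roughly how an ECC handler would hand one page to `amdgpu_ras_add_bad_pages()`. The field names come from `struct eeprom_table_record`; the `err_type` value and the helper itself are assumptions for illustration:

```c
/* Hypothetical caller: record and retire one faulting VRAM address. */
static int report_one_bad_page(struct amdgpu_device *adev, uint64_t err_addr)
{
        struct eeprom_table_record bp = {
                .address = err_addr,
                .retired_page = err_addr >> AMDGPU_GPU_PAGE_SHIFT,
                .err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE, /* assumed */
        };

        /* Duplicates are filtered inside via check_bad_page_unlock(),
         * and the VRAM range is reserved immediately under
         * con->recovery_lock, so a single call is enough. */
        return amdgpu_ras_add_bad_pages(adev, &bp, 1);
}
```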
+static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+ uint64_t addr)
+{
+ struct ras_err_handler_data *data = con->eh_data;
+ int i;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page)
+ return true;
+
+ return false;
+}
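The `_unlock` suffix marks a helper whose caller must already hold `con->recovery_lock`; `amdgpu_ras_add_bad_pages()` needs the duplicate check while holding that mutex, where the locking wrapper below would self-deadlock. If the contract were to be enforced rather than implied, a lockdep assertion (an addition for illustration, not in the patch) would look like:

```c
static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                             uint64_t addr)
{
        struct ras_err_handler_data *data = con->eh_data;
        int i;

        /* Illustration only: make the locking contract checkable. */
        lockdep_assert_held(&con->recovery_lock);

        addr >>= AMDGPU_GPU_PAGE_SHIFT;
        for (i = 0; i < data->count; i++)
                if (addr == data->bps[i].retired_page)
                        return true;

        return false;
}
```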
+
/*
* check if an address belongs to bad page
*
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
- int i;
bool ret = false;
if (!con || !con->eh_data)
return ret;
mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data)
- goto out;
-
- addr >>= AMDGPU_GPU_PAGE_SHIFT;
- for (i = 0; i < data->count; i++)
- if (addr == data->bps[i].retired_page) {
- ret = true;
- goto out;
- }
-
-out:
+ ret = amdgpu_ras_check_bad_page_unlock(con, addr);
mutex_unlock(&con->recovery_lock);
return ret;
}
}
}
-/* called in gpu recovery/init */
-int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
- uint64_t bp;
- struct amdgpu_bo *bo = NULL;
- int i, ret = 0;
-
- /* Not reserve bad page when amdgpu_bad_page_threshold == 0. */
- if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
- return 0;
-
- mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data)
- goto out;
- /* reserve vram at driver post stage. */
- for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].retired_page;
-
- /* There are two cases of reserve error should be ignored:
- * 1) a ras bad page has been allocated (used by someone);
- * 2) a ras bad page has been reserved (duplicate error injection
- * for one page);
- */
- if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL))
- dev_warn(adev->dev, "RAS WARN: reserve vram for "
- "retired page %llx fail\n", bp);
-
- data->bps_bo[i] = bo;
- data->last_reserved = i + 1;
- bo = NULL;
- }
-out:
- mutex_unlock(&con->recovery_lock);
- return ret;
-}
-
-/* called when driver unload */
-static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
- struct amdgpu_bo *bo;
- int i;
-
- if (!con || !con->eh_data)
- return 0;
-
- mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data)
- goto out;
-
- for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps_bo[i];
-
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
-
- data->bps_bo[i] = bo;
- data->last_reserved = i;
- }
-out:
- mutex_unlock(&con->recovery_lock);
- return 0;
-}
-
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
goto free;
- ret = amdgpu_ras_reserve_bad_pages(adev);
- if (ret)
- goto release;
}
return 0;
-release:
- amdgpu_ras_release_bad_pages(adev);
free:
kfree((*data)->bps);
- kfree((*data)->bps_bo);
kfree(*data);
con->eh_data = NULL;
out:
return 0;
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
- kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
struct ras_err_handler_data {
/* point to bad page records array */
struct eeprom_table_record *bps;
- /* point to reserved bo array */
- struct amdgpu_bo **bps_bo;
/* the count of entries */
int count;
/* the space can place new entries */
int space_left;
- /* last reserved entry's index + 1 */
- int last_reserved;
};
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
-int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (in_task())
- amdgpu_ras_reserve_bad_pages(adev);
-
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
#define AMDGPU_POISON 0xd0bed0be
+struct amdgpu_vram_reservation {
+ struct list_head node;
+ struct drm_mm_node mm_node;
+};
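The two-list scheme is easiest to read as a life cycle. A comment-form sketch using only names introduced by this patch:

```c
/*
 * Life cycle of one amdgpu_vram_reservation:
 *
 *   amdgpu_vram_mgr_reserve_range()
 *       kzalloc() + list_add_tail() -> mgr->reservations_pending
 *       (mm_node not yet inserted into the drm_mm)
 *
 *   amdgpu_vram_mgr_do_reserve()    -- at reserve time and on each free
 *       drm_mm_reserve_node() ok    -> list_move() to mgr->reserved_pages,
 *       usage/vis_usage counters bumped
 *
 *   amdgpu_vram_mgr_fini()
 *       pending entries:  kfree()
 *       reserved entries: drm_mm_remove_node() + kfree()
 */
```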
+
struct amdgpu_vram_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
spinlock_t lock;
+ struct list_head reservations_pending;
+ struct list_head reserved_pages;
atomic64_t usage;
atomic64_t vis_usage;
};
struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
+int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+ uint64_t start, uint64_t size);
+int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+ uint64_t start);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
drm_mm_init(&mgr->mm, 0, man->size);
spin_lock_init(&mgr->lock);
+ INIT_LIST_HEAD(&mgr->reservations_pending);
+ INIT_LIST_HEAD(&mgr->reserved_pages);
/* Add the two VRAM-related sysfs files */
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
int ret;
+ struct amdgpu_vram_reservation *rsv, *temp;
ttm_resource_manager_set_used(man, false);
return;
spin_lock(&mgr->lock);
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
+ kfree(rsv);
+
+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
+ drm_mm_remove_node(&rsv->mm_node);
+ kfree(rsv);
+ }
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
return usage;
}
+static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct drm_mm *mm = &mgr->mm;
+ struct amdgpu_vram_reservation *rsv, *temp;
+ uint64_t vis_usage;
+
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
+ if (drm_mm_reserve_node(mm, &rsv->mm_node))
+ continue;
+
+ dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
+ rsv->mm_node.start, rsv->mm_node.size);
+
+ vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+ atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
+ list_move(&rsv->node, &mgr->reserved_pages);
+ }
+}
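Why `do_reserve()` is also hooked into the free path (final hunk of this patch): a reported bad page may still be inside a live allocation, in which case `drm_mm_reserve_node()` returns an error and the entry stays pending; each free is a fresh chance to fence the page off. A condensed sketch of that call site, assuming it is `amdgpu_vram_mgr_del()` (the hunk itself only shows the surrounding lines):

```c
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *mem)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

        spin_lock(&mgr->lock);
        /* ... return this BO's drm_mm nodes and tally usage ... */

        /* The space just freed may cover a pending bad page, so retry
         * all pending reservations before dropping the lock. */
        amdgpu_vram_mgr_do_reserve(man);
        spin_unlock(&mgr->lock);

        /* ... usage accounting elided ... */
}
```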
+
+/**
+ * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
+ *
+ * @man: TTM memory type manager
+ * @start: start address of the range in VRAM, in bytes
+ * @size: size of the range, in bytes
+ *
+ * Reserve memory from the start address with the specified size in VRAM;
+ * if the range cannot be reserved yet, the request is queued on
+ * reservations_pending and retried on every free. Returns 0, or -ENOMEM
+ * if the bookkeeping allocation fails.
+ */
+int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+ uint64_t start, uint64_t size)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_vram_reservation *rsv;
+
+ rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
+ if (!rsv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&rsv->node);
+ rsv->mm_node.start = start >> PAGE_SHIFT;
+ rsv->mm_node.size = size >> PAGE_SHIFT;
+
+ spin_lock(&mgr->lock);
+ list_add_tail(&rsv->node, &mgr->reservations_pending);
+ amdgpu_vram_mgr_do_reserve(man);
+ spin_unlock(&mgr->lock);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_query_page_status - query the reservation status
+ *
+ * @man: TTM memory type manager
+ * @start: page number of the page to query (same page units as drm_mm_node.start)
+ *
+ * Returns:
+ * -EBUSY: the page is still held in the pending list
+ * 0: the page has been reserved
+ * -ENOENT: the page is not covered by any reservation
+ */
+int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+ uint64_t start)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_vram_reservation *rsv;
+ int ret;
+
+ spin_lock(&mgr->lock);
+
+ list_for_each_entry(rsv, &mgr->reservations_pending, node) {
+ if ((rsv->mm_node.start <= start) &&
+ (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ list_for_each_entry(rsv, &mgr->reserved_pages, node) {
+ if ((rsv->mm_node.start <= start) &&
+ (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = -ENOENT;
+out:
+ spin_unlock(&mgr->lock);
+ return ret;
+}
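One subtlety worth calling out: the two new entry points use different units. `amdgpu_vram_mgr_reserve_range()` takes a byte address and size and shifts them down by `PAGE_SHIFT`, while `amdgpu_vram_mgr_query_page_status()` compares its argument directly against `drm_mm_node.start`, i.e. in page units; that is why the RAS hunks pass `retired_page << AMDGPU_GPU_PAGE_SHIFT` to the former and a bare `retired_page` to the latter. A hedged usage sketch (`reserve_and_check` is illustrative only):

```c
/* 'man' is the TTM_PL_VRAM manager and 'bp' a recorded bad page,
 * as in the RAS hunks above. */
static void reserve_and_check(struct ttm_resource_manager *man,
                              struct eeprom_table_record *bp)
{
        uint64_t pfn = bp->retired_page;        /* GPU page number */

        /* reserve_range() wants bytes; it shifts down internally. */
        amdgpu_vram_mgr_reserve_range(man, pfn << AMDGPU_GPU_PAGE_SHIFT,
                                      AMDGPU_GPU_PAGE_SIZE);

        /* query_page_status() compares in pages, so pass the bare page
         * number (this relies on AMDGPU_GPU_PAGE_SHIFT matching the CPU
         * PAGE_SHIFT, i.e. 4K pages). */
        switch (amdgpu_vram_mgr_query_page_status(man, pfn)) {
        case 0:         /* reserved and fenced off */
                break;
        case -EBUSY:    /* still on the pending list */
                break;
        default:        /* -ENOENT: not a known bad page */
                break;
        }
}
```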
+
/**
* amdgpu_vram_mgr_virt_start - update virtual start address
*
vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
++nodes;
}
+ amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
atomic64_sub(usage, &mgr->usage);