kmalloc uses power-of-two slab buckets for small allocations (up to a
few pages). Since i915_page_directory is a page of pointers plus a
couple of extra members, this is rounded up to 8K, and we waste nearly
50% of that
allocation. Long term this leads to poor memory utilisation, bloating
the kernel footprint, but the problem is exacerbated by our conservative
preallocation scheme for binding VMAs. As we are required to allocate all
levels for each vma just in case we need to insert them upon binding,
this leads to a large multiplication factor for a single-page vma. By
halving the allocation we need for the page directory structure, we
halve the impact of that factor, hopefully bringing workloads that once
fitted into memory back to fitting into memory.
We maintain the split between i915_page_directory and i915_page_table as
we only need half the allocation for the lowest, most populous, level.
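As a rough illustration of the size arithmetic, here is a standalone
userspace sketch using simplified stand-in types (the real structs are
the ones touched in the diff below); sizes assume 64-bit pointers:

  #include <stdio.h>

  struct fake_pt {            /* stand-in for struct i915_page_table */
          void *base;
          int used;
          void *stash;
  };

  struct pd_before {          /* entry[] embedded: just over 4096 bytes */
          struct fake_pt pt;
          int lock;
          void *entry[512];
  };

  struct pd_after {           /* entry[] allocated separately */
          struct fake_pt pt;
          int lock;
          void **entry;
  };

  /* kmalloc rounds small allocations up to a power-of-two slab bucket */
  static size_t bucket(size_t sz)
  {
          size_t b = 8;

          while (b < sz)
                  b <<= 1;
          return b;
  }

  int main(void)
  {
          printf("before: %zu bytes -> %zu byte bucket\n",
                 sizeof(struct pd_before),
                 bucket(sizeof(struct pd_before)));
          printf("after:  %zu + %zu bytes -> %zu + %zu byte buckets\n",
                 sizeof(struct pd_after), 512 * sizeof(void *),
                 bucket(sizeof(struct pd_after)),
                 bucket(512 * sizeof(void *)));
          return 0;
  }

On a 64-bit build the embedded array pushes the struct just past 4KiB,
landing it in the 8KiB bucket; splitting the entry array out gives a
4KiB bucket for the pointers plus a small bucket for the struct itself,
roughly halving the cost per page directory.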
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200729164219.5737-3-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
gen6_for_all_pdes(pt, pd, pde)
if (pt)
- free_px(&ppgtt->base.vm, pt);
+ free_pt(&ppgtt->base.vm, pt);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
mutex_destroy(&ppgtt->flush);
mutex_destroy(&ppgtt->pin_mutex);
- kfree(ppgtt->base.pd);
+
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
static int pd_vma_set_pages(struct i915_vma *vma)
if (!pt || atomic_read(&pt->used))
continue;
- free_px(&ppgtt->base.vm, pt);
+ free_pt(&ppgtt->base.vm, pt);
pd->entry[pde] = NULL;
}
ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+ ppgtt->base.pd = __alloc_pd(I915_PDES);
if (!ppgtt->base.pd) {
err = -ENOMEM;
goto err_free;
err_scratch:
free_scratch(&ppgtt->base.vm);
err_pd:
- kfree(ppgtt->base.pd);
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
err_free:
mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt);
} while (pde++, --count);
}
- free_px(vm, pd);
+ free_px(vm, &pd->pt, lvl);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
}
if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
+ free_px(vm, pt, lvl);
} while (idx++, --len);
return start;
err = pin_pt_dma(vm, pde->pt.base);
if (err) {
i915_gem_object_put(pde->pt.base);
- kfree(pde);
+ free_pd(vm, pde);
return err;
}
struct i915_page_directory *pd;
int err;
- GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(count > I915_PDES);
- pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ pd = __alloc_pd(count);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(pd->pt.base)) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
}
err = pin_pt_dma(vm, pd->pt.base);
- if (err) {
- i915_gem_object_put(pd->pt.base);
- kfree(pd);
- return ERR_PTR(err);
- }
+ if (err)
+ goto err_pd;
fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
atomic_inc(px_used(pd)); /* mark as pinned */
return pd;
+
+err_pd:
+ free_pd(vm, pd);
+ return ERR_PTR(err);
}
/*
struct i915_page_directory {
struct i915_page_table pt;
spinlock_t lock;
- void *entry[512];
+ void **entry;
};
#define __px_choose_expr(x, type, expr, other) \
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
-struct i915_page_directory *__alloc_pd(size_t sz);
+struct i915_page_directory *__alloc_pd(int npde);
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-void free_pt(struct i915_address_space *vm, struct i915_page_table *pt);
-#define free_px(vm, px) free_pt(vm, px_pt(px))
+void free_px(struct i915_address_space *vm,
+ struct i915_page_table *pt, int lvl);
+#define free_pt(vm, px) free_px(vm, px, 0)
+#define free_pd(vm, px) free_px(vm, px_pt(px), 1)
void
__set_pd_entry(struct i915_page_directory * const pd,
return pt;
}
-struct i915_page_directory *__alloc_pd(size_t sz)
+struct i915_page_directory *__alloc_pd(int count)
{
struct i915_page_directory *pd;
- pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
+ pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd->entry)) {
+ kfree(pd);
+ return NULL;
+ }
+
spin_lock_init(&pd->lock);
return pd;
}
{
struct i915_page_directory *pd;
- pd = __alloc_pd(sizeof(*pd));
+ pd = __alloc_pd(I915_PDES);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(pd->pt.base)) {
+ kfree(pd->entry);
kfree(pd);
return ERR_PTR(-ENOMEM);
}
return pd;
}
-void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
+void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
- i915_gem_object_put(pt->base);
+ BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));
+
+ if (lvl) {
+ struct i915_page_directory *pd =
+ container_of(pt, typeof(*pd), pt);
+ kfree(pd->entry);
+ }
+
+ if (pt->base)
+ i915_gem_object_put(pt->base);
+
kfree(pt);
}
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
/* Each thread pre-pins the pd, and we may have a thread per pde. */
- GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);
atomic_inc(px_used(pd));
pd->entry[idx] = to;
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
while ((pt = stash->pt[n])) {
stash->pt[n] = pt->stash;
- free_px(vm, pt);
+ free_px(vm, pt, n);
}
}
}