return 0;
}
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
drm_dma_handle_t *phys;
struct scatterlist *sg;
char *vaddr;
int i;
+ int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Always aligning to the object size allows a single allocation
* to handle all possible callers, and given typical object sizes,
roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
- st = ERR_CAST(page);
+ err = PTR_ERR(page);
goto err_phys;
}
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
sg_dma_len(sg) = obj->base.size;
obj->phys_handle = phys;
- return st;
+
+ __i915_gem_object_set_pages(obj, st);
+
+ return 0;
err_phys:
drm_pci_free(obj->base.dev, phys);
- return st;
+
+ return err;
}
static void __start_cpu_write(struct drm_i915_gem_object *obj)
return true;
}
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const unsigned long page_count = obj->base.size / PAGE_SIZE;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
/* Get the list of pages out of our struct file. They'll be pinned
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
- return st;
+ __i915_gem_object_set_pages(obj, st);
+
+ return 0;
err_sg:
sg_mark_end(sg);
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ERR_PTR(ret);
+ return ret;
}
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct sg_table *pages;
+ int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- pages = obj->ops->get_pages(obj);
- if (unlikely(IS_ERR(pages)))
- return PTR_ERR(pages);
+ err = obj->ops->get_pages(obj);
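+ /* On success the backend has installed obj->mm.pages itself via
+  * __i915_gem_object_set_pages(), so it must be valid here.
+  */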
+ GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
- __i915_gem_object_set_pages(obj, pages);
- return 0;
+ return err;
}
/* Ensure that the associated pages are gathered from the backing storage
return drm_gem_dmabuf_export(dev, &exp_info);
}
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- return dma_buf_map_attachment(obj->base.import_attach,
- DMA_BIDIRECTIONAL);
+ struct sg_table *pages;
+
+ pages = dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages);
+
+ return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
kfree(st);
}
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
create_st:
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
npages = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = st->sgl;
* object are only valid whilst active and pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
- return st;
+
+ __i915_gem_object_set_pages(obj, st);
+
+ return 0;
err:
sg_set_page(sg, NULL, 0, 0);
sg_mark_end(sg);
internal_free_pages(st);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
}
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
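+ /* get_pages returns 0 or a negative errno; the backend installs the
+  * pages itself via __i915_gem_object_set_pages() before returning.
+  */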
- struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*pwrite)(struct drm_i915_gem_object *,
return st;
}
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- return i915_pages_create_for_stolen(obj->base.dev,
- obj->stolen->start,
- obj->stolen->size);
+ struct sg_table *pages =
+ i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages);
+
+ return 0;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
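+ /* Install the pages here; callers use the returned table only for error checking */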
+ __i915_gem_object_set_pages(obj, st);
+
return st;
}
pages = __i915_gem_userptr_alloc_pages(obj, pvec,
npages);
if (!IS_ERR(pages)) {
- __i915_gem_object_set_pages(obj, pages);
pinned = 0;
pages = NULL;
}
return ERR_PTR(-EAGAIN);
}
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return ERR_CAST(obj->userptr.work);
+ return PTR_ERR(obj->userptr.work);
else
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
pvec = NULL;
release_pages(pvec, pinned, 0);
kvfree(pvec);
- return pages;
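+ /* 0 on success, -EAGAIN while the worker is still acquiring the pages, or a negative error code */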
+ return PTR_ERR_OR_ZERO(pages);
}
static void
kfree(pages);
}
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if (sg_alloc_table(pages, npages, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = pages->sgl;
if (i915_gem_gtt_prepare_pages(obj, pages))
goto err;
- return pages;
+ __i915_gem_object_set_pages(obj, pages);
+
+ return 0;
err:
huge_free_pages(obj, pages);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
#undef GFP
}
kfree(pages);
}
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rem = round_up(obj->base.size, BIT(31)) >> 31;
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
rem = obj->base.size;
GEM_BUG_ON(rem);
obj->mm.madv = I915_MADV_DONTNEED;
- return pages;
+
+ __i915_gem_object_set_pages(obj, pages);
+
+ return 0;
#undef GFP
}
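
For reference, a minimal sketch of a backend under the new contract, assuming the two-argument __i915_gem_object_set_pages() used throughout this series; example_get_pages() and example_obj_page() are illustrative names only, not part of the patch:

static int example_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Single entry covering the whole object, purely for illustration */
	sg_set_page(st->sgl, example_obj_page(obj), obj->base.size, 0);

	/* The backend, not the caller, installs the pages before returning 0 */
	__i915_gem_object_set_pages(obj, st);

	return 0;
}

With every backend installing its own pages, ____i915_gem_object_get_pages() only has to propagate the error code and assert that obj->mm.pages is set on success.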