gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
- gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_ggtt.o \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
+ gt/intel_gt_buffer_pool.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
goto err_unpin;
}
- err = intel_engine_pool_mark_active(pool, rq);
+ err = intel_gt_buffer_pool_mark_active(pool, rq);
if (err)
goto err_request;
err_unmap:
i915_gem_object_unpin_map(pool->obj);
out_pool:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
static int eb_parse(struct i915_execbuffer *eb)
{
struct drm_i915_private *i915 = eb->i915;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
int err;
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
- pool = intel_engine_get_pool(eb->engine, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
if (IS_ERR(pool))
return PTR_ERR(pool);
err_shadow:
i915_vma_unpin(shadow);
err:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
*/
eb.request->batch = batch;
if (batch->private)
- intel_engine_pool_mark_active(batch->private, eb.request);
+ intel_gt_buffer_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb, batch);
i915_vma_unpin(batch);
err_parse:
if (batch->private)
- intel_engine_pool_put(batch->private);
+ intel_gt_buffer_pool_put(batch->private);
err_vma:
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
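Every call site keeps the same acquire/mark/release discipline; only the
lookup key changes from the engine to its gt. A minimal sketch of that
lifecycle (hypothetical caller, for illustration only):

	/* Borrow a one-page scratch object for the lifetime of rq */
	static int use_pool_buffer(struct intel_gt *gt, struct i915_request *rq)
	{
		struct intel_gt_buffer_pool_node *pool;
		int err;

		pool = intel_gt_get_buffer_pool(gt, PAGE_SIZE);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		/* Tie the node to rq so it stays out of the pool until retire */
		err = intel_gt_buffer_pool_mark_active(pool, rq);
		if (!err) {
			/* ... emit commands that use pool->obj ... */
		}

		/* Drop our reference; the node recycles once rq retires */
		intel_gt_buffer_pool_put(pool);
		return err;
	}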
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 offset;
u64 count;
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
if (unlikely(err))
return err;
- return intel_engine_pool_mark_active(vma->private, rq);
+ return intel_gt_buffer_pool_mark_active(vma->private, rq);
}
void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
{
i915_vma_unpin(vma);
- intel_engine_pool_put(vma->private);
+ intel_gt_buffer_pool_put(vma->private);
intel_engine_pm_put(ce->engine);
}
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
u64 count, rem;
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
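The pool request size is exact arithmetic: the fill loop emits 8 dwords
per SZ_8M block and the copy loop 11, plus one trailing dword for the
batch terminator, all padded out to a whole page. A worked example,
assuming 4KiB pages:

	/* Copy of a 24MiB object:
	 *   count = DIV_ROUND_UP(24MiB, 8MiB)  = 3 blocks
	 *   size  = (1 + 11 * 3) * sizeof(u32) = 136 bytes
	 *   size  = round_up(136, PAGE_SIZE)   = 4096
	 * so these batches come from the one-page bucket until the copy
	 * spans more than 93 blocks (~744MiB).
	 */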
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_vma.h"
struct drm_i915_gem_object;
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
-
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
cleanup_status_page(engine);
intel_engine_fini_retire(engine);
- intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
- intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "gem/i915_gem_object.h"
-
-#include "i915_drv.h"
-#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
-
-static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
-{
- return container_of(pool, struct intel_engine_cs, pool);
-}
-
-static struct list_head *
-bucket_for_size(struct intel_engine_pool *pool, size_t sz)
-{
- int n;
-
- /*
- * Compute a power-of-two bucket, but throw everything greater than
- * 16KiB into the same bucket: i.e. the buckets hold objects of
- * (1 page, 2 pages, 4 pages, 8+ pages).
- */
- n = fls(sz >> PAGE_SHIFT) - 1;
- if (n >= ARRAY_SIZE(pool->cache_list))
- n = ARRAY_SIZE(pool->cache_list) - 1;
-
- return &pool->cache_list[n];
-}
-
-static void node_free(struct intel_engine_pool_node *node)
-{
- i915_gem_object_put(node->obj);
- i915_active_fini(&node->active);
- kfree(node);
-}
-
-static int pool_active(struct i915_active *ref)
-{
- struct intel_engine_pool_node *node =
- container_of(ref, typeof(*node), active);
- struct dma_resv *resv = node->obj->base.resv;
- int err;
-
- if (dma_resv_trylock(resv)) {
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
-
- err = i915_gem_object_pin_pages(node->obj);
- if (err)
- return err;
-
- /* Hide this pinned object from the shrinker until retired */
- i915_gem_object_make_unshrinkable(node->obj);
-
- return 0;
-}
-
-__i915_active_call
-static void pool_retire(struct i915_active *ref)
-{
- struct intel_engine_pool_node *node =
- container_of(ref, typeof(*node), active);
- struct intel_engine_pool *pool = node->pool;
- struct list_head *list = bucket_for_size(pool, node->obj->base.size);
- unsigned long flags;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
- i915_gem_object_unpin_pages(node->obj);
-
- /* Return this object to the shrinker pool */
- i915_gem_object_make_purgeable(node->obj);
-
- spin_lock_irqsave(&pool->lock, flags);
- list_add(&node->link, list);
- spin_unlock_irqrestore(&pool->lock, flags);
-}
-
-static struct intel_engine_pool_node *
-node_create(struct intel_engine_pool *pool, size_t sz)
-{
- struct intel_engine_cs *engine = to_engine(pool);
- struct intel_engine_pool_node *node;
- struct drm_i915_gem_object *obj;
-
- node = kmalloc(sizeof(*node),
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->pool = pool;
- i915_active_init(&node->active, pool_active, pool_retire);
-
- obj = i915_gem_object_create_internal(engine->i915, sz);
- if (IS_ERR(obj)) {
- i915_active_fini(&node->active);
- kfree(node);
- return ERR_CAST(obj);
- }
-
- i915_gem_object_set_readonly(obj);
-
- node->obj = obj;
- return node;
-}
-
-static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
-{
- if (intel_engine_is_virtual(engine))
- engine = intel_virtual_engine_get_sibling(engine, 0);
-
- GEM_BUG_ON(!engine);
- return &engine->pool;
-}
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
-{
- struct intel_engine_pool *pool = lookup_pool(engine);
- struct intel_engine_pool_node *node;
- struct list_head *list;
- unsigned long flags;
- int ret;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
- size = PAGE_ALIGN(size);
- list = bucket_for_size(pool, size);
-
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry(node, list, link) {
- if (node->obj->base.size < size)
- continue;
- list_del(&node->link);
- break;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-
- if (&node->link == list) {
- node = node_create(pool, size);
- if (IS_ERR(node))
- return node;
- }
-
- ret = i915_active_acquire(&node->active);
- if (ret) {
- node_free(node);
- return ERR_PTR(ret);
- }
-
- return node;
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool)
-{
- int n;
-
- spin_lock_init(&pool->lock);
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
- INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-void intel_engine_pool_park(struct intel_engine_pool *pool)
-{
- int n;
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- struct list_head *list = &pool->cache_list[n];
- struct intel_engine_pool_node *node, *nn;
-
- list_for_each_entry_safe(node, nn, list, link)
- node_free(node);
-
- INIT_LIST_HEAD(list);
- }
-}
-
-void intel_engine_pool_fini(struct intel_engine_pool *pool)
-{
- int n;
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
- GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
-}
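bucket_for_size() moves over to the new file verbatim, so the bucketing
maths is unchanged. A worked mapping, assuming 4KiB pages (callers
PAGE_ALIGN the size first, so sz is always at least one page):

	/* n = fls(sz >> PAGE_SHIFT) - 1, clamped to the last slot:
	 *   4KiB   (1 page)      -> fls(1) - 1    = 0
	 *   8-12KiB (2-3 pages)  -> fls(2..3) - 1 = 1
	 *   16-28KiB (4-7 pages) -> fls(4..7) - 1 = 2
	 *   32KiB+  (8+ pages)   -> 3 (the clamp applies from 16 pages up)
	 * Buckets only sort by size class; intel_gt_get_buffer_pool()
	 * still checks node->obj->base.size >= size before reusing a node.
	 */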
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_H
-#define INTEL_ENGINE_POOL_H
-
-#include "intel_engine_pool_types.h"
-#include "i915_active.h"
-#include "i915_request.h"
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
-
-static inline int
-intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
- struct i915_request *rq)
-{
- return i915_active_add_request(&node->active, rq);
-}
-
-static inline void
-intel_engine_pool_put(struct intel_engine_pool_node *node)
-{
- i915_active_release(&node->active);
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool);
-void intel_engine_pool_park(struct intel_engine_pool *pool);
-void intel_engine_pool_fini(struct intel_engine_pool *pool);
-
-#endif /* INTEL_ENGINE_POOL_H */
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_TYPES_H
-#define INTEL_ENGINE_POOL_TYPES_H
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-#include "i915_active_types.h"
-
-struct drm_i915_gem_object;
-
-struct intel_engine_pool {
- spinlock_t lock;
- struct list_head cache_list[4];
-};
-
-struct intel_engine_pool_node {
- struct i915_active active;
- struct drm_i915_gem_object *obj;
- struct list_head link;
- struct intel_engine_pool *pool;
-};
-
-#endif /* INTEL_ENGINE_POOL_TYPES_H */
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
} pmu;
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct intel_engine_pool pool;
-
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
struct i915_wa_list ctx_wa_list;
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
+ intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
+ intel_gt_fini_buffer_pool(gt);
}
void intel_gt_driver_late_release(struct intel_gt *gt)
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_buffer_pool.h"
+
+static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
+{
+ return container_of(pool, struct intel_gt, buffer_pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
+{
+ int n;
+
+ /*
+ * Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+ n = fls(sz >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+
+ return &pool->cache_list[n];
+}
+
+static void node_free(struct intel_gt_buffer_pool_node *node)
+{
+ i915_gem_object_put(node->obj);
+ i915_active_fini(&node->active);
+ kfree(node);
+}
+
+static void pool_free_work(struct work_struct *wrk)
+{
+ struct intel_gt_buffer_pool *pool =
+ container_of(wrk, typeof(*pool), work.work);
+ struct intel_gt_buffer_pool_node *node, *next;
+ unsigned long old = jiffies - HZ;
+ bool active = false;
+ LIST_HEAD(stale);
+ int n;
+
+ /* Free buffers that have not been used in the past second */
+ spin_lock_irq(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+
+ /* Most recent at head; oldest at tail */
+ list_for_each_entry_safe_reverse(node, next, list, link) {
+ /* Node used within the last second; everything nearer the head is newer */
+ if (time_after(node->age, old))
+ break;
+
+ list_move(&node->link, &stale);
+ }
+ active |= !list_empty(list);
+ }
+ spin_unlock_irq(&pool->lock);
+
+ list_for_each_entry_safe(node, next, &stale, link)
+ node_free(node);
+
+ if (active)
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
+
+static int pool_active(struct i915_active *ref)
+{
+ struct intel_gt_buffer_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct dma_resv *resv = node->obj->base.resv;
+ int err;
+
+ if (dma_resv_trylock(resv)) {
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+
+ err = i915_gem_object_pin_pages(node->obj);
+ if (err)
+ return err;
+
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+
+ return 0;
+}
+
+__i915_active_call
+static void pool_retire(struct i915_active *ref)
+{
+ struct intel_gt_buffer_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct intel_gt_buffer_pool *pool = node->pool;
+ struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+ unsigned long flags;
+
+ i915_gem_object_unpin_pages(node->obj);
+
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ node->age = jiffies;
+ list_add(&node->link, list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
+
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+{
+ struct intel_gt *gt = to_gt(pool);
+ struct intel_gt_buffer_pool_node *node;
+ struct drm_i915_gem_object *obj;
+
+ node = kmalloc(sizeof(*node),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->pool = pool;
+ i915_active_init(&node->active, pool_active, pool_retire);
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj)) {
+ i915_active_fini(&node->active);
+ kfree(node);
+ return ERR_CAST(obj);
+ }
+
+ i915_gem_object_set_readonly(obj);
+
+ node->obj = obj;
+ return node;
+}
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ struct intel_gt_buffer_pool_node *node;
+ struct list_head *list;
+ unsigned long flags;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+ list = bucket_for_size(pool, size);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry(node, list, link) {
+ if (node->obj->base.size < size)
+ continue;
+ list_del(&node->link);
+ break;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (&node->link == list) {
+ node = node_create(pool, size);
+ if (IS_ERR(node))
+ return node;
+ }
+
+ ret = i915_active_acquire(&node->active);
+ if (ret) {
+ node_free(node);
+ return ERR_PTR(ret);
+ }
+
+ return node;
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ int n;
+
+ spin_lock_init(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+ INIT_DELAYED_WORK(&pool->work, pool_free_work);
+}
+
+static void pool_free_imm(struct intel_gt_buffer_pool *pool)
+{
+ int n;
+
+ spin_lock_irq(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct intel_gt_buffer_pool_node *node, *next;
+ struct list_head *list = &pool->cache_list[n];
+
+ list_for_each_entry_safe(node, next, list, link)
+ node_free(node);
+ INIT_LIST_HEAD(list);
+ }
+ spin_unlock_irq(&pool->lock);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+ if (cancel_delayed_work_sync(&pool->work))
+ pool_free_imm(pool);
+}
+
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ int n;
+
+ intel_gt_flush_buffer_pool(gt);
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
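The delayed worker is what replaces intel_engine_pool_park(): rather
than freeing the cache whenever an engine parks, pool_retire() stamps
node->age, and the worker reaps anything idle for a second or more,
re-arming itself while any bucket is still populated. The per-node
decision, restated as a standalone helper (illustrative, not in the
patch):

	/* Would pool_free_work() running right now reap this node? */
	static bool node_is_stale(const struct intel_gt_buffer_pool_node *node)
	{
		unsigned long cutoff = jiffies - HZ; /* one second ago */

		/* Stale unless returned to the pool after the cutoff */
		return !time_after(node->age, cutoff);
	}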
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
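mark_active() and put() stay trivial wrappers; together with the
i915_active callbacks in intel_gt_buffer_pool.c they are the whole
lifetime story:

	/* Reference flow for one node:
	 *   intel_gt_get_buffer_pool()         -> i915_active_acquire()
	 *     first acquire runs pool_active(): pin pages, unshrinkable
	 *   intel_gt_buffer_pool_mark_active() -> i915_active_add_request()
	 *   intel_gt_buffer_pool_put()         -> i915_active_release()
	 *     last release + request retire runs pool_retire():
	 *     unpin, make purgeable, stamp ->age, back onto its bucket
	 */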
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_gt_buffer_pool {
+ spinlock_t lock;
+ struct list_head cache_list[4];
+ struct delayed_work work;
+};
+
+struct intel_gt_buffer_pool_node {
+ struct i915_active active;
+ struct drm_i915_gem_object *obj;
+ struct list_head link;
+ struct intel_gt_buffer_pool *pool;
+ unsigned long age;
+};
+
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
*/
struct i915_address_space *vm;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ *
+ * Buffers older than 1s are periodically reaped from the pool,
+ * or may be reclaimed by the shrinker before then.
+ */
+ struct intel_gt_buffer_pool buffer_pool;
+
struct i915_vma *scratch;
};
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
ce = create_kernel_context(engine);
if (IS_ERR(ce))
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
+ if (val & DROP_FREED)
+ intel_gt_flush_buffer_pool(gt);
+
return 0;
}
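For testing, the flush is now reachable from userspace through the
drop-caches interface above. A hedged sketch, assuming debugfs is
mounted at /sys/kernel/debug, the device is minor 0, and DROP_FREED
carries its usual BIT(4) value; none of this is part of the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/dri/0/i915_gem_drop_caches";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		if (write(fd, "0x10", 4) < 0) /* 0x10 == DROP_FREED (assumed) */
			perror("write");

		close(fd);
		return 0;
	}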