/* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(i915);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
- list_add(&obj->userfault_link, &i915->mm.userfault_list);
+ list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&i915->mm.userfault_wakeref,
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
GEM_BUG_ON(!obj->userfault_count);
{
GEM_TRACE("\n");
- intel_wakeref_auto(&i915->mm.userfault_wakeref, 0);
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex);
{
int i;
- for (i = 0; i < i915->num_fence_regs; i++) {
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->fence_regs[i].vma);
+ vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(i915->drm.anon_inode->i_mapping,
*/
#include "i915_drv.h"
+#include "i915_gem_fence_reg.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
assert_rpm_wakelock_held(dev_priv);
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
int i;
intel_runtime_pm_get(dev_priv);
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
- struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
unsigned int frontbuffer_bits;
int pin_count = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (i915_vma_is_pinned(vma))
- pin_count++;
- }
- seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+
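+ /*
+ * Walk the vma list under obj->vma.lock, dropping the lock
+ * around the printout for each bound vma and retaking it
+ * before advancing to the next entry.
+ */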
+ spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
+ spin_unlock(&obj->vma.lock);
+
+ if (i915_vma_is_pinned(vma))
+ pin_count++;
+
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size,
vma->fence->id,
i915_active_request_isset(&vma->last_fence) ? "*" : "");
seq_puts(m, ")");
+
+ spin_lock(&obj->vma.lock);
}
+ spin_unlock(&obj->vma.lock);
+
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+ if (obj->pin_global)
+ seq_printf(m, " (global)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i, ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ unsigned int i;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+ seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
+
+ /* No struct_mutex needed; RCU keeps each object alive as we peek */
+ rcu_read_lock();
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, dev_priv->fence_regs[i].pin_count);
+ i, i915->ggtt.fence_regs[i].pin_count);
if (!vma)
seq_puts(m, "unused");
else
describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
+ rcu_read_unlock();
- mutex_unlock(&dev->struct_mutex);
return 0;
}
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs;
+ value = dev_priv->ggtt.num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
intel_uncore_sanitize(dev_priv);
intel_gt_init_workarounds(dev_priv);
- i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
*/
struct list_head shrink_list;
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
- */
- struct list_head userfault_list;
-
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
/**
* List of objects which are pending destruction.
*/
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
/* protects panel power sequencer state */
struct mutex pps_mutex;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
unsigned int max_cdclk_freq;
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-/* i915_gem_fence_reg.c */
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv);
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-
-void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
return 0;
}
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
int i;
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link)
+ &i915->ggtt.userfault_list, userfault_link)
__i915_gem_object_release_mmap(obj);
- /* The fence will be lost when the device powers down. If any were
+ /*
+ * The fences will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- /* Ideally we want to assert that the fence register is not
+ /*
+ * Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that both violates
* our tracking of activity and associated locking/barriers,
{
GEM_BUG_ON(dev_priv->gt.awake);
- intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);
+ intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_disable_gt_powersave(dev_priv);
i915_gem_sanitize(i915);
}
-void
-i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
- dev_priv->num_fence_regs = 32;
- else if (INTEL_GEN(dev_priv) >= 4 ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- if (intel_vgpu_active(dev_priv))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
-
- /* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- fence->i915 = dev_priv;
- fence->id = i;
- list_add_tail(&fence->link, &dev_priv->mm.fence_list);
- }
- i915_gem_restore_fences(dev_priv);
-
- i915_gem_detect_bit_6_swizzle(dev_priv);
-}
-
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
INIT_LIST_HEAD(&i915->mm.purge_list);
INIT_LIST_HEAD(&i915->mm.shrink_list);
- INIT_LIST_HEAD(&i915->mm.fence_list);
-
- INIT_LIST_HEAD(&i915->mm.userfault_list);
- intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);
i915_gem_init__objects(i915);
}
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
/**
* DOC: fence register handling
#define pipelined 0
-static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
}
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i915_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
}
}
-static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i830_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
}
}
-static void fence_write(struct drm_i915_fence_reg *fence,
+static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
/*
fence->dirty = false;
}
-static int fence_update(struct drm_i915_fence_reg *fence,
+static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->mm.fence_list);
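+ /* Detached from its old vma; move to the head of the LRU for reuse */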
+ list_move(&fence->link, &fence->i915->ggtt.fence_list);
}
/*
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->mm.fence_list);
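+ /* Fence now in use; move to the tail of the LRU so it is scanned last */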
+ list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
}
intel_runtime_pm_put(fence->i915, wakeref);
*/
int i915_vma_put_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence = vma->fence;
+ struct i915_fence_reg *fence = vma->fence;
if (!fence)
return 0;
return fence_update(fence, NULL);
}
-static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
- list_for_each_entry(fence, &i915->mm.fence_list, link) {
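+ /* fence_list is kept in LRU order; the first unpinned fence is our victim */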
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->pin_count)
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
fence->pin_count++;
if (!fence->dirty) {
list_move_tail(&fence->link,
- &fence->i915->mm.fence_list);
+ &fence->i915->ggtt.fence_list);
return 0;
}
} else if (set) {
* This function walks the fence regs looking for a free one and removes
* it from the fence_list. It is used to reserve a fence for vGPU use.
*/
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
int count;
int ret;
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &i915->mm.fence_list, link)
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
*
* This function adds a reserved fence register from vGPU back to the fence_list.
*/
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
+void i915_unreserve_fence(struct i915_fence_reg *fence)
{
lockdep_assert_held(&fence->i915->drm.struct_mutex);
- list_add(&fence->link, &fence->i915->mm.fence_list);
+ list_add(&fence->link, &fence->i915->ggtt.fence_list);
}
/**
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < i915->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &i915->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-void
-i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915)
+static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
struct intel_uncore *uncore = &i915->uncore;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void
-i915_gem_swizzle_page(struct page *page)
+static void i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
i++;
}
}
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ int num_fences;
+ int i;
+
+ INIT_LIST_HEAD(&ggtt->fence_list);
+ INIT_LIST_HEAD(&ggtt->userfault_list);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, i915);
+
+ detect_bit_6_swizzle(i915);
+
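+ /*
+ * Gen7+ (Valleyview/Cherryview excepted) has 32 fence registers,
+ * gen4+ and the later gen3 parts (945G/GM, G33, Pineview) have 16,
+ * and the rest only 8.
+ */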
+ if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ num_fences = 32;
+ else if (INTEL_GEN(i915) >= 4 ||
+ IS_I945G(i915) || IS_I945GM(i915) ||
+ IS_G33(i915) || IS_PINEVIEW(i915))
+ num_fences = 16;
+ else
+ num_fences = 8;
+
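+ /* When running as a vGPU guest, the host dictates our fence allocation */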
+ if (intel_vgpu_active(i915))
+ num_fences = intel_uncore_read(&i915->uncore,
+ vgtif_reg(avail_rs.fence_num));
+
+ /* Initialize fence registers to zero */
+ for (i = 0; i < num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ fence->i915 = i915;
+ fence->id = i;
+ list_add_tail(&fence->link, &ggtt->fence_list);
+ }
+ ggtt->num_fences = num_fences;
+
+ i915_gem_restore_fences(i915);
+}
#define __I915_FENCE_REG_H__
#include <linux/list.h>
+#include <linux/types.h>
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_ggtt;
struct i915_vma;
+struct sg_table;
#define I965_FENCE_PAGE 4096UL
-struct drm_i915_fence_reg {
+struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
bool dirty;
};
+/* i915_gem_fence_reg.c */
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+void i915_unreserve_fence(struct i915_fence_reg *fence);
+
+void i915_gem_restore_fences(struct drm_i915_private *i915);
+
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+
#endif
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
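+ /* Fence registers operate on GGTT addresses, so they live with the GGTT */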
+ i915_ggtt_init_fences(ggtt);
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
#include <linux/pagevec.h>
#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_selftest.h"
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_file_private;
-struct drm_i915_fence_reg;
struct drm_i915_gem_object;
struct i915_vma;
u32 pin_bias;
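+
+ /* Hardware fence registers, tracked in LRU order for allocation */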
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /** List of all objects in gtt_space, currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
};
int i;
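+ /* The width and layout of the fence registers vary by generation */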
if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
error->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
} else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
error->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
error->fence[i] =
intel_uncore_read(uncore, FENCE_REG(i));
}
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;