goto insert;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (err)
goto err;
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}
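+/*
+ * Wait for the GT to idle: keep retiring requests until none remain (or
+ * the timeout expires), then give whatever timeout is left to the uC
+ * backend so it can drain its outstanding G2H responses.
+ */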
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ long remaining_timeout;
+
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
+ &remaining_timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
+ remaining_timeout);
+}
+
int intel_gt_init(struct intel_gt *gt)
{
int err;
void intel_gt_driver_late_release(struct intel_gt *gt);
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
void intel_gt_check_and_clear_faults(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
intel_engine_mask_t engine_mask);
GEM_BUG_ON(engine->retire);
}
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+ long *remaining_timeout)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
if (flush_submission(gt, timeout)) /* Wait, there's more! */
active_count++;
- return active_count ? timeout : 0;
-}
-
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
-{
- /* If the device is asleep, we have no requests outstanding */
- if (!intel_gt_pm_is_awake(gt))
- return 0;
-
- while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
- cond_resched();
- if (signal_pending(current))
- return -EINTR;
- }
+ if (remaining_timeout)
+ *remaining_timeout = timeout;
- return timeout;
+ return active_count ? timeout : 0;
}
static void retire_work_handler(struct work_struct *work)
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
+#include <linux/stddef.h>
+
struct intel_engine_cs;
struct intel_gt;
struct intel_timeline;
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+ long *remaining_timeout);
static inline void intel_gt_retire_requests(struct intel_gt *gt)
{
- intel_gt_retire_requests_timeout(gt, 0);
+ intel_gt_retire_requests_timeout(gt, 0, NULL);
}
void intel_engine_init_retire(struct intel_engine_cs *engine);
struct intel_timeline *tl);
void intel_engine_fini_retire(struct intel_engine_cs *engine);
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
-
void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);
void intel_gt_unpark_requests(struct intel_gt *gt);
spinlock_t irq_lock;
unsigned int msg_enabled_mask;
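+ /**
+  * @outstanding_submission_g2h: number of G2H responses still expected
+  * from the GuC for submission H2Gs; used to tell when the backend is idle.
+  */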
+ atomic_t outstanding_submission_g2h;
+
struct {
void (*reset)(struct intel_guc *guc);
void (*enable)(struct intel_guc *guc);
spin_unlock_irq(&guc->irq_lock);
}
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);
+
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
+ init_waitqueue_head(&ct->wq);
}
static inline const char *guc_ct_buffer_type_to_str(u32 type)
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/ktime.h>
+#include <linux/wait.h>
#include "intel_guc_fwif.h"
struct tasklet_struct receive_tasklet;
+ /** @wq: wait queue for G2H channel */
+ wait_queue_head_t wq;
+
struct {
u16 last_fence; /* last fence used to send request */
xa_store_irq(&guc->context_lookup, id, ce, GFP_ATOMIC);
}
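+/*
+ * Thin wrapper around intel_guc_send_busy_loop() that also counts the G2H
+ * response the GuC now owes us, so intel_guc_wait_for_idle() can tell when
+ * all submission traffic has been processed.
+ */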
+static int guc_submission_send_busy_loop(struct intel_guc *guc,
+ const u32 *action,
+ u32 len,
+ u32 g2h_len_dw,
+ bool loop)
+{
+ int err;
+
+ err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+
+ if (!err && g2h_len_dw)
+ atomic_inc(&guc->outstanding_submission_g2h);
+
+ return err;
+}
+
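+/*
+ * Sleep on the CT wait queue until @wait_var drops to zero, the timeout
+ * expires (-ETIME) or, when interruptible, a signal arrives (-EINTR).
+ */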
+static int guc_wait_for_pending_msg(struct intel_guc *guc,
+ atomic_t *wait_var,
+ bool interruptible,
+ long timeout)
+{
+ const int state = interruptible ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ DEFINE_WAIT(wait);
+
+ might_sleep();
+ GEM_BUG_ON(timeout < 0);
+
+ if (!atomic_read(wait_var))
+ return 0;
+
+ if (!timeout)
+ return -ETIME;
+
+ for (;;) {
+ prepare_to_wait(&guc->ct.wq, &wait, state);
+
+ if (!atomic_read(wait_var))
+ break;
+
+ if (signal_pending_state(state, current)) {
+ timeout = -EINTR;
+ break;
+ }
+
+ if (!timeout) {
+ timeout = -ETIME;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ }
+ finish_wait(&guc->ct.wq, &wait);
+
+ return (timeout < 0) ? timeout : 0;
+}
+
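+/* Wait for all G2H responses owed by GuC submission to be processed. */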
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
+{
+ if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
+ return 0;
+
+ return guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
+ true, timeout);
+}
+
static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
int err;
err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
if (!enabled && !err) {
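+ /* The schedule-enable H2G just sent will be answered with a G2H */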
+ atomic_inc(&guc->outstanding_submission_g2h);
set_context_enabled(ce);
} else if (!enabled) {
clr_context_pending_enable(ce);
offset,
};
- return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, true);
}
static int register_context(struct intel_context *ce)
guc_id,
};
- return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
- G2H_LEN_DW_DEREGISTER_CONTEXT, true);
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_DEREGISTER_CONTEXT,
+ true);
}
static int deregister_context(struct intel_context *ce, u32 guc_id)
intel_context_get(ce);
- intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
- G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
}
static u16 prep_context_pending_disable(struct intel_context *ce)
return ce;
}
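+/*
+ * One fewer G2H outstanding; wake anyone blocked in intel_guc_wait_for_idle()
+ * once the count hits zero.
+ */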
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+ if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+ wake_up_all(&guc->ct.wq);
+}
+
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
const u32 *msg,
u32 len)
lrc_destroy(&ce->ref);
}
+ decr_outstanding_submission_g2h(guc);
+
return 0;
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
+ decr_outstanding_submission_g2h(guc);
intel_context_put(ce);
return 0;
#undef uc_state_checkers
#undef __uc_state_checker
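+/* Wait for the GuC to finish processing all outstanding G2H responses. */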
+static inline int intel_uc_wait_for_idle(struct intel_uc *uc, long timeout)
+{
+ return intel_guc_wait_for_idle(&uc->guc, timeout);
+}
+
#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
{ \
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
*/
#include "i915_drv.h"
-#include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
do {
for_each_engine(engine, gt, id)
mock_engine_flush(engine);
- } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
+ NULL));
}
static void mock_device_release(struct drm_device *dev)