 static void io_put_task(struct task_struct *task, int nr)
 {
 	struct io_uring_task *tctx = task->io_uring;
 
-	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
-		wake_up(&tctx->wait);
-	put_task_struct_many(task, nr);
+	if (likely(task == current)) {
+		tctx->cached_refs += nr;
+	} else {
+		percpu_counter_sub(&tctx->inflight, nr);
+		if (unlikely(atomic_read(&tctx->in_idle)))
+			wake_up(&tctx->wait);
+		put_task_struct_many(task, nr);
+	}
 }
 
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
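
For reference, io_put_task() as it reads after this hunk (reconstructed directly from the diff above, with comments added for clarity): the put now takes a fast path when the task dropping the references is the one currently running, and only falls back to the full accounting for a foreign task.

static void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	if (likely(task == current)) {
		/* common case: defer the accounting into a per-task cache */
		tctx->cached_refs += nr;
	} else {
		/* foreign task: drop inflight, wake idle waiters, put refs */
		percpu_counter_sub(&tctx->inflight, nr);
		if (unlikely(atomic_read(&tctx->in_idle)))
			wake_up(&tctx->wait);
		put_task_struct_many(task, nr);
	}
}

Folding nr into a plain integer increment on the fast path avoids both the percpu_counter_sub() and the put_task_struct_many() call per put.
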
 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 				     struct req_batch *rb)
 {
 	if (rb->ctx_refs)
 		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
-	if (rb->task == current)
-		current->io_uring->cached_refs += rb->task_refs;
-	else if (rb->task)
+	if (rb->task)
 		io_put_task(rb->task, rb->task_refs);
 }
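
With the caching folded into io_put_task(), the open-coded task == current special case above becomes redundant, so the batch-finish path can call io_put_task() unconditionally. The cached references are only deferred accounting: they get reclaimed when the task later tears down its io_uring state. A rough sketch of that drain step, assuming the io_uring_drop_tctx_refs() helper from fs/io_uring.c of this era (the exact body may differ):

static void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		/* settle the deferred inflight accounting in one batch */
		percpu_counter_sub(&tctx->inflight, refs);
		/* drop the cached task-struct references */
		put_task_struct_many(task, refs);
	}
}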