 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!percpu_ref_tryget(&ctx->refs))
-		return NULL;
-
 	if (!state) {
 		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
@@ ... @@
 	}
 }
 
+static void __io_req_do_free(struct io_kiocb *req)
+{
+	if (likely(!io_is_fallback_req(req)))
+		kmem_cache_free(req_cachep, req);
+	else
+		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
 static void __io_free_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
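
The new helper gives back only the request's memory: to the slab cache in the common case, or, for the pre-allocated fallback request, by dropping the bit lock that guards it (io_is_fallback_req() presumably distinguishes the two). A minimal userspace sketch of that fallback-allocation pattern, assuming a single static spare object claimed via an atomic flag; the names here are illustrative, not from io_uring:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { int data; };

static struct obj fallback_obj;
static atomic_flag fallback_busy = ATOMIC_FLAG_INIT;

/* Allocate from the heap; under memory pressure fall back to the
 * single static object, claimed by atomically setting a flag. */
static struct obj *obj_get(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		return o;
	/* test_and_set returns the old value: false means the spare was free */
	if (!atomic_flag_test_and_set(&fallback_busy))
		return &fallback_obj;
	return NULL;
}

static void obj_put(struct obj *o)
{
	if (o != &fallback_obj)
		free(o);
	else
		atomic_flag_clear(&fallback_busy);	/* like clear_bit_unlock() */
}
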
@@ ... @@ static void __io_free_req(struct io_kiocb *req)
 		wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	percpu_ref_put(&ctx->refs);
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+	percpu_ref_put(&req->ctx->refs);
+	__io_req_do_free(req);
 }
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
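
The net effect on the free path, reconstructed from the two hunks above: __io_free_req() keeps the pairing of one ref put per request, while the new helper returns only the memory, so paths that account for references in bulk can call it directly. Condensed (teardown details elided):

static void __io_free_req(struct io_kiocb *req)
{
	/* ... inflight accounting ... */
	percpu_ref_put(&req->ctx->refs);	/* this request's ring reference */
	__io_req_do_free(req);			/* slab or fallback-bit release */
}
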
@@ ... @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			return -EBUSY;
 	}
 
+	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+		return -EAGAIN;
+
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, nr);
 		statep = &state;
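
Taking nr references in one call up front replaces nr separate tryget operations on the submission path. percpu_ref_tryget_many() is all-or-nothing: it takes all nr references, or none once the ref has been killed. A rough userspace model of those semantics with a plain atomic counter; the real percpu_ref keeps per-CPU counters so the fast path avoids shared atomics, and REF_DEAD and the function names below are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>

#define REF_DEAD	(1L << 62)	/* illustrative "ref was killed" marker */

/* Grab nr references in a single CAS, or fail if the refcount has been
 * marked dead (the ring is being torn down). */
static bool ref_tryget_many(_Atomic long *count, long nr)
{
	long old = atomic_load(count);

	do {
		if (old & REF_DEAD)
			return false;
	} while (!atomic_compare_exchange_weak(count, &old, old + nr));
	return true;
}

/* Return nr references at once. */
static void ref_put_many(_Atomic long *count, long nr)
{
	atomic_fetch_sub(count, nr);
}
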
@@ ... @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 		if (!io_get_sqring(ctx, req, &sqe)) {
-			__io_free_req(req);
+			__io_req_do_free(req);
 			break;
 		}
@@ ... @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 
+	if (submitted != nr)
+		percpu_ref_put_many(&ctx->refs, nr - submitted);
 	if (link)
 		io_queue_link_head(link);
 	if (statep)
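
Put together, the submission path now accounts for ring references in bulk. A condensed view of io_submit_sqes() after this patch, reconstructed from the hunks above with control flow simplified:

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;			/* take nr ring refs up front */

	for (i = 0; i < nr; i++) {
		req = io_get_req(ctx, statep);	/* no per-request tryget anymore */
		if (unlikely(!req))
			break;
		if (!io_get_sqring(ctx, req, &sqe)) {
			/*
			 * The request's ring ref is still part of the batch and
			 * will be returned by the put_many() below, so only the
			 * memory is freed here; __io_free_req() would also put
			 * the ref and over-release it.
			 */
			__io_req_do_free(req);
			break;
		}
		/* ... issue the request ... */
		submitted++;
	}

	if (submitted != nr)			/* hand back the unused refs */
		percpu_ref_put_many(&ctx->refs, nr - submitted);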