io_req_complete_post(req, res, cflags);
}
+ static inline void __io_req_complete32(struct io_kiocb *req,
+ unsigned int issue_flags, s32 res,
+ u32 cflags, u64 extra1, u64 extra2)
+ {
+ if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
+ io_req_complete_state(req, res, cflags);
+ req->extra1 = extra1;
+ req->extra2 = extra2;
+ } else {
+ io_req_complete_post32(req, res, cflags, extra1, extra2);
+ }
+ }
+
static inline void io_req_complete(struct io_kiocb *req, s32 res)
{
+ if (res < 0)
+ req_set_fail(req);
__io_req_complete(req, 0, res, 0);
}
/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
- __io_req_complete(req, issue_flags, 0, io_put_kbuf(req, issue_flags));
+ unsigned int cflags;
+ void __user *buf;
+
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ size_t len = 1;
+
+ buf = io_buffer_select(req, &len, issue_flags);
+ if (!buf)
+ return -ENOBUFS;
+ }
+
+ cflags = io_put_kbuf(req, issue_flags);
+ if (!(req->ctx->flags & IORING_SETUP_CQE32))
+ __io_req_complete(req, issue_flags, 0, cflags);
+ else
+ __io_req_complete32(req, issue_flags, 0, cflags,
+ req->nop.extra1, req->nop.extra2);
return 0;
}
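
For completeness, a minimal userspace counterpart might look like the sketch below. This is not part of the patch: it assumes a liburing build whose io_uring_queue_init() passes IORING_SETUP_CQE32 through to the kernel, and whose struct io_uring_cqe exposes the 16 extra bytes as a big_cqe[] tail.

#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        /* Double-size CQEs: every completion carries 16 extra bytes */
        ret = io_uring_queue_init(8, &ring, IORING_SETUP_CQE32);
        if (ret < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_submit(&ring);

        ret = io_uring_wait_cqe(&ring, &cqe);
        if (ret < 0)
                return 1;

        /* On a CQE32 ring, extra1/extra2 land in the big_cqe[] tail */
        printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
               (unsigned long long)cqe->big_cqe[0],
               (unsigned long long)cqe->big_cqe[1]);

        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}
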
req->io_task_work.func = io_apoll_task_func;
trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
- io_req_task_work_add(req, false);
+ io_req_task_work_add(req);
}
-static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
+static inline void io_poll_execute(struct io_kiocb *req, int res,
+ __poll_t events)
{
if (io_poll_get_ownership(req))
__io_poll_execute(req, res, events);
__s32 splice_fd_in;
__u32 file_index;
};
- __u64 addr3;
- __u64 __pad2[1];
+ union {
+ struct {
+ __u64 addr3;
+ __u64 __pad2[1];
+ };
+ /*
+ * If the ring is initialized with IORING_SETUP_SQE128, then
+ * this field is used for 80 bytes of arbitrary command data
+ */
+ __u8 cmd[0];
+ };
};
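
As a rough sketch of how the cmd[] area could be filled from userspace (hypothetical: it assumes the ring was created with IORING_SETUP_SQE128, that liburing's io_uring.h carries this union, and that some opcode actually consumes the bytes; queue_big_sqe() is an illustrative name, and a plain NOP ignores the payload):

#include <errno.h>
#include <string.h>
#include <liburing.h>

/*
 * Sketch only: stage 80 bytes of command data in a 128-byte SQE. The
 * ring is assumed to have been created with IORING_SETUP_SQE128; this
 * just shows where the bytes go within the doubled SQE.
 */
static int queue_big_sqe(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        unsigned char cmd[80];          /* 128-byte SQE minus 48-byte header */

        if (!sqe)
                return -EBUSY;
        memset(cmd, 0, sizeof(cmd));    /* driver-specific payload goes here */
        io_uring_prep_nop(sqe);         /* placeholder opcode for the sketch */
        memcpy(sqe->cmd, cmd, sizeof(cmd));
        return io_uring_submit(ring);
}
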
+/*
+ * If sqe->file_index is set to this for opcodes that instantiate a new
+ * direct descriptor (like openat/openat2/accept), then io_uring will allocate
+ * an available direct descriptor instead of having the application pass one
+ * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
+ * if the space is full.
+ */
+#define IORING_FILE_INDEX_ALLOC (~0U)
+
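
A hedged usage sketch follows (hypothetical: open_direct_auto() is an illustrative name, the ring is assumed to already have a registered, possibly sparse, file table, and sqe->file_index is written directly rather than through a liburing prep helper):

#include <errno.h>
#include <fcntl.h>
#include <liburing.h>

/*
 * Sketch: open a file into a kernel-chosen direct descriptor slot. The
 * ring is assumed to have a registered (possibly sparse) file table,
 * e.g. from io_uring_register_files_sparse(ring, 16).
 */
static int open_direct_auto(struct io_uring *ring, const char *path)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int slot, ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
        sqe->file_index = IORING_FILE_INDEX_ALLOC;      /* kernel picks slot */
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        slot = cqe->res;        /* allocated slot, or -ENFILE if table full */
        io_uring_cqe_seen(ring, cqe);
        return slot;
}
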
enum {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,