		.needs_file		= 1,
		.fd_non_neg		= 1,
	},
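+	/*
+	 * Non-vectored read/write: the request needs the submitter's mm to
+	 * reach the user buffer, needs a target file, and goes to io-wq's
+	 * unbound list when that file is not a regular file.
+	 */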
+	{
+		/* IORING_OP_READ */
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
+	{
+		/* IORING_OP_WRITE */
+		.needs_mm		= 1,
+		.needs_file		= 1,
+		.unbound_nonreg_file	= 1,
+	},
};
static void io_wq_submit_work(struct io_wq_work **workptr);
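	/* buffer index only valid with fixed read/write */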
	if (req->rw.kiocb.private)
		return -EINVAL;
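+	/*
+	 * The non-vectored opcodes pass the buffer directly in sqe->addr
+	 * rather than a pointer to an iovec array, so map it as a
+	 * single-segment iterator and clear *iovec so the caller knows
+	 * there is nothing to free.
+	 */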
+	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+		ssize_t ret;
+		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+		*iovec = NULL;
+		return ret;
+	}
+
	if (req->io) {
		struct io_async_rw *iorw = &req->io->rw;
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
		ret = io_read_prep(req, sqe, true);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
		ret = io_write_prep(req, sqe, true);
		break;
	case IORING_OP_POLL_ADD:
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
		if (sqe) {
			ret = io_read_prep(req, sqe, force_nonblock);
			if (ret < 0)
				break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
		if (sqe) {
			ret = io_write_prep(req, sqe, force_nonblock);
			if (ret < 0)
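
Taken together, the change lets userspace read into a plain buffer without setting up an iovec. Below is a minimal sketch of exercising the new opcode, assuming liburing is available (its io_uring_prep_read() helper fills an SQE with IORING_OP_READ; error handling elided for brevity):

#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	/* IORING_OP_READ under the hood: plain buffer + length, no iovec.
	 * buf_index stays zero; per the -EINVAL check above, a buffer
	 * index is only valid with the fixed read/write opcodes. */
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}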