static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
struct bio *bio;
/* we do all our metadata IO in aligned 4k blocks. */
const int size = 4096;
- int err, op_flags = 0;
+ int err;
+ blk_opf_t op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
}
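In the elided body of this helper, the typed op and op_flags are ORed into the single blk_opf_t that the bio allocator takes. A minimal sketch of that combination, assuming the surrounding drbd code (the real body also maps the md_io page and waits for completion):

	/* sketch, not part of the patch: enum req_op occupies the low bits of
	 * blk_opf_t, so the operation and its modifier flags combine with OR */
	bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags,
			       GFP_NOIO, &drbd_md_io_bio_set);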
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
- sector_t sector, int op)
+ sector_t sector, enum req_op op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
- if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
err = -EIO;
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
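The hunk above also replaces the legacy WRITE macro with REQ_OP_WRITE. The two happen to share the value 1, which is why the old call worked at all; the typed enum makes the intent explicit and lets sparse type-check it. For reference, simplified from include/linux/blk_types.h:

	typedef __u32 __bitwise blk_opf_t;

	enum req_op {
		REQ_OP_READ	= (__force blk_opf_t)0,
		REQ_OP_WRITE	= (__force blk_opf_t)1,
		/* ... */
	};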
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+ enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
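bm_page_io_async() passes the typed op straight into the bio allocator, whose third parameter is already a blk_opf_t; enum req_op converts cleanly because its values are defined as __force blk_opf_t constants. The relevant prototype, simplified from include/linux/bio.h:

	struct bio *bio_alloc_bioset(struct block_device *bdev,
				     unsigned short nr_vecs, blk_opf_t opf,
				     gfp_t gfp_mask, struct bio_set *bs);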
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
- struct drbd_backing_dev *bdev, sector_t sector, int op);
+ struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
- struct drbd_peer_request *, const unsigned,
- const unsigned, const int);
+ struct drbd_peer_request *, enum req_op,
+ blk_opf_t, int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
- const unsigned op, const unsigned op_flags,
+ const enum req_op op, const blk_opf_t op_flags,
const int fault_type)
{
struct bio *bios = NULL;
/* see also bio_flags_to_wire()
 * The wire format carries its own DP_* flags rather than raw bio flags,
 * because we need to map the flags to data packet flags and back
 * semantically: the peer may run a different kernel version. */
-static unsigned long wire_flags_to_bio_flags(u32 dpf)
+static blk_opf_t wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}
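On the receive path, this helper and wire_flags_to_bio_op() below are used as a pair. A minimal sketch of how the decoded values might be combined, assuming the surrounding receiver code:

	/* sketch only: op selects the operation, op_flags carries modifiers */
	enum req_op op = wire_flags_to_bio_op(dp_flags);
	blk_opf_t op_flags = wire_flags_to_bio_flags(dp_flags);
	/* the op lives in the low bits of blk_opf_t, so OR is well defined */
	blk_opf_t bio_opf = op | op_flags;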
-static unsigned long wire_flags_to_bio_op(u32 dpf)
+static enum req_op wire_flags_to_bio_op(u32 dpf)
{
if (dpf & DP_ZEROES)
return REQ_OP_WRITE_ZEROES;
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
- int op, op_flags;
+ enum req_op op;
+ blk_opf_t op_flags;
u32 dp_flags;
int err, tp;
if (get_ldev(device)) {
struct drbd_peer_request *peer_req;
- const int op = REQ_OP_WRITE_ZEROES;
+ const enum req_op op = REQ_OP_WRITE_ZEROES;
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
size, 0, GFP_NOIO);
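From here the typed op flows into drbd_submit_peer_request() under its new prototype. A sketch of the call that would follow this allocation, assuming the elided remainder of the function (DRBD_FAULT_RS_WR is drbd's fault-injection class for resync writes):

	/* sketch: the const enum req_op above matches the new signature */
	err = drbd_submit_peer_request(device, peer_req, op, 0,
				       DRBD_FAULT_RS_WR);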