return -EINVAL;
}
- if (bi->tag_size != cc->on_disk_tag_size) {
+ if (bi->tag_size != cc->on_disk_tag_size ||
+ bi->tuple_size != cc->on_disk_tag_size) {
ti->error = "Integrity profile tag size mismatch.";
return -EINVAL;
}
+ if (1 << bi->interval_exp != cc->sector_size) {
+ ti->error = "Integrity profile sector size mismatch.";
+ return -EINVAL;
+ }
if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
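
The checks above live in dm-crypt's crypt_integrity_ctr(). With larger encryption sectors there is exactly one on-disk tag per cc->sector_size bytes, so the dm-integrity profile must match on two axes: the whole integrity tuple must be consumed by dm-crypt's tag (tag_size and tuple_size both equal on_disk_tag_size), and one integrity interval (1 << bi->interval_exp) must equal one encryption sector. Below is a minimal standalone sketch of those invariants, using assumed example values (4096-byte sectors, 16-byte tags); it is illustration, not kernel code:

    #include <assert.h>

    /* Standalone sketch (not kernel code) of the constraints that
     * crypt_integrity_ctr() now enforces, with assumed example values:
     * 4096-byte encryption sectors carrying one 16-byte tag each. */
    int main(void)
    {
        unsigned int sector_size = 4096;      /* cc->sector_size (assumed) */
        unsigned int on_disk_tag_size = 16;   /* cc->on_disk_tag_size (assumed) */
        unsigned int tag_size = 16;           /* bi->tag_size from dm-integrity */
        unsigned int tuple_size = 16;         /* bi->tuple_size: tag fills the tuple */
        unsigned char interval_exp = 12;      /* bi->interval_exp: log2(4096) */

        /* The whole per-interval metadata tuple must be dm-crypt's tag... */
        assert(tag_size == on_disk_tag_size && tuple_size == on_disk_tag_size);
        /* ...and one integrity interval must equal one encryption sector,
         * so exactly one tag is stored per encrypted block. */
        assert(1 << interval_exp == sector_size);
        return 0;
    }
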
case -EINPROGRESS:
ctx->r.req = NULL;
ctx->cc_sector += sector_step;
- tag_offset += sector_step;
+ tag_offset++;
continue;
/*
 * The request was already processed (synchronously).
 */
case 0:
atomic_dec(&ctx->cc_pending);
ctx->cc_sector += sector_step;
- tag_offset += sector_step;
+ tag_offset++;
cond_resched();
continue;
/*
 * There was a data integrity error.
 */
case -EBADMSG:
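
Both arms of this switch in crypt_convert() keep advancing ctx->cc_sector by sector_step, so the sector counter used for IV generation stays in 512-byte units, while tag_offset now advances by one, because exactly one tag is stored per encryption sector. A standalone sketch of the arithmetic, assuming 4096-byte encryption sectors and a 32-sector bio (example values, not kernel code):

    #include <stdio.h>

    /* Standalone sketch (not kernel code) of why tag_offset now advances by
     * one per crypto request.  Assumed example: 4096-byte encryption sectors
     * on a 512-byte device (sector_step == 8) and a 32-sector (16 KiB) bio. */
    int main(void)
    {
        unsigned int sector_step = 4096 >> 9;  /* device sectors per request: 8 */
        unsigned int bio_sectors = 32;         /* 512-byte sectors in the bio */
        unsigned int requests = bio_sectors / sector_step;  /* 4 requests */
        unsigned int cc_sector = 0, tag_offset = 0;

        for (unsigned int i = 0; i < requests; i++) {
            cc_sector += sector_step;  /* IV counter stays in 512-byte units */
            tag_offset++;              /* tags are indexed per encryption sector */
        }
        /* With one tag per encryption sector the buffer holds 4 tags; the old
         * "tag_offset += sector_step" would have ended at index 32 and read
         * past it. */
        printf("requests=%u cc_sector=%u tags=%u (old increment would reach %u)\n",
               requests, cc_sector, tag_offset, requests * sector_step);
        return 0;
    }
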
ti->error = "Cannot allocate integrity tags mempool";
goto bad;
}
+
+ cc->tag_pool_max_sectors <<= cc->sector_shift;
}
ret = -ENOMEM;
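
In crypt_ctr(), tag_pool_max_sectors is sized just above this hunk (not shown) by how many tags fit in one mempool element, which under the one-tag-per-sector scheme means it counts encryption sectors. The new shift by cc->sector_shift converts it to 512-byte device sectors, the unit that bio_sectors() and dm_accept_partial_bio() use in crypt_map() below. A standalone sketch of the conversion with assumed example numbers (a pool element holding 128 tags, 4096-byte sectors):

    #include <stdio.h>

    /* Standalone sketch (not kernel code) of the unit conversion behind the
     * new "<<=".  Assumed example: a mempool element with room for 128 tags
     * and 4096-byte encryption sectors, i.e. sector_shift == 3. */
    int main(void)
    {
        unsigned int tags_per_pool_entry = 128; /* assumed example value */
        unsigned int sector_shift = 3;          /* log2(4096 / 512) */

        /* One tag per encryption sector: the pool covers 128 big sectors... */
        unsigned int max_sectors = tags_per_pool_entry;
        /* ...which is 1024 of the 512-byte sectors that bio_sectors() counts. */
        max_sectors <<= sector_shift;
        printf("tag_pool_max_sectors = %u (512-byte sectors)\n", max_sectors);
        return 0;
    }
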
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) {
- unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
+ unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
- unlikely(!(io->integrity_metadata = kzalloc(tag_len,
+ unlikely(!(io->integrity_metadata = kmalloc(tag_len,
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
io->integrity_metadata_from_pool = true;
- memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
}
}
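
The crypt_map() hunk applies the same per-encryption-sector accounting to the metadata buffer: bio_sectors(bio) >> cc->sector_shift is the number of encryption sectors in the bio, so tag_len shrinks by a factor of 1 << sector_shift. The switch from kzalloc() to kmalloc() and the dropped memset() on the mempool fallback path suggest the buffer no longer needs pre-zeroing, presumably because it is fully written before use (tags come back from dm-integrity on reads and are generated on writes). A standalone sketch of the length math, again with assumed example values:

    #include <stdio.h>

    /* Standalone sketch (not kernel code) of the corrected tag_len math in
     * crypt_map().  Assumed example: 16-byte tags, 4096-byte encryption
     * sectors (sector_shift == 3) and a 256-sector (128 KiB) bio. */
    int main(void)
    {
        unsigned int on_disk_tag_size = 16;  /* assumed example value */
        unsigned int sector_shift = 3;       /* cc->sector_shift (assumed) */
        unsigned int bio_sectors = 256;      /* 512-byte sectors in the bio */

        unsigned int old_len = on_disk_tag_size * bio_sectors;                    /* 4096 */
        unsigned int new_len = on_disk_tag_size * (bio_sectors >> sector_shift);  /* 512 */

        /* One tag per 4096-byte encryption sector, not per 512-byte device
         * sector: here the per-512-byte formula would allocate eight times
         * more than the new layout needs. */
        printf("old tag_len=%u bytes, new tag_len=%u bytes\n", old_len, new_len);
        return 0;
    }
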