void dm_table_destroy(struct dm_table *t)
{
- unsigned int i;
-
if (!t)
return;
kvfree(t->index[t->depth - 2]);
/* free the targets */
- for (i = 0; i < t->num_targets; i++) {
- struct dm_target *tgt = t->targets + i;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- if (tgt->type->dtr)
- tgt->type->dtr(tgt);
+ if (ti->type->dtr)
+ ti->type->dtr(ti);
- dm_put_target_type(tgt->type);
+ dm_put_target_type(ti->type);
}
kvfree(t->highs);
/*
* Checks to see if the target joins onto the end of the table.
*/
-static int adjoin(struct dm_table *table, struct dm_target *ti)
+static int adjoin(struct dm_table *t, struct dm_target *ti)
{
struct dm_target *prev;
- if (!table->num_targets)
+ if (!t->num_targets)
return !ti->begin;
- prev = &table->targets[table->num_targets - 1];
+ prev = &t->targets[t->num_targets - 1];
return (ti->begin == (prev->begin + prev->len));
}
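
For reference, adjoin() does nothing more than compare boundaries: a new target joins the table only if it begins exactly where the previous target ends. A userspace sketch of the same contiguity test (hypothetical struct and values, not part of the patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in carrying just the two fields adjoin() inspects. */
	struct tgt { unsigned long long begin, len; };

	static bool adjoins(const struct tgt *prev, const struct tgt *next)
	{
		/* Same test as adjoin(): next must start where prev ends. */
		return next->begin == prev->begin + prev->len;
	}

	int main(void)
	{
		struct tgt a = { .begin = 0, .len = 100 };  /* sectors [0, 100)   */
		struct tgt b = { .begin = 100, .len = 50 }; /* sectors [100, 150) */

		printf("%d\n", adjoins(&a, &b)); /* prints 1: b adjoins a */
		return 0;
	}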
* two or more targets, the size of each piece it gets split into must
* be compatible with the logical_block_size of the target processing it.
*/
-static int validate_hardware_logical_block_alignment(struct dm_table *table,
- struct queue_limits *limits)
+static int validate_hardware_logical_block_alignment(struct dm_table *t,
+ struct queue_limits *limits)
{
/*
* This function uses arithmetic modulo the logical_block_size
struct dm_target *ti;
struct queue_limits ti_limits;
- unsigned i;
+ unsigned int i;
/*
* Check each entry in the table in turn.
*/
- for (i = 0; i < table->num_targets; i++) {
- ti = dm_table_get_target(table, i);
+ for (i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
blk_set_stacking_limits(&ti_limits);
if (remaining) {
DMWARN("%s: table line %u (start sect %llu len %llu) "
"not aligned to h/w logical block size %u",
- dm_device_name(table->md), i,
+ dm_device_name(t->md), i,
(unsigned long long) ti->begin,
(unsigned long long) ti->len,
limits->logical_block_size);
}
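
The alignment check itself is plain modular arithmetic on 512-byte sectors. A standalone illustration with made-up target boundaries (illustration only, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* 4096-byte logical blocks = 8 sectors of 512 bytes (4096 >> 9). */
		unsigned int lbs_sects = 4096 >> 9;
		/* Hypothetical target start sectors. */
		unsigned long long starts[] = { 0, 8, 12 };

		for (int i = 0; i < 3; i++) {
			unsigned int rem = starts[i] % lbs_sects;

			printf("start sect %llu: %s\n", starts[i],
			       rem ? "not aligned to h/w logical block size"
				   : "aligned");
		}
		return 0;
	}

A boundary at sector 12 leaves a 4-sector remainder, which is exactly the case the DMWARN above reports.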
static bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn)
+ iterate_devices_callout_fn iterate_fn)
{
- struct dm_target *ti;
- unsigned i;
-
/* Ensure that all targets support DAX. */
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->direct_access)
return false;
static int dm_table_determine_type(struct dm_table *t)
{
- unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- struct dm_target *tgt;
+ struct dm_target *ti;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
goto verify_rq_based;
}
- for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- if (dm_target_hybrid(tgt))
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
+ if (dm_target_hybrid(ti))
hybrid = 1;
- else if (dm_target_request_based(tgt))
+ else if (dm_target_request_based(ti))
request_based = 1;
else
bio_based = 1;
return 0;
}
- tgt = dm_table_get_immutable_target(t);
- if (!tgt) {
+ ti = dm_table_get_immutable_target(t);
+ if (!ti) {
DMERR("table load rejected: immutable target is required");
return -EINVAL;
- } else if (tgt->max_io_len) {
+ } else if (ti->max_io_len) {
DMERR("table load rejected: immutable target that splits IO is not supported");
return -EINVAL;
}
/* Non-request-stackable devices can't be used for request-based dm */
- if (!tgt->type->iterate_devices ||
- !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i;
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
if (dm_target_is_wildcard(ti->type))
return ti;
}
return NULL;
}
for (unsigned int i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = t->targets + i;
+ struct dm_target *ti = dm_table_get_target(t, i);
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
struct gendisk *prev_disk = NULL, *template_disk = NULL;
- unsigned i;
- for (i = 0; i < t->num_targets; i++) {
+ for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
+
if (!dm_target_passes_integrity(ti->type))
goto no_integrity;
}
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
- int i;
- struct dm_target *ti;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
}
+
dm_put_live_table(md, srcu_idx);
return args.err;
}
{
struct dm_crypto_profile *dmcp;
struct blk_crypto_profile *profile;
- struct dm_target *ti;
unsigned int i;
bool empty_profile = true;
sizeof(profile->modes_supported));
for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
blk_crypto_intersect_capabilities(profile, NULL);
}
EXPORT_SYMBOL(dm_table_get_size);
-struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
-{
- if (index >= t->num_targets)
- return NULL;
-
- return t->targets + index;
-}
-
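
With the bounds-checked lookup deleted here, every caller now indexes strictly below t->num_targets, so the NULL return was dead code. Presumably the helper survives as a static inline in a private header (dm-core.h in mainline), something like:

	/* Assumed relocation of the helper removed above. */
	static inline struct dm_target *dm_table_get_target(struct dm_table *t,
							    unsigned int index)
	{
		BUG_ON(index >= t->num_targets);

		return t->targets + index;
	}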
/*
* Search the btree for the correct target.
*
static bool dm_table_any_dev_attr(struct dm_table *t,
iterate_devices_callout_fn func, void *data)
{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, func, data))
static bool dm_table_supports_poll(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < t->num_targets) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
* Returns false if the result is unknown because a target doesn't
* support iterate_devices.
*/
-bool dm_table_has_no_data_devices(struct dm_table *table)
+bool dm_table_has_no_data_devices(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i, num_devices;
-
- for (i = 0; i < table->num_targets; i++) {
- ti = dm_table_get_target(table, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+ unsigned int num_devices = 0;
if (!ti->type->iterate_devices)
return false;
- num_devices = 0;
ti->type->iterate_devices(ti, count_device, &num_devices);
if (num_devices)
return false;
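
Nearly every predicate touched by this patch (dm_table_supports_flush, dm_table_supports_poll, dm_table_has_no_data_devices, ...) leans on the same iterate_devices callout pattern: the target walks its underlying devices and applies a caller-supplied function to each. A simplified userspace model, with the kernel's five-argument callout collapsed to two (illustration only; the real signature also passes the dm_target and a start/len sector range):

	#include <stdbool.h>
	#include <stddef.h>

	/* Simplified callout: the kernel version also gets start/len sectors. */
	typedef bool (*callout_fn)(void *dev, void *data);

	struct target {
		void **devs;
		size_t ndevs;
	};

	/* Apply fn to each device; true if any invocation returns true. */
	static bool iterate_devices(struct target *ti, callout_fn fn, void *data)
	{
		for (size_t i = 0; i < ti->ndevs; i++)
			if (fn(ti->devs[i], data))
				return true;
		return false;
	}

	/* Mirrors count_device() above: tallies devices through *data. */
	static bool count_device(void *dev, void *data)
	{
		(void)dev;
		(*(unsigned int *)data)++;
		return false; /* never short-circuit; we want the full count */
	}

	int main(void)
	{
		void *disks[2] = { 0, 0 };              /* two dummy devices */
		struct target ti = { disks, 2 };
		unsigned int n = 0;

		iterate_devices(&ti, count_device, &n); /* n == 2 afterwards */
		return n == 2 ? 0 : 1;
	}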
static bool dm_table_supports_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model)
{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (dm_target_supports_zoned_hm(ti->type)) {
if (!ti->type->iterate_devices ||
* zone sectors, if the destination device is a zoned block device, it shall
* have the specified zone_sectors.
*/
-static int validate_hardware_zoned_model(struct dm_table *table,
+static int validate_hardware_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model,
unsigned int zone_sectors)
{
if (zoned_model == BLK_ZONED_NONE)
return 0;
- if (!dm_table_supports_zoned_model(table, zoned_model)) {
+ if (!dm_table_supports_zoned_model(t, zoned_model)) {
DMERR("%s: zoned model is not consistent across all devices",
- dm_device_name(table->md));
+ dm_device_name(t->md));
return -EINVAL;
}
if (!zone_sectors || !is_power_of_2(zone_sectors))
return -EINVAL;
- if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+ if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
DMERR("%s: zone sectors is not consistent across all zoned devices",
- dm_device_name(table->md));
+ dm_device_name(t->md));
return -EINVAL;
}
/*
* Establish the new table's queue_limits and validate them.
*/
-int dm_calculate_queue_limits(struct dm_table *table,
+int dm_calculate_queue_limits(struct dm_table *t,
struct queue_limits *limits)
{
- struct dm_target *ti;
struct queue_limits ti_limits;
- unsigned i;
enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
unsigned int zone_sectors = 0;
blk_set_stacking_limits(limits);
- for (i = 0; i < table->num_targets; i++) {
- blk_set_stacking_limits(&ti_limits);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
- ti = dm_table_get_target(table, i);
+ blk_set_stacking_limits(&ti_limits);
if (!ti->type->iterate_devices)
goto combine_limits;
DMWARN("%s: adding target device "
"(start sect %llu len %llu) "
"caused an alignment inconsistency",
- dm_device_name(table->md),
+ dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
}
zoned_model = limits->zoned;
zone_sectors = limits->chunk_sectors;
}
- if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
+ if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
return -EINVAL;
- return validate_hardware_logical_block_alignment(table, limits);
+ return validate_hardware_logical_block_alignment(t, limits);
}
/*
static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
- struct dm_target *ti;
- unsigned i;
-
/*
* Require at least one underlying device to support flushes.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting flushes must provide.
*/
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_flush_bios)
continue;
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < t->num_targets) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_write_zeroes_bios)
return false;
static bool dm_table_supports_nowait(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i = 0;
-
- while (i < t->num_targets) {
- ti = dm_table_get_target(t, i++);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_supports_nowait(ti->type))
return false;
static bool dm_table_supports_discards(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_discard_bios)
return false;
static bool dm_table_supports_secure_erase(struct dm_table *t)
{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = dm_table_get_target(t, i);
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_secure_erase_bios)
return false;
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
- int i = t->num_targets;
- struct dm_target *ti = t->targets;
-
lockdep_assert_held(&t->md->suspend_lock);
- while (i--) {
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
switch (mode) {
case PRESUSPEND:
if (ti->type->presuspend)
ti->type->postsuspend(ti);
break;
}
- ti++;
}
}
int dm_table_resume_targets(struct dm_table *t)
{
- int i, r = 0;
+ unsigned int i;
+ int r = 0;
lockdep_assert_held(&t->md->suspend_lock);
for (i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = t->targets + i;
+ struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->preresume)
continue;
}
for (i = 0; i < t->num_targets; i++) {
- struct dm_target *ti = t->targets + i;
+ struct dm_target *ti = dm_table_get_target(t, i);
if (ti->type->resume)
ti->type->resume(ti);