         struct ext4_sb_info *sbi = EXT4_SB(sb);
         ext4_group_t group;
         ext4_grpblk_t blkoff;
-        int i, clen, err;
+        int i, err;
         int already;
+        unsigned int clen, clen_changed;
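 
+        /*
+         * 'len' is a count of blocks: EXT4_NUM_B2C() rounds up to the number
+         * of clusters those blocks span, whereas EXT4_B2C() shifts down and
+         * can undercount whenever the cluster size is larger than the block
+         * size.
+         */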
-        clen = EXT4_B2C(sbi, len);
+        clen = EXT4_NUM_B2C(sbi, len);
 
         ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
         bitmap_bh = ext4_read_block_bitmap(sb, group);
@@ ... @@
         already = 0;
         for (i = 0; i < clen; i++)
                 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state)
                         already++;
 
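+        /*
+         * Bits that were already in the requested state do not change, so
+         * only 'clen_changed' clusters actually flip and need to show up in
+         * the free-cluster accounting below.
+         */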
+        clen_changed = clen - already;
         if (state)
                 ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
         else
                 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
         if (ext4_has_group_desc_csum(sb) &&
             (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                 ext4_free_group_clusters_set(sb, gdp,
                                 ext4_free_clusters_after_init(sb,
                                                 group, gdp));
         }
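+        /*
+         * 'clen' is reused below as the group's new free cluster count: it
+         * shrinks by the clusters that actually changed when marking in use
+         * and grows by the same amount when freeing.
+         */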
         if (state)
-                clen = ext4_free_group_clusters(sb, gdp) - clen + already;
+                clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
         else
-                clen = ext4_free_group_clusters(sb, gdp) + clen - already;
+                clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
 
         ext4_free_group_clusters_set(sb, gdp, clen);
         ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
@@ ... @@
         if (sbi->s_log_groups_per_flex) {
                 ext4_group_t flex_group = ext4_flex_group(sbi, group);
+                struct flex_groups *fg = sbi_array_rcu_deref(sbi,
+                                s_flex_groups, flex_group);
 
-                atomic64_sub(len,
-                             &sbi_array_rcu_deref(sbi, s_flex_groups,
-                                          flex_group)->free_clusters);
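+                /*
+                 * Adjust the flex group by the clusters that actually
+                 * changed: subtract when marking in use, add when freeing,
+                 * rather than always subtracting the raw block count 'len'.
+                 */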
+                if (state)
+                        atomic64_sub(clen_changed, &fg->free_clusters);
+                else
+                        atomic64_add(clen_changed, &fg->free_clusters);
         }
 
         err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);