Read and write inode tables for a whole flex_bg (fixes growing with new algorithm)

master
Vitaliy Filippov 2014-01-09 22:27:52 +00:00
parent 488162ea1b
commit 8ad456b992
1 changed files with 75 additions and 64 deletions

View File

@@ -1,8 +1,8 @@
/**
* A tool for ext2/ext3/ext4 filesystems that allows to change inode count
* without recreating it.
* without recreating the FS.
*
* Copyright (c) Vitaliy Filippov <vitalif@mail.ru> 2014
* Copyright (c) Vitaliy Filippov <vitalif@mail.ru> 2013+
* License: GNU GPLv2 or later
*
* This program is free software; you can redistribute it and/or modify
@@ -33,20 +33,20 @@
* 1.2) Using the created map, find place for each group inode table closest to the beginning
* of its flex_bg. Save these locations in memory.
* 1.3) Free the map.
* 2) If shrinking - move inodes away from the end of each block group inode table
* 2) Only if shrinking: move inodes away from the end of each block group inode table
* 2.1) move each inode to the new place, mark new place as occupied, unmark old one
* 2.2) remember the old->new inode number mapping
* 2) If growing - move data away from extra blocks needed by growing inode tables:
* 2.2) Create a map of blocks that we want to free.
* 2.3) Iterate through all inodes and move blocks. It may involve overwriting
* 3) Move data away from extra blocks needed by growing/moved inode tables:
* 3.2) Create a map of blocks that we want to free.
* 3.3) Iterate through all inodes and move blocks. It may involve overwriting
* the whole file extent tree or block mapping.
* 3) Change all inode numbers in directory entries according to mappings from (1.2),
* 4) Change all inode numbers in directory entries according to mappings from (1.2),
* and then using a formula: new_num = 1 + ((old_num-1)/old_i_per_g)*new_i_per_g + ((old_num-1) % old_i_per_g)
* 4) Move inode tables.
* 5) Unmark old inode table blocks, mark new ones.
* 6) Change block group descriptors: bg_inode_table, bg_free_inodes_count,
* 5) Move inode tables.
* 6) Unmark old inode table blocks, mark new ones.
* 7) Change block group descriptors: bg_inode_table, bg_free_inodes_count,
* bg_free_blocks_count, bg_inode_bitmap_csum, bg_itable_unused
* 7) Change superblock: s_inodes_count, s_free_blocks_count,
* 8) Change superblock: s_inodes_count, s_free_blocks_count,
* s_free_inodes_count, s_inodes_per_group
*
* This is a highly destructive process and WILL leave a corrupted FS if interrupted.
@@ -98,6 +98,7 @@ typedef struct
{
ext2_filsys fs;
int fs_fd;
dgrp_t flexbg_size, flex_count;
char *device_name, *io_options, *patch_file;
__u32 ig_old, ig_new; // old and new inodes-per-group count
__u32 ibg_old, ibg_new; // old and new inode_blocks-per-group count
@@ -326,8 +327,8 @@ errcode_t change_inode_numbers(realloc_data *rd)
errcode_t change_super_and_bgd(realloc_data *rd)
{
blk64_t blk;
dgrp_t grp;
__u32 used_ibg, i, unus;
dgrp_t grp, n_grp, flex_grp;
__u32 used_ig, used_ibg, i, unus;
errcode_t retval = 0;
int has_gdt_csum = EXT2_HAS_RO_COMPAT_FEATURE(rd->fs->super, EXT4_FEATURE_RO_COMPAT_GDT_CSUM);
void *buf = NULL;
@@ -336,7 +337,7 @@ errcode_t change_super_and_bgd(realloc_data *rd)
{
ext2fs_read_block_bitmap(rd->fs);
}
retval = ext2fs_get_mem(EXT2_BLOCK_SIZE(rd->fs->super) * rd->ibg_new, &buf);
retval = ext2fs_get_mem(EXT2_BLOCK_SIZE(rd->fs->super) * rd->ibg_new * rd->flexbg_size, &buf);
if (retval)
{
goto out;
@@ -362,59 +363,73 @@ errcode_t change_super_and_bgd(realloc_data *rd)
ext2fs_block_alloc_stats2(rd->fs, blk, +1);
}
}
for (grp = 0; grp < rd->fs->group_desc_count; grp++)
for (flex_grp = 0; flex_grp < rd->flex_count; flex_grp++)
{
// Skip uninitialized inode table parts
used_ibg = rd->ibg_old;
if (has_gdt_csum)
// Read inode tables for a flex_bg
grp = flex_grp*rd->flexbg_size;
n_grp = min(rd->flexbg_size, rd->fs->group_desc_count-grp);
for (i = 0; i < n_grp; i++, grp++)
{
if (ext2fs_bg_flags_test(rd->fs, grp, EXT2_BG_INODE_UNINIT))
// Skip uninitialized inode table parts
used_ibg = rd->ibg_old;
used_ig = rd->ig_old;
if (has_gdt_csum)
{
used_ibg = 0;
if (ext2fs_bg_flags_test(rd->fs, grp, EXT2_BG_INODE_UNINIT))
{
used_ig = used_ibg = 0;
}
else
{
used_ig = (rd->ig_old - ext2fs_bg_itable_unused(rd->fs, grp));
used_ibg = (used_ig * EXT2_INODE_SIZE(rd->fs->super)+EXT2_BLOCK_SIZE(rd->fs->super)-1)/EXT2_BLOCK_SIZE(rd->fs->super);
}
}
else
blk = ext2fs_inode_table_loc(rd->fs, grp);
if (used_ibg > 0)
{
used_ibg = (rd->ig_old - ext2fs_bg_itable_unused(rd->fs, grp));
used_ibg = (used_ibg * EXT2_INODE_SIZE(rd->fs->super)+EXT2_BLOCK_SIZE(rd->fs->super)-1)/EXT2_BLOCK_SIZE(rd->fs->super);
retval = io_channel_read_blk64(rd->fs->io, blk, min(used_ibg, rd->ibg_new),
buf + i*rd->ibg_new*EXT2_BLOCK_SIZE(rd->fs->super));
if (retval)
goto out;
if (used_ig < rd->ig_new)
{
memset(buf + i*rd->ibg_new*EXT2_BLOCK_SIZE(rd->fs->super) +
EXT2_INODE_SIZE(rd->fs->super) * used_ig, 0,
EXT2_INODE_SIZE(rd->fs->super) * (rd->ig_new - used_ig));
}
}
}
// Move inode table
blk = ext2fs_inode_table_loc(rd->fs, grp);
if (used_ibg > 0 && blk != rd->new_itable_loc[grp])
// Write inode tables
grp = flex_grp*rd->flexbg_size;
for (i = 0; i < n_grp; i++, grp++)
{
retval = io_channel_read_blk64(rd->fs->io, blk, min(used_ibg, rd->ibg_new), buf);
if (retval)
goto out;
if (used_ibg < rd->ibg_new)
{
memset(buf + EXT2_BLOCK_SIZE(rd->fs->super) * used_ibg, 0,
EXT2_BLOCK_SIZE(rd->fs->super) * (rd->ibg_new - used_ibg));
}
retval = io_channel_write_blk64(rd->fs->io, rd->new_itable_loc[grp], rd->ibg_new, buf);
retval = io_channel_write_blk64(rd->fs->io, rd->new_itable_loc[grp], rd->ibg_new,
buf + i*rd->ibg_new*EXT2_BLOCK_SIZE(rd->fs->super));
if (retval)
{
printf("Error moving inode table for block group %u\n", grp);
goto out;
}
}
// Set inode table location and free inode count
ext2fs_inode_table_loc_set(rd->fs, grp, rd->new_itable_loc[grp]);
ext2fs_bg_free_inodes_count_set(rd->fs, grp,
ext2fs_bg_free_inodes_count(rd->fs, grp) + rd->ig_new - rd->ig_old);
if (has_gdt_csum)
{
unus = ext2fs_bg_itable_unused(rd->fs, grp);
if (rd->ig_new > rd->ig_old || unus >= rd->ig_old - rd->ig_new)
// Set inode table location and free inode count
ext2fs_inode_table_loc_set(rd->fs, grp, rd->new_itable_loc[grp]);
ext2fs_bg_free_inodes_count_set(rd->fs, grp,
ext2fs_bg_free_inodes_count(rd->fs, grp) + rd->ig_new - rd->ig_old);
if (has_gdt_csum)
{
unus += rd->ig_new - rd->ig_old;
unus = ext2fs_bg_itable_unused(rd->fs, grp);
if (rd->ig_new > rd->ig_old || unus >= rd->ig_old - rd->ig_new)
{
unus += rd->ig_new - rd->ig_old;
}
else
{
unus = 0;
}
ext2fs_bg_itable_unused_set(rd->fs, grp, unus);
ext2fs_bg_flags_clear(rd->fs, grp, EXT2_BG_BLOCK_UNINIT);
ext2fs_group_desc_csum_set(rd->fs, grp);
}
else
{
unus = 0;
}
ext2fs_bg_itable_unused_set(rd->fs, grp, unus);
ext2fs_bg_flags_clear(rd->fs, grp, EXT2_BG_BLOCK_UNINIT);
ext2fs_group_desc_csum_set(rd->fs, grp);
}
}
// Bitmaps never need to be moved because a single bitmap is always a single FS block
@@ -472,8 +487,8 @@ errcode_t alloc_itables(realloc_data *rd)
{
errcode_t retval = 0;
ext2fs_block_bitmap nonmovable = NULL;
dgrp_t grp, flex_grp, flex_count;
int flexbg_size, n_grp, i;
dgrp_t grp, flex_grp;
int n_grp, i;
blk64_t blk, end;
retval = ext2fs_get_mem(sizeof(blk64_t) * rd->fs->group_desc_count, &rd->new_itable_loc);
if (retval)
@@ -498,22 +513,18 @@ errcode_t alloc_itables(realloc_data *rd)
if (EXT2_HAS_INCOMPAT_FEATURE(rd->fs->super, EXT4_FEATURE_INCOMPAT_FLEX_BG)
&& rd->fs->super->s_log_groups_per_flex)
{
flexbg_size = 1 << rd->fs->super->s_log_groups_per_flex;
rd->flexbg_size = 1 << rd->fs->super->s_log_groups_per_flex;
}
else
{
flexbg_size = 1;
rd->flexbg_size = 1;
}
flex_count = (rd->fs->group_desc_count + flexbg_size - 1) / flexbg_size;
rd->flex_count = (rd->fs->group_desc_count + rd->flexbg_size - 1) / rd->flexbg_size;
// Allocate inode tables
for (flex_grp = 0; flex_grp < flex_count; flex_grp++)
for (flex_grp = 0; flex_grp < rd->flex_count; flex_grp++)
{
n_grp = flexbg_size;
grp = flex_grp*flexbg_size;
if (grp+n_grp > rd->fs->group_desc_count)
{
n_grp = rd->fs->group_desc_count - grp;
}
grp = flex_grp*rd->flexbg_size;
n_grp = min(rd->flexbg_size, rd->fs->group_desc_count-grp);
// TODO We could use a better algorithm that would always try to find
// the biggest free sequence of blocks if it can't allocate all inode
// tables in sequence