Fix overwrite code...

master
Vitaliy Filippov 2013-06-04 13:05:46 +04:00
parent 1206aa80b0
commit f4b7a626f6
1 changed file with 82 additions and 60 deletions

sftl.c

@@ -93,6 +93,8 @@ static void sync_io(struct block_device *bdev, sector_t sector, void *buf, unsig
static long bio_submit_kern_seq(
struct block_device *bdev, void *data, unsigned int len, gfp_t gfp_mask,
sector_t sector, void *private, bio_end_io_t *endio, int rw);
static void sftl_continue_overwrite_callback(struct bio *bio, int err);
static void sftl_write_sufficient(struct sftl_dev *sftl, struct bio *bio);
static void sftl_complete_seg(struct bio *bio, int err)
{
@@ -104,7 +106,7 @@ struct sftl_flush_info
{
struct sftl_dev *sftl;
struct bio *next_bio;
u32 random_free[seg_clust];
u32 *random_free;
u32 random_found;
u32 overwrite_last_cluster;
u32 overwrite_total;
@@ -154,7 +156,7 @@ static void sftl_search_free_sequence(struct sftl_dev *sftl, u32 *out_cur_first,
static void sftl_search_freeable_sequence(struct sftl_dev *sftl, struct sftl_flush_info *info,
u32 *out_min_freeable_start, u32 *out_min_freeable_cost)
{
u32 min_freeable_start = 0, min_freeable_cost = seg_clust*seg_clust, cur_freeable_cost = 0;
u32 i, j, min_freeable_start = 0, min_freeable_cost = seg_clust*seg_clust, cur_freeable_cost = 0;
for (i = 0; i < sftl->segs; i++)
{
for (j = 0; j < seg_clust; j++)
@@ -182,49 +184,6 @@ static void sftl_search_freeable_sequence(struct sftl_dev *sftl, struct sftl_flu
*out_min_freeable_start = min_freeable_start;
}
static void sftl_continue_overwrite_callback(struct bio *bio, int err)
{
struct sftl_flush_info *info = bio->bi_private;
sftl_continue_overwrite(info);
}
static void sftl_continue_overwrite(struct sftl_flush_info *info)
{
u32 i, j, k;
struct sftl_dev *sftl = info->sftl;
info->overwrite_total -= info->overwrite_current;
info->overwrite_current = info->overwrite_total < sftl->buf_max ? info->overwrite_total : sftl->buf_max;
atomic_set(&info->overwrite_pending, info->overwrite_current);
for (k = info->overwrite_last_cluster; k < sftl->next_free_end*seg_clust && sftl->buf_size < sftl->buf_max; k++)
{
if (sftl->clust_map[k])
{
// Modify maps
struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
u32 cluster = sftl->clust_map[k]-1;
buf_map->magic[0] = magic[0];
buf_map->magic[1] = magic[1];
buf_map->magic[2] = magic[2];
buf_map->is_erased = 0;
buf_map->block = cluster;
buf_map->ver = sftl->ver[cluster]+1;
buf_map->checksum = sftl_map_checksum(*buf_map);
sftl->map[cluster] = sftl->free_start_seg*seg_clust + sftl->buf_size;
sftl->clust_map[sftl->map[cluster]] = 1 + cluster;
sftl->ver[cluster] = buf_map->ver;
// Read into buffer - will write back from a callback
sftl->buf_size++;
if (sftl->buf_size >= sftl->buf_max || k == sftl->next_free_end*seg_clust-1)
{
info->overwrite_last_cluster = k+1;
info->overwrite_do_write = sftl->buf_size >= sftl->buf_max;
}
bio_submit_kern_seq(sftl->blkdev, sftl->buf + (sftl->buf_size-1)*clust_sz, clust_sz, GFP_KERNEL,
(min_freeable_start+i)*(seg_clust*clust_blocks+1) + j*clust_blocks, info, sftl_overwrite_one, READ);
}
}
}
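// Read-completion callback: when all reads of the current batch are done and
// the buffer holds a full segment, write it back to flash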
static void sftl_overwrite_one(struct bio *bio, int err)
{
struct sftl_flush_info *info = bio->bi_private;
@@ -234,17 +193,86 @@ static void sftl_overwrite_one(struct bio *bio, int err)
{
if (info->overwrite_do_write)
{
// Submit a write request, then continue overwriting
bio_submit_kern_seq(sftl->blkdev, sftl->buf, seg_clust*clust_sz + phy_sz, GFP_KERNEL,
sftl->free_start_seg*(seg_clust*clust_blocks+1), info, sftl_continue_overwrite_callback, WRITE);
}
else
{
// If cleaning doesn't compose a full segment, we won't write it
// If cleaning doesn't compose a full segment, don't write it
}
}
}
// Set @real <-> @virtual cluster mapping, and optionally write it into @buf_map, if it's not NULL
static void sftl_set_map(struct sftl_dev *sftl, u32 real, u32 virtual, struct sftl_map *buf_map)
{
if (sftl->ver[virtual])
{
// Mark cluster containing previous version as free
sftl->clust_map[sftl->map[virtual]] = 0;
}
if (sftl->clust_map[real])
{
// BUG - we are overwriting a non-empty cluster
BUG();
}
sftl->map[virtual] = real;
sftl->clust_map[real] = 1 + virtual;
sftl->ver[virtual]++;
if (buf_map)
{
// Fill *buf_map with new mapping
buf_map->magic[0] = magic[0];
buf_map->magic[1] = magic[1];
buf_map->magic[2] = magic[2];
buf_map->is_erased = 0;
buf_map->block = virtual;
buf_map->ver = sftl->ver[virtual];
buf_map->checksum = sftl_map_checksum(*buf_map);
}
}
// Set maps for @virtual cluster being in buffer at position @in_buffer
static void sftl_set_buffer_map(struct sftl_dev *sftl, u32 in_buffer, u32 virtual)
{
sftl_set_map(sftl, sftl->free_start_seg*seg_clust + in_buffer, virtual, (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + in_buffer);
}
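// Move valid clusters from the region being freed into the segment buffer:
// reads are submitted here, and sftl_overwrite_one() writes the buffer back,
// re-entering this function via sftl_continue_overwrite_callback()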
static void sftl_continue_overwrite(struct sftl_flush_info *info)
{
u32 k;
struct sftl_dev *sftl = info->sftl;
info->overwrite_total -= info->overwrite_current;
info->overwrite_current = info->overwrite_total < sftl->buf_max ? info->overwrite_total : sftl->buf_max;
atomic_set(&info->overwrite_pending, info->overwrite_current);
for (k = info->overwrite_last_cluster; k < sftl->next_free_end*seg_clust && sftl->buf_size < sftl->buf_max; k++)
{
if (sftl->clust_map[k])
{
// Modify mapping
sftl_set_buffer_map(sftl, sftl->buf_size, sftl->clust_map[k]-1);
// Read into buffer - will write back from a callback
sftl->buf_size++;
if (sftl->buf_size >= sftl->buf_max || k == sftl->next_free_end*seg_clust-1)
{
// Set these parameters before submitting the final request to avoid inconsistency
// (even if the request is executed immediately)
info->overwrite_last_cluster = k+1;
info->overwrite_do_write = sftl->buf_size >= sftl->buf_max;
}
bio_submit_kern_seq(sftl->blkdev, sftl->buf + (sftl->buf_size-1)*clust_sz, clust_sz, GFP_KERNEL,
(sftl->next_free_start + k/seg_clust)*(seg_clust*clust_blocks+1) + (k%seg_clust)*clust_blocks, info, sftl_overwrite_one, READ);
}
}
}
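// Write-completion callback: resumes the overwrite loop for the next batch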
static void sftl_continue_overwrite_callback(struct bio *bio, int err)
{
struct sftl_flush_info *info = bio->bi_private;
sftl_continue_overwrite(info);
}
// Callback invoked after the buffer has been flushed for the first time during a flush
static void sftl_continue_flush(struct bio *bio, int err)
{
@@ -258,7 +286,7 @@ static void sftl_continue_flush(struct bio *bio, int err)
sftl->freeclust -= seg_clust;
sftl->freesegs--;
sftl->free_start_seg++;
BUG_ON(dev->freeclust < dev->reserved_segs*seg_clust);
BUG_ON(sftl->freeclust < sftl->reserved_segs*seg_clust);
if (sftl->next_free_end)
{
if (sftl->free_end_seg <= sftl->free_start_seg)
@@ -302,6 +330,7 @@ static void sftl_continue_flush(struct bio *bio, int err)
else
{
// Move data into random free clusters
u32 i, j, next_seg;
if (sftl->free_end_seg < sftl->segs)
{
next_seg = sftl->free_end_seg;
@@ -341,6 +370,8 @@ static void sftl_continue_flush(struct bio *bio, int err)
sftl_write_sufficient(sftl, info->next_bio);
write_unlock(&sftl->buffer_lock);
bio_endio(info->next_bio, 0);
kfree(info->random_free);
kfree(info);
}
/* Cleaning algorithm:
@@ -370,6 +401,8 @@ static void sftl_begin_flush(struct sftl_dev *sftl, struct bio *bio)
{
int err;
struct sftl_flush_info *info = kmalloc(sizeof(struct sftl_flush_info), GFP_KERNEL);
info->random_free = kmalloc(sizeof(u32) * seg_clust, GFP_KERNEL);
info->random_found = 0;
info->sftl = sftl;
info->next_bio = bio;
err = bio_submit_kern_seq(sftl->blkdev, sftl->buf, seg_clust*clust_sz+phy_sz, GFP_KERNEL,
@@ -385,20 +418,10 @@ static void sftl_begin_flush(struct sftl_dev *sftl, struct bio *bio)
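// Copy an incoming write into the segment buffer and map its cluster there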
static void sftl_write_sufficient(struct sftl_dev *sftl, struct bio *bio)
{
u32 cluster = bio->bi_sector/clust_blocks;
struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
char *buffer = __bio_kmap_atomic(bio, 0, KM_USER0);
memcpy(sftl->buf + clust_sz*sftl->buf_size, buffer, clust_sz);
__bio_kunmap_atomic(bio, KM_USER0);
buf_map->magic[0] = magic[0];
buf_map->magic[1] = magic[1];
buf_map->magic[2] = magic[2];
buf_map->is_erased = 0;
buf_map->block = cluster;
buf_map->ver = sftl->ver[cluster]+1;
buf_map->checksum = sftl_map_checksum(*buf_map);
sftl->map[cluster] = sftl->free_start_seg*seg_clust + sftl->buf_size;
sftl->clust_map[sftl->map[cluster]] = 1 + cluster;
sftl->ver[cluster] = buf_map->ver;
sftl_set_buffer_map(sftl, sftl->buf_size, cluster);
sftl->buf_size++;
}
@@ -450,7 +473,6 @@ static void sftl_read_request(struct sftl_dev *sftl, struct bio *bio)
static void sftl_make_request(struct request_queue *q, struct bio *bio)
{
struct sftl_dev *sftl = (struct sftl_dev*)q->queuedata;
u32 cluster = bio->bi_sector/clust_blocks;
BUG_ON(bio->bi_vcnt > 1);
BUG_ON(bio->bi_sector % clust_blocks);
BUG_ON(bio->bi_size != clust_sz);
@@ -487,7 +509,7 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
}
// Someone is flushing - wait for the flush to finish
write_unlock(&sftl->buffer_lock);
wait_event_interruptible(lo->lo_event, !sftl->is_flushing);
wait_event_interruptible(sftl->flush_event, !sftl->is_flushing);
}
}
}