about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorOGAWA Hirofumi <hirofumi@mail.parknet.co.jp>2013-06-17 14:25:20 +0900
committerDaniel Phillips <daniel@tux3.org>2013-06-17 14:25:20 +0900
commitd041487ebc7bf8f00f41d3bfbb983c16e385f20b (patch)
treed0ef6da6f120dcc918c0f6a152713e9d33eeb81a
parent76093bcd157b32bacf6268a9374513645ccbfd41 (diff)
downloadlinux-tux3-d041487ebc7bf8f00f41d3bfbb983c16e385f20b.tar.gz
tux3: Teach partial allocation to logblock flush
tux3_logmap_io() tries to allocate contiguous blocks, but it can fail if the volume is fragmented. This teaches tux3_logmap_io() about partial allocation. Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
-rw-r--r--fs/tux3/log.c75
1 file changed, 48 insertions, 27 deletions
diff --git a/fs/tux3/log.c b/fs/tux3/log.c
index 81c4ccbd1a513..2793cecf04870 100644
--- a/fs/tux3/log.c
+++ b/fs/tux3/log.c
@@ -165,43 +165,64 @@ int tux3_logmap_io(int rw, struct bufvec *bufvec)
struct inode *logmap = bufvec_inode(bufvec);
struct sb *sb = tux_sb(logmap->i_sb);
unsigned count = bufvec_contig_count(bufvec);
- struct block_segment seg;
- block_t last;
- struct buffer_head *buffer;
- int err;
assert(rw == WRITE);
assert(bufvec_contig_index(bufvec) == 0);
- err = balloc(sb, count, &seg, 1);
- if (err) {
- assert(err);
- return err;
- }
+ while (count > 0) {
+ struct buffer_head *buffer;
+ struct block_segment seg;
+ block_t block, limit;
+ int err;
- /*
- * We can obsolete the log blocks after next rollup
- * by LOG_BFREE_RELOG.
- */
- defer_bfree(&sb->derollup, seg.block, seg.count);
+ err = balloc_partial(sb, count, &seg, 1);
+ if (err) {
+ assert(err);
+ return err;
+ }
- /* Link log blocks to logchain */
- last = seg.block;
- bufvec_buffer_for_each_contig(buffer, bufvec) {
- struct logblock *log = bufdata(buffer);
+ /*
+ * Link log blocks to logchain.
+ *
+ * FIXME: making the link for each block is
+ * inefficient to read on replay. Instead, we would be
+ * able to use the link of extent. With it, we can
+ * read multiple blocks at once.
+ */
+ block = seg.block;
+ limit = seg.block + seg.count;
+ bufvec_buffer_for_each_contig(buffer, bufvec) {
+ struct logblock *log = bufdata(buffer);
+
+ assert(log->magic == cpu_to_be16(TUX3_MAGIC_LOG));
+ log->logchain = sb->super.logchain;
+
+ trace("logchain %lld", block);
+ sb->super.logchain = cpu_to_be64(block);
+ block++;
+ if (block == limit)
+ break;
+ }
- assert(log->magic == cpu_to_be16(TUX3_MAGIC_LOG));
- log->logchain = sb->super.logchain;
+ err = __tux3_volmap_io(rw, bufvec, seg.block, seg.count);
+ if (err) {
+ tux3_err(sb, "logblock write error (%d)", err);
+ return err; /* FIXME: error handling */
+ }
- trace("logchain %lld", last);
- sb->super.logchain = cpu_to_be64(last);
- last++;
- }
+ /*
+ * We can obsolete the log blocks after next rollup
+ * by LOG_BFREE_RELOG.
+ */
+ defer_bfree(&sb->derollup, seg.block, seg.count);
- /* Add count of log on this delta to rollup logcount */
- be32_add_cpu(&sb->super.logcount, seg.count);
+ /* Add count of log on this delta to rollup logcount */
+ be32_add_cpu(&sb->super.logcount, seg.count);
- return __tux3_volmap_io(rw, bufvec, seg.block, seg.count);
+ count -= seg.count;
+ }
+
+ return 0;
}
static void log_intent(struct sb *sb, u8 intent)