author    Daniel Phillips <daniel@tux3.org>    2014-02-18 01:45:21 +0900
committer Daniel Phillips <daniel@tux3.org>    2014-02-18 01:45:21 +0900
commit    9eac49c1499f611590324fb9ce914726771ef9dd
tree      b0b5acd7c5471371634d08095c71ecb3c0f63ba9
parent    e04c640481caaa0ce6248495e27ea2641eb23bdf
tux3: Add locking for countmap access
Multiple frontend tasks need to access countmap for inode number
layout heuristics.

Signed-off-by: Daniel Phillips <d.phillips@partner.samsung.com>
Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
-rw-r--r--   fs/tux3/balloc.c   98
-rw-r--r--   fs/tux3/commit.c    1
-rw-r--r--   fs/tux3/tux3.h      1
3 files changed, 66 insertions(+), 34 deletions(-)
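To make the locking scheme in this patch concrete, here is a minimal user-space sketch of the pattern it introduces: one pinned countmap buffer cached in the superblock, with a spinlock serializing frontend lookups against pin replacement. All names here (demo_sb, demo_buffer, demo_load) are hypothetical stand-ins, not tux3 code; the patch itself uses struct sb, struct countmap_pin, get_bh() and blockread().

/*
 * Minimal sketch of the pin-under-spinlock pattern (hypothetical names).
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_buffer {
	long index;		/* which countmap block this buffer holds */
	int refcount;		/* reference count, like a buffer_head */
};

struct demo_sb {
	pthread_spinlock_t lock;	/* stands in for sb->countmap_lock */
	struct demo_buffer *pin;	/* last loaded block, kept referenced */
};

/*
 * Fast path: if the pinned buffer already covers the wanted block, take an
 * extra reference under the lock. Slow path: drop the lock and read the
 * block, returning it referenced but not yet pinned.
 */
static struct demo_buffer *demo_load(struct demo_sb *sb, long block)
{
	struct demo_buffer *buffer;

	pthread_spin_lock(&sb->lock);
	buffer = sb->pin;
	if (buffer && buffer->index == block) {
		buffer->refcount++;		/* like get_bh() */
		pthread_spin_unlock(&sb->lock);
		return buffer;
	}
	pthread_spin_unlock(&sb->lock);

	buffer = malloc(sizeof(*buffer));	/* stands in for blockread() */
	if (!buffer)
		return NULL;
	buffer->index = block;
	buffer->refcount = 1;
	return buffer;
}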
diff --git a/fs/tux3/balloc.c b/fs/tux3/balloc.c
index 268117d33257d7..cc642d082c6d71 100644
--- a/fs/tux3/balloc.c
+++ b/fs/tux3/balloc.c
@@ -19,48 +19,80 @@
  * Group counts
  */
 
-static int countmap_load(struct sb *sb, block_t group)
+void countmap_put(struct countmap_pin *pin)
+{
+	if (pin->buffer) {
+		blockput(pin->buffer);
+		pin->buffer = NULL;
+	}
+}
+
+/*
+ * Load and pin one block of the countmap, returning the buffer with an
+ * extra reference held. Access from the frontend is read-only and write
+ * access from the backend is single threaded, so an rw spinlock may reduce
+ * frontend contention if there is any. This could be extended to pin
+ * multiple blocks if contention causes too many block reads.
+ */
+static struct buffer_head *countmap_load(struct sb *sb, block_t group)
 {
-	block_t block = group >> (sb->blockbits - 1);
 	struct countmap_pin *pin = &sb->countmap_pin;
+	block_t block = group >> (sb->blockbits - 1);
+	struct buffer_head *buffer;
+
+	spin_lock(&sb->countmap_lock);
+	buffer = pin->buffer;
+	if (buffer && bufindex(buffer) == block) {
+		get_bh(buffer);
+		spin_unlock(&sb->countmap_lock);
+	} else {
+		spin_unlock(&sb->countmap_lock);
 
-	if (!pin->buffer || bufindex(pin->buffer) != block) {
-		if (pin->buffer)
-			blockput(pin->buffer);
-		pin->buffer = blockread(mapping(sb->countmap), block);
-		if (!pin->buffer) {
+		buffer = blockread(mapping(sb->countmap), block);
+		if (!buffer) {
 			tux3_err(sb, "block read failed");
-			return -EIO;
+			return ERR_PTR(-EIO);
 		}
 	}
-	return 0;
+
+	return buffer;
+}
+
+static void countmap_pin_update(struct sb *sb, struct buffer_head *buffer)
+{
+	if (sb->countmap_pin.buffer != buffer) {
+		countmap_put(&sb->countmap_pin);
+		sb->countmap_pin.buffer = buffer;
+	} else
+		blockput(buffer);
 }
 
 static int countmap_add(struct sb *sb, block_t group, int count)
 {
 	unsigned offset = group & (sb->blockmask >> 1);
-	struct buffer_head *clone;
+	struct buffer_head *buffer, *clone;
 	__be16 *p;
-	int err;
 
-	err = countmap_load(sb, group);
-	if (err)
-		return err;
+	buffer = countmap_load(sb, group);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
 
 	trace("add %d to group %Lu", count, group);
 	/*
	 * The countmap is modified only by backend. blockdirty()
	 * should never return -EAGAIN.
	 */
-	clone = blockdirty(sb->countmap_pin.buffer, sb->unify);
+	clone = blockdirty(buffer, sb->unify);
 	if (IS_ERR(clone)) {
-		err = PTR_ERR(clone);
-		assert(err != -EAGAIN);
-		return err;
+		assert(PTR_ERR(clone) != -EAGAIN);
+		blockput(buffer);
+		return PTR_ERR(clone);
 	}
-	sb->countmap_pin.buffer = clone;
 
-	p = bufdata(sb->countmap_pin.buffer);
+	spin_lock(&sb->countmap_lock);
+	p = bufdata(clone);
 	be16_add_cpu(p + offset, count);
+	countmap_pin_update(sb, clone);
+	spin_unlock(&sb->countmap_lock);
 
 	return 0;
 }
@@ -90,26 +122,24 @@ static int countmap_add_segment(struct sb *sb, block_t start, unsigned blocks,
 	return countmap_add(sb, group, set ? blocks : -blocks);
 }
 
-void countmap_put(struct countmap_pin *pin)
-{
-	if (pin->buffer) {
-		blockput(pin->buffer);
-		pin->buffer = NULL;
-	}
-}
-
 static int countmap_used(struct sb *sb, block_t group)
 {
 	unsigned offset = group & (sb->blockmask >> 1);
+	struct buffer_head *buffer;
 	__be16 *p;
-	int err;
+	u16 count;
 
-	err = countmap_load(sb, group);
-	if (err)
-		return err;
+	buffer = countmap_load(sb, group);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	spin_lock(&sb->countmap_lock);
+	p = bufdata(buffer);
+	count = be16_to_cpup(p + offset);
+	countmap_pin_update(sb, buffer);
+	spin_unlock(&sb->countmap_lock);
 
-	p = bufdata(sb->countmap_pin.buffer);
-	return be16_to_cpup(p + offset);
+	return count;
 }
 
 #ifndef __KERNEL__
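The hand-off back to the pin, continuing the hypothetical sketch from above: after the caller finishes its access under the lock, it either donates its reference to the pin (evicting the old block) or drops the extra reference if the buffer is pinned already, mirroring countmap_pin_update() in the diff above. This is an illustration only, not the tux3 code; a real implementation would use atomic reference counts, as get_bh()/blockput() do.

/* Drop one reference; free at zero (stands in for blockput()). */
static void demo_put(struct demo_buffer *buffer)
{
	if (--buffer->refcount == 0)
		free(buffer);
}

/*
 * Called with sb->lock held, as in the patch: either adopt the caller's
 * reference as the new pin, or drop the extra reference taken by demo_load.
 */
static void demo_pin_update(struct demo_sb *sb, struct demo_buffer *buffer)
{
	if (sb->pin != buffer) {
		if (sb->pin)
			demo_put(sb->pin);	/* unpin the previous block */
		sb->pin = buffer;		/* keep the caller's reference */
	} else {
		demo_put(buffer);	/* already pinned: drop extra ref only */
	}
}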
diff --git a/fs/tux3/commit.c b/fs/tux3/commit.c
index a61f5d91853d23..b64e61782bad29 100644
--- a/fs/tux3/commit.c
+++ b/fs/tux3/commit.c
@@ -45,6 +45,7 @@ static void init_sb(struct sb *sb)
 	INIT_LIST_HEAD(&sb->unify_buffers);
 	INIT_LIST_HEAD(&sb->alloc_inodes);
 
+	spin_lock_init(&sb->countmap_lock);
 	spin_lock_init(&sb->forked_buffers_lock);
 	init_link_circular(&sb->forked_buffers);
 	spin_lock_init(&sb->dirty_inodes_lock);
diff --git a/fs/tux3/tux3.h b/fs/tux3/tux3.h
index 42277851c3a286..b1ea39abb8b6c5 100644
--- a/fs/tux3/tux3.h
+++ b/fs/tux3/tux3.h
@@ -308,6 +308,7 @@ struct sb {
 
 	/*
 	 * For frontend and backend
 	 */
+	spinlock_t countmap_lock;
 	struct countmap_pin countmap_pin;
 	struct list_head alloc_inodes;	/* deferred inum allocation inodes */
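Rounding out the sketch: initialization mirrors the spin_lock_init() added to init_sb() above, and a read in the style of countmap_used() copies the counter out under the lock before updating the pin. Again, every name here is a hypothetical stand-in for illustration.

static void demo_sb_init(struct demo_sb *sb)
{
	pthread_spin_init(&sb->lock, PTHREAD_PROCESS_PRIVATE);
	sb->pin = NULL;		/* mirrors spin_lock_init + empty pin */
}

/*
 * Read path in the style of countmap_used(): load takes a reference, the
 * value is copied out under the lock, and the pin update keeps the buffer
 * referenced for the next lookup.
 */
static long demo_used(struct demo_sb *sb, long block)
{
	struct demo_buffer *buffer = demo_load(sb, block);
	long value;

	if (!buffer)
		return -1;		/* the patch returns an errno here */

	pthread_spin_lock(&sb->lock);
	value = buffer->index;		/* stands in for be16_to_cpup() */
	demo_pin_update(sb, buffer);
	pthread_spin_unlock(&sb->lock);

	return value;
}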