author     Coly Li <colyli@suse.de>  2022-05-28 14:24:59 +0800
committer  Coly Li <colyli@suse.de>  2022-05-28 14:24:59 +0800
commit     167a5ac72783bf3e566441ebf1e2547bbc66bb2c (patch)
tree       178addb9f75483b9274466f4e3e7ebd03533064c
parent     5fa94acba8ddf4d2f8d4e1a32c32651c0dc466ec (diff)
download   bcache-patches-167a5ac72783bf3e566441ebf1e2547bbc66bb2c.tar.gz
for-test: remove remove-multiple-cache-devices series since most parts are merged
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0001-bcache-rename-struct-bkey-members.patch | 259
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0002-bcache-remove-multiple-cache-devices-support-from-PT.patch | 270
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0003-bcache-remove-useless-for-loop-of-KEY_PTRS.patch | 762
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0004-bcache-remove-ptr-index-from-PTR_CACHE.patch | 194
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0005-bcache-remove-ptr-index-from-PTR_BUCKET-and-PTR_BUCK.patch | 351
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0006-bcache-remove-ptr-index-from-ptr_available-and-ptr_s.patch | 244
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0007-bcache-remove-ptr-index-from-bch_submit_bbio.patch | 126
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0008-bcache-remove-ptr-index-from-bch_bkey_copy_single_pt.patch | 101
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0009-bcache-remove-ptr-index-from-bch_extent_bad_expensiv.patch | 44
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0010-bcache-remove-for_each_cache.patch | 899
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0011-bcache-do-not-use-nr_in_set-and-nr_this_dev-of-struc.patch | 164
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0012-bcache-change-cache_set.cache-and-cache_set.cache_by.patch | 292
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0013-bcache-remove-ptr-index-from-bch_bucket_alloc_set.patch | 133
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/0014-bcache-remove-MAX_CACHES_PER_SET-from-ptr_available.patch | 35
-rw-r--r--  for-test/remove-multiple-cache-devices/original_ideas/debug_info_0001.patch | 63
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/0000-cover-letter.patch | 73
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch | 151
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0002-bcache-explicitly-make-cache_set-only-have-single.patch | 128
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0003-bcache-remove-for_each_cache.patch | 895
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0004-bcache-add-set_uuid-in-struct-cache_set.patch | 172
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0005-bcache-only-use-block_bytes-on-struct-cache.patch | 257
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0006-bcache-remove-useless-alloc_bucket_pages.patch | 29
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0007-bcache-remove-useless-bucket_pages.patch | 29
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch | 49
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch | 66
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch | 52
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0011-bcache-remove-can_attach_cache.patch | 49
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch | 109
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch | 415
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch | 261
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch | 73
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch | 152
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch | 128
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch | 896
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch | 173
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch | 258
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch | 53
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch | 110
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch | 469
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch | 152
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch | 128
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch | 896
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch | 173
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch | 258
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch | 66
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch | 53
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch | 110
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch | 438
56 files changed, 0 insertions, 11578 deletions
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0001-bcache-rename-struct-bkey-members.patch b/for-test/remove-multiple-cache-devices/original_ideas/0001-bcache-rename-struct-bkey-members.patch
deleted file mode 100644
index 433fe53..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0001-bcache-rename-struct-bkey-members.patch
+++ /dev/null
@@ -1,259 +0,0 @@
-From a5271c934edad2ca044777f2c03965385018cc8b Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Mon, 16 Apr 2018 22:28:59 +0800
-Subject: [PATCH 01/14] bcache: rename struct bkey members
-
-Although bcache has a code framework to support multiple cache copies, it
-is not finished and won't be finished in the future. People who care about
-data reliability mostly use software or hardware RAID, so there is no
-reason to keep the multiple cache device code framework any more.
-
-The first change is to rename the struct bkey members. Currently it is
-defined as,
- struct bkey {
- __u64 high;
- __u64 low;
- __u64 ptr[];
- };
-
-ptr[] is a flexible array whose size is given by KEY_PTRS(). In practice,
-for all bcache configurations KEY_PTRS() is at most 1 (only one cache
-device is used for each cache set). Typically ptr[0] stores the LBA on the
-cache device, and ptr[1] stores the checksum of this key (if there is one).
-
-In order to keep the on-disk format consistent, this patch renames the
-struct bkey members as,
-
- struct bkey {
- __u64 high;
- __u64 low;
- __u64 ptr;
- __u64 csum;
- }
-
-csum takes the space of ptr[1], so there is no change to the on-disk
-layout; kernels and user space tools from before this patch still work.
-
-This is the first step towards removing the unfinished and useless multiple
-cache device code framework from the bcache code.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 8 ++++----
- drivers/md/bcache/bset.c | 2 +-
- drivers/md/bcache/btree.c | 12 ++++++------
- drivers/md/bcache/extents.c | 11 +++++------
- drivers/md/bcache/journal.c | 3 ++-
- drivers/md/bcache/request.c | 2 +-
- include/uapi/linux/bcache.h | 9 +++++----
- 7 files changed, 24 insertions(+), 23 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 7fa2631b422c..072af26e9c00 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -496,7 +496,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
- if (b == -1)
- goto err;
-
-- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-+ k->ptr = MAKE_PTR(ca->buckets[b].gen,
- bucket_to_sector(c, b),
- ca->sb.nr_this_dev);
-
-@@ -637,9 +637,9 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- EBUG_ON(ptr_stale(c, &b->key, i));
-
- /* Set up the pointer to the space we're allocating: */
--
-- for (i = 0; i < KEY_PTRS(&b->key); i++)
-- k->ptr[i] = b->key.ptr[i];
-+ k->ptr = b->key.ptr;
-+ if (KEY_CSUM(&b->key))
-+ k->csum = b->key.csum;
-
- sectors = min(sectors, b->sectors_free);
-
-diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
-index f3403b45bc28..c7f52134b394 100644
---- a/drivers/md/bcache/bset.c
-+++ b/drivers/md/bcache/bset.c
-@@ -186,7 +186,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
-
- /* Only copy the header, key, and one pointer. */
- memcpy(dest, src, 2 * sizeof(uint64_t));
-- dest->ptr[0] = src->ptr[i];
-+ dest->ptr = src->ptr;
- SET_KEY_PTRS(dest, 1);
- /* We didn't copy the checksum so clear that bit. */
- SET_KEY_CSUM(dest, 0);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 2a0968c04e21..125168a18be7 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -94,7 +94,7 @@
- #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
-
- #define PTR_HASH(c, k) \
-- (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-+ (((k)->ptr >> c->bucket_bits) | PTR_GEN(k, 0))
-
- #define insert_lock(s, b) ((b)->level <= (s)->lock)
-
-@@ -191,7 +191,7 @@ void bkey_put(struct cache_set *c, struct bkey *k)
-
- static uint64_t btree_csum_set(struct btree *b, struct bset *i)
- {
-- uint64_t crc = b->key.ptr[0];
-+ uint64_t crc = b->key.ptr;
- void *data = (void *) i + 8, *end = bset_bkey_last(i);
-
- crc = bch_crc64_update(crc, data, end - data);
-@@ -573,7 +573,7 @@ static void mca_bucket_free(struct btree *b)
- {
- BUG_ON(btree_node_dirty(b));
-
-- b->key.ptr[0] = 0;
-+ b->key.ptr = 0;
- hlist_del_init_rcu(&b->hash);
- list_move(&b->list, &b->c->btree_cache_freeable);
- }
-@@ -2165,7 +2165,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- struct bkey *check_key)
- {
- int ret = -EINTR;
-- uint64_t btree_ptr = b->key.ptr[0];
-+ uint64_t btree_ptr = b->key.ptr;
- unsigned long seq = b->seq;
- struct keylist insert;
- bool upgrade = op->lock == -1;
-@@ -2176,7 +2176,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- rw_unlock(false, b);
- rw_lock(true, b, b->level);
-
-- if (b->key.ptr[0] != btree_ptr ||
-+ if (b->key.ptr != btree_ptr ||
- b->seq != seq + 1) {
- op->lock = b->level;
- goto out;
-@@ -2184,7 +2184,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- }
-
- SET_KEY_PTRS(check_key, 1);
-- get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
-+ get_random_bytes(&check_key->ptr, sizeof(uint64_t));
-
- SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
-
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index c334e6666461..6604cab6a48f 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -119,7 +119,7 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
- if (KEY_DIRTY(k))
- p(" dirty");
- if (KEY_CSUM(k))
-- p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-+ p(" cs%llu %llx", KEY_CSUM(k), k->csum);
- #undef p
- }
-
-@@ -385,7 +385,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
- BUG_ON(!KEY_PTRS(replace_key));
-
- for (i = 0; i < KEY_PTRS(replace_key); i++)
-- if (k->ptr[i] != replace_key->ptr[i] + offset)
-+ if (k->ptr != replace_key->ptr + offset)
- goto check_failed;
-
- sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-@@ -570,8 +570,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
-
- static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
- {
-- return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
-- ~((uint64_t)1 << 63);
-+ return (l->csum + r->csum) & ~((uint64_t)1 << 63);
- }
-
- static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
-@@ -583,7 +582,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
- return false;
-
- for (i = 0; i < KEY_PTRS(l); i++)
-- if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
-+ if (l->ptr + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr ||
- PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
- return false;
-
-@@ -600,7 +599,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
-
- if (KEY_CSUM(l)) {
- if (KEY_CSUM(r))
-- l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
-+ l->csum = merge_chksums(l, r);
- else
- SET_KEY_CSUM(l, 0);
- }
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 18f1b5239620..b2b7bacbfc83 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -533,9 +533,10 @@ static void journal_reclaim(struct cache_set *c)
- continue;
-
- ja->cur_idx = next;
-- k->ptr[n++] = MAKE_PTR(0,
-+ k->ptr = MAKE_PTR(0,
- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
- ca->sb.nr_this_dev);
-+ n++;
- }
-
- bkey_init(k);
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index ae67f5fa8047..cd4fcae6516c 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -49,7 +49,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
- kunmap(bv.bv_page);
- }
-
-- k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
-+ k->csum = csum & (~0ULL >> 1);
- }
-
- /* Insert data into cache */
-diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
-index 821f71a2e48f..1c919a662ec3 100644
---- a/include/uapi/linux/bcache.h
-+++ b/include/uapi/linux/bcache.h
-@@ -23,7 +23,8 @@ static inline void SET_##name(type *k, __u64 v) \
- struct bkey {
- __u64 high;
- __u64 low;
-- __u64 ptr[];
-+ __u64 ptr;
-+ __u64 csum;
- };
-
- #define KEY_FIELD(name, field, offset, size) \
-@@ -31,12 +32,12 @@ struct bkey {
-
- #define PTR_FIELD(name, offset, size) \
- static inline __u64 name(const struct bkey *k, unsigned i) \
--{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
-+{ return (k->ptr >> offset) & ~(~0ULL << size); } \
- \
- static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
- { \
-- k->ptr[i] &= ~(~(~0ULL << size) << offset); \
-- k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
-+ k->ptr &= ~(~(~0ULL << size) << offset); \
-+ k->ptr |= (v & ~(~0ULL << size)) << offset; \
- }
-
- #define KEY_SIZE_BITS 16
---
-2.17.0
-
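The on-disk compatibility claim in the commit message above is easy to check outside the kernel. Below is a minimal user-space sketch, not code from the kernel tree: uint64_t stands in for __u64, and the old flexible array is pinned at two elements for illustration, showing that the renamed members occupy the same 64-bit slots that ptr[0] and ptr[1] did.

    /* Sketch only: verifies the before/after layouts match byte for byte. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct bkey_old {
        uint64_t high;
        uint64_t low;
        uint64_t ptr[2];   /* ptr[0] plus the optional checksum slot */
    };

    struct bkey_new {
        uint64_t high;
        uint64_t low;
        uint64_t ptr;      /* was ptr[0] */
        uint64_t csum;     /* was ptr[1] */
    };

    int main(void)
    {
        /* Same size and same member offsets -> same on-disk layout. */
        assert(sizeof(struct bkey_old) == sizeof(struct bkey_new));
        assert(offsetof(struct bkey_old, ptr) == offsetof(struct bkey_new, ptr));
        assert(offsetof(struct bkey_old, ptr) + sizeof(uint64_t) ==
               offsetof(struct bkey_new, csum));
        return 0;
    }
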
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0002-bcache-remove-multiple-cache-devices-support-from-PT.patch b/for-test/remove-multiple-cache-devices/original_ideas/0002-bcache-remove-multiple-cache-devices-support-from-PT.patch
deleted file mode 100644
index 5d00e83..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0002-bcache-remove-multiple-cache-devices-support-from-PT.patch
+++ /dev/null
@@ -1,270 +0,0 @@
-From 2795f44818acb77ee13ab6982c180f2c2281dbef Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Mon, 16 Apr 2018 23:04:00 +0800
-Subject: [PATCH 02/14] bcache: remove multiple cache devices support from
- PTR_FIELD()
-
-The current definition of the macro PTR_FIELD() takes an 'unsigned i' to
-index ptr[] in struct bkey for multiple cache devices support. Since the
-multiple cache devices support is unfinished and nobody uses it, removing
-the code framework makes the bcache code simpler and easier to maintain.
-
-Because there is only one cache device in a cache set, the ptr index in
-the macro PTR_FIELD() is useless. This patch removes it while keeping the
-code execution logic consistent.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/bcache.h | 8 ++++----
- drivers/md/bcache/bset.c | 2 +-
- drivers/md/bcache/btree.c | 14 +++++++-------
- drivers/md/bcache/debug.c | 2 +-
- drivers/md/bcache/extents.c | 10 +++++-----
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 4 ++--
- drivers/md/bcache/writeback.c | 2 +-
- include/uapi/linux/bcache.h | 4 ++--
- 10 files changed, 25 insertions(+), 25 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 072af26e9c00..90b43b081d61 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -658,7 +658,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- b->sectors_free -= sectors;
-
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
-- SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
-+ SET_PTR_OFFSET(&b->key, PTR_OFFSET(&b->key) + sectors);
-
- atomic_long_add(sectors,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index d6bf294f3907..10b9bbc482eb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -761,14 +761,14 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k,
- unsigned ptr)
- {
-- return c->cache[PTR_DEV(k, ptr)];
-+ return c->cache[PTR_DEV(k)];
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
- const struct bkey *k,
- unsigned ptr)
- {
-- return sector_to_bucket(c, PTR_OFFSET(k, ptr));
-+ return sector_to_bucket(c, PTR_OFFSET(k));
- }
-
- static inline struct bucket *PTR_BUCKET(struct cache_set *c,
-@@ -787,13 +787,13 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
- static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- unsigned i)
- {
-- return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k));
- }
-
- static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- unsigned i)
- {
-- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-+ return (PTR_DEV(k) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
- }
-
- /* Btree key macros */
-diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
-index c7f52134b394..e1e9b39c95b1 100644
---- a/drivers/md/bcache/bset.c
-+++ b/drivers/md/bcache/bset.c
-@@ -205,7 +205,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
- bkey_copy_key(k, where);
-
- for (i = 0; i < KEY_PTRS(k); i++)
-- SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
-+ SET_PTR_OFFSET(k, PTR_OFFSET(k) + KEY_SIZE(k) - len);
-
- BUG_ON(len > KEY_SIZE(k));
- SET_KEY_SIZE(k, len);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 125168a18be7..520ffa26750c 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -94,7 +94,7 @@
- #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
-
- #define PTR_HASH(c, k) \
-- (((k)->ptr >> c->bucket_bits) | PTR_GEN(k, 0))
-+ (((k)->ptr >> c->bucket_bits) | PTR_GEN(k))
-
- #define insert_lock(s, b) ((b)->level <= (s)->lock)
-
-@@ -416,7 +416,7 @@ static void do_btree_node_write(struct btree *b)
- */
-
- bkey_copy(&k.key, &b->key);
-- SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
-+ SET_PTR_OFFSET(&k.key, PTR_OFFSET(&k.key) +
- bset_sector_offset(&b->keys, i));
-
- if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
-@@ -1143,7 +1143,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- bkey_copy_key(k, &ZERO_KEY);
-
- for (i = 0; i < KEY_PTRS(k); i++)
-- SET_PTR_GEN(k, i,
-+ SET_PTR_GEN(k,
- bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
- PTR_BUCKET(b->c, &b->key, i)));
-
-@@ -1195,8 +1195,8 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
-
- g = PTR_BUCKET(c, k, i);
-
-- if (gen_after(g->last_gc, PTR_GEN(k, i)))
-- g->last_gc = PTR_GEN(k, i);
-+ if (gen_after(g->last_gc, PTR_GEN(k)))
-+ g->last_gc = PTR_GEN(k);
-
- if (ptr_stale(c, k, i)) {
- stale = max(stale, ptr_stale(c, k, i));
-@@ -1237,7 +1237,7 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
- !ptr_stale(c, k, i)) {
- struct bucket *b = PTR_BUCKET(c, k, i);
-
-- b->gen = PTR_GEN(k, i);
-+ b->gen = PTR_GEN(k);
-
- if (level && bkey_cmp(k, &ZERO_KEY))
- b->prio = BTREE_PRIO;
-@@ -2186,7 +2186,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- SET_KEY_PTRS(check_key, 1);
- get_random_bytes(&check_key->ptr, sizeof(uint64_t));
-
-- SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
-+ SET_PTR_DEV(check_key, PTR_CHECK_DEV);
-
- bch_keylist_add(&insert, check_key);
-
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index d030ce3025a6..86fd18e77dbf 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -51,7 +51,7 @@ void bch_btree_verify(struct btree *b)
-
- bio = bch_bbio_alloc(b->c);
- bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
-- bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
-+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key);
- bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
- bio->bi_opf = REQ_OP_READ | REQ_META;
- bch_bio_map(bio, sorted);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 6604cab6a48f..42506367acc1 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -52,7 +52,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
-- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-+ size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
-@@ -73,7 +73,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
-- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-+ size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
- return "bad, length too big";
-@@ -107,11 +107,11 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
- if (i)
- p(", ");
-
-- if (PTR_DEV(k, i) == PTR_CHECK_DEV)
-+ if (PTR_DEV(k) == PTR_CHECK_DEV)
- p("check dev");
- else
-- p("%llu:%llu gen %llu", PTR_DEV(k, i),
-- PTR_OFFSET(k, i), PTR_GEN(k, i));
-+ p("%llu:%llu gen %llu", PTR_DEV(k),
-+ PTR_OFFSET(k), PTR_GEN(k));
- }
-
- p("]");
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index 9612873afee2..bb63855d26b0 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
- {
- struct bbio *b = container_of(bio, struct bbio, bio);
-
-- bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
-+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key);
- bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
-
- b->submit_time_us = local_clock_us();
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index b2b7bacbfc83..f9e626f207a5 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -651,7 +651,7 @@ static void journal_write_unlocked(struct closure *cl)
- atomic_long_add(sectors, &ca->meta_sectors_written);
-
- bio_reset(bio);
-- bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
-+ bio->bi_iter.bi_sector = PTR_OFFSET(k);
- bio_set_dev(bio, ca->bdev);
- bio->bi_iter.bi_size = sectors << 9;
-
-@@ -664,7 +664,7 @@ static void journal_write_unlocked(struct closure *cl)
- trace_bcache_journal_write(bio);
- bio_list_add(&list, bio);
-
-- SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
-+ SET_PTR_OFFSET(k, PTR_OFFSET(k) + sectors);
-
- ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
- }
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index ad45ebe1a74b..3f9a6132e112 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -401,7 +401,7 @@ static void read_dirty(struct cached_dev *dc)
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
-- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
-+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key);
- bio_set_dev(&io->bio,
- PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
-index 1c919a662ec3..b15f499eb997 100644
---- a/include/uapi/linux/bcache.h
-+++ b/include/uapi/linux/bcache.h
-@@ -31,10 +31,10 @@ struct bkey {
- BITMASK(name, struct bkey, field, offset, size)
-
- #define PTR_FIELD(name, offset, size) \
--static inline __u64 name(const struct bkey *k, unsigned i) \
-+static inline __u64 name(const struct bkey *k) \
- { return (k->ptr >> offset) & ~(~0ULL << size); } \
- \
--static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
-+static inline void SET_##name(struct bkey *k, __u64 v) \
- { \
- k->ptr &= ~(~(~0ULL << size) << offset); \
- k->ptr |= (v & ~(~0ULL << size)) << offset; \
---
-2.17.0
-
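The PTR_FIELD() change above only drops the unused index parameter; the bit manipulation on k->ptr is unchanged. The following stand-alone sketch mirrors the reworked macro with illustrative field offsets (the authoritative PTR_GEN/PTR_OFFSET/PTR_DEV definitions live in include/uapi/linux/bcache.h):

    /* Sketch: one get/set accessor pair per bit field packed into k->ptr. */
    #include <stdint.h>
    #include <stdio.h>

    struct bkey {
        uint64_t high;
        uint64_t low;
        uint64_t ptr;
        uint64_t csum;
    };

    #define PTR_FIELD(name, offset, size)                               \
    static inline uint64_t name(const struct bkey *k)                   \
    { return (k->ptr >> offset) & ~(~0ULL << size); }                   \
                                                                        \
    static inline void SET_##name(struct bkey *k, uint64_t v)           \
    {                                                                   \
        k->ptr &= ~(~(~0ULL << size) << offset);                        \
        k->ptr |= (v & ~(~0ULL << size)) << offset;                     \
    }

    PTR_FIELD(PTR_GEN,    0,  8)   /* bucket generation (illustrative offsets) */
    PTR_FIELD(PTR_OFFSET, 8, 43)   /* sector offset on the cache device */

    int main(void)
    {
        struct bkey k = { 0, 0, 0, 0 };

        SET_PTR_GEN(&k, 3);
        SET_PTR_OFFSET(&k, 4096);
        printf("gen=%llu offset=%llu\n",
               (unsigned long long)PTR_GEN(&k),
               (unsigned long long)PTR_OFFSET(&k));   /* gen=3 offset=4096 */
        return 0;
    }
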
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0003-bcache-remove-useless-for-loop-of-KEY_PTRS.patch b/for-test/remove-multiple-cache-devices/original_ideas/0003-bcache-remove-useless-for-loop-of-KEY_PTRS.patch
deleted file mode 100644
index 14e95dd..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0003-bcache-remove-useless-for-loop-of-KEY_PTRS.patch
+++ /dev/null
@@ -1,762 +0,0 @@
-From 21baeba4157d1176dedd0992a1b12c015a793b2a Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Wed, 6 Jun 2018 22:55:15 +0800
-Subject: [PATCH 03/14] bcache: remove useless for-loop of KEY_PTRS()
-
-The bcache multiple cache devices code framework always uses a for-loop to
-iterate over all ptr[] entries of a bkey, but in fact only ptr[0] is accessed.
-
-To remove the multiple cache devices code framework, it is unnecessary to
-use a for-loop just to access ptr[0] of a bkey structure. This patch
-removes all these useless KEY_PTRS() for-loops from the bcache code.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 23 ++---
- drivers/md/bcache/bset.c | 5 +-
- drivers/md/bcache/btree.c | 103 ++++++++++------------
- drivers/md/bcache/extents.c | 160 ++++++++++++++--------------------
- drivers/md/bcache/journal.c | 41 ++++-----
- drivers/md/bcache/movinggc.c | 8 +-
- drivers/md/bcache/request.c | 6 +-
- drivers/md/bcache/super.c | 24 +++--
- drivers/md/bcache/writeback.c | 4 +-
- 9 files changed, 154 insertions(+), 220 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 90b43b081d61..47a9199557b9 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -470,11 +470,8 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
-
- void bch_bucket_free(struct cache_set *c, struct bkey *k)
- {
-- unsigned i;
--
-- for (i = 0; i < KEY_PTRS(k); i++)
-- __bch_bucket_free(PTR_CACHE(c, k, i),
-- PTR_BUCKET(c, k, i));
-+ __bch_bucket_free(PTR_CACHE(c, k, 0),
-+ PTR_BUCKET(c, k, 0));
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
-@@ -600,7 +597,6 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- {
- struct open_bucket *b;
- BKEY_PADDED(key) alloc;
-- unsigned i;
-
- /*
- * We might have to allocate a new bucket, which we can't do with a
-@@ -633,8 +629,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- if (KEY_PTRS(&alloc.key))
- bkey_put(c, &alloc.key);
-
-- for (i = 0; i < KEY_PTRS(&b->key); i++)
-- EBUG_ON(ptr_stale(c, &b->key, i));
-+ EBUG_ON(ptr_stale(c, &b->key, 0));
-
- /* Set up the pointer to the space we're allocating: */
- k->ptr = b->key.ptr;
-@@ -657,12 +652,9 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-
- b->sectors_free -= sectors;
-
-- for (i = 0; i < KEY_PTRS(&b->key); i++) {
-- SET_PTR_OFFSET(&b->key, PTR_OFFSET(&b->key) + sectors);
--
-- atomic_long_add(sectors,
-- &PTR_CACHE(c, &b->key, i)->sectors_written);
-- }
-+ SET_PTR_OFFSET(&b->key, PTR_OFFSET(&b->key) + sectors);
-+ atomic_long_add(sectors,
-+ &PTR_CACHE(c, &b->key, 0)->sectors_written);
-
- if (b->sectors_free < c->sb.block_size)
- b->sectors_free = 0;
-@@ -673,8 +665,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- * get_data_bucket()'s refcount.
- */
- if (b->sectors_free)
-- for (i = 0; i < KEY_PTRS(&b->key); i++)
-- atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-+ atomic_inc(&PTR_BUCKET(c, &b->key, 0)->pin);
-
- spin_unlock(&c->data_bucket_lock);
- return true;
-diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
-index e1e9b39c95b1..00c883536d54 100644
---- a/drivers/md/bcache/bset.c
-+++ b/drivers/md/bcache/bset.c
-@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
-
- bool __bch_cut_front(const struct bkey *where, struct bkey *k)
- {
-- unsigned i, len = 0;
-+ unsigned len = 0;
-
- if (bkey_cmp(where, &START_KEY(k)) <= 0)
- return false;
-@@ -204,8 +204,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
- else
- bkey_copy_key(k, where);
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- SET_PTR_OFFSET(k, PTR_OFFSET(k) + KEY_SIZE(k) - len);
-+ SET_PTR_OFFSET(k, PTR_OFFSET(k) + KEY_SIZE(k) - len);
-
- BUG_ON(len > KEY_SIZE(k));
- SET_KEY_SIZE(k, len);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 520ffa26750c..f2f8fa0f4480 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -180,11 +180,8 @@ static void bch_btree_init_next(struct btree *b)
-
- void bkey_put(struct cache_set *c, struct bkey *k)
- {
-- unsigned i;
--
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(c, k, i))
-- atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-+ if (ptr_available(c, k, 0))
-+ atomic_dec_bug(&PTR_BUCKET(c, k, 0)->pin);
- }
-
- /* Btree IO */
-@@ -1133,8 +1130,6 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
-
- static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- {
-- unsigned i;
--
- mutex_lock(&b->c->bucket_lock);
-
- atomic_inc(&b->c->prio_blocked);
-@@ -1142,10 +1137,9 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- bkey_copy(k, &b->key);
- bkey_copy_key(k, &ZERO_KEY);
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- SET_PTR_GEN(k,
-- bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
-- PTR_BUCKET(b->c, &b->key, i)));
-+ SET_PTR_GEN(k,
-+ bch_inc_gen(PTR_CACHE(b->c, &b->key, 0),
-+ PTR_BUCKET(b->c, &b->key, 0)));
-
- mutex_unlock(&b->c->bucket_lock);
- }
-@@ -1178,7 +1172,6 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- struct bkey *k)
- {
- uint8_t stale = 0;
-- unsigned i;
- struct bucket *g;
-
- /*
-@@ -1189,40 +1182,39 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- if (!bkey_cmp(k, &ZERO_KEY))
- return stale;
-
-- for (i = 0; i < KEY_PTRS(k); i++) {
-- if (!ptr_available(c, k, i))
-- continue;
-+ if (!ptr_available(c, k, 0))
-+ goto out;
-
-- g = PTR_BUCKET(c, k, i);
-+ g = PTR_BUCKET(c, k, 0);
-
-- if (gen_after(g->last_gc, PTR_GEN(k)))
-- g->last_gc = PTR_GEN(k);
-+ if (gen_after(g->last_gc, PTR_GEN(k)))
-+ g->last_gc = PTR_GEN(k);
-
-- if (ptr_stale(c, k, i)) {
-- stale = max(stale, ptr_stale(c, k, i));
-- continue;
-- }
-+ if (ptr_stale(c, k, 0)) {
-+ stale = max(stale, ptr_stale(c, k, 0));
-+ goto out;
-+ }
-
-- cache_bug_on(GC_MARK(g) &&
-- (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
-- c, "inconsistent ptrs: mark = %llu, level = %i",
-- GC_MARK(g), level);
-+ cache_bug_on(GC_MARK(g) &&
-+ (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
-+ c, "inconsistent ptrs: mark = %llu, level = %i",
-+ GC_MARK(g), level);
-
-- if (level)
-- SET_GC_MARK(g, GC_MARK_METADATA);
-- else if (KEY_DIRTY(k))
-- SET_GC_MARK(g, GC_MARK_DIRTY);
-- else if (!GC_MARK(g))
-- SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
-+ if (level)
-+ SET_GC_MARK(g, GC_MARK_METADATA);
-+ else if (KEY_DIRTY(k))
-+ SET_GC_MARK(g, GC_MARK_DIRTY);
-+ else if (!GC_MARK(g))
-+ SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
-
-- /* guard against overflow */
-- SET_GC_SECTORS_USED(g, min_t(unsigned,
-- GC_SECTORS_USED(g) + KEY_SIZE(k),
-- MAX_GC_SECTORS_USED));
-+ /* guard against overflow */
-+ SET_GC_SECTORS_USED(g, min_t(unsigned,
-+ GC_SECTORS_USED(g) + KEY_SIZE(k),
-+ MAX_GC_SECTORS_USED));
-
-- BUG_ON(!GC_SECTORS_USED(g));
-- }
-+ BUG_ON(!GC_SECTORS_USED(g));
-
-+out:
- return stale;
- }
-
-@@ -1230,20 +1222,18 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
-
- void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
- {
-- unsigned i;
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(c, k, i) &&
-- !ptr_stale(c, k, i)) {
-- struct bucket *b = PTR_BUCKET(c, k, i);
-+ if (ptr_available(c, k, 0) &&
-+ !ptr_stale(c, k, 0)) {
-+ struct bucket *b = PTR_BUCKET(c, k, 0);
-
-- b->gen = PTR_GEN(k);
-+ b->gen = PTR_GEN(k);
-
-- if (level && bkey_cmp(k, &ZERO_KEY))
-- b->prio = BTREE_PRIO;
-- else if (!level && b->prio == BTREE_PRIO)
-- b->prio = INITIAL_PRIO;
-- }
-+ if (level && bkey_cmp(k, &ZERO_KEY))
-+ b->prio = BTREE_PRIO;
-+ else if (!level && b->prio == BTREE_PRIO)
-+ b->prio = INITIAL_PRIO;
-+ }
-
- __bch_btree_mark_key(c, level, k);
- }
-@@ -1676,9 +1666,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
- c->gc_mark_valid = 1;
- c->need_gc = 0;
-
-- for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
-- SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
-- GC_MARK_METADATA);
-+ SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, 0),
-+ GC_MARK_METADATA);
-
- /* don't reclaim buckets to which writeback keys point */
- rcu_read_lock();
-@@ -1686,7 +1675,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
- struct bcache_device *d = c->devices[i];
- struct cached_dev *dc;
- struct keybuf_key *w, *n;
-- unsigned j;
-
- if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
-@@ -1695,9 +1683,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
- spin_lock(&dc->writeback_keys.lock);
- rbtree_postorder_for_each_entry_safe(w, n,
- &dc->writeback_keys.keys, node)
-- for (j = 0; j < KEY_PTRS(&w->key); j++)
-- SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
-- GC_MARK_DIRTY);
-+ SET_GC_MARK(PTR_BUCKET(c, &w->key, 0),
-+ GC_MARK_DIRTY);
- spin_unlock(&dc->writeback_keys.lock);
- }
- rcu_read_unlock();
-@@ -2255,7 +2242,6 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
-
- void bch_btree_set_root(struct btree *b)
- {
-- unsigned i;
- struct closure cl;
-
- closure_init_stack(&cl);
-@@ -2264,8 +2250,7 @@ void bch_btree_set_root(struct btree *b)
-
- BUG_ON(!b->written);
-
-- for (i = 0; i < KEY_PTRS(&b->key); i++)
-- BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
-+ BUG_ON(PTR_BUCKET(b->c, &b->key, 0)->prio != BTREE_PRIO);
-
- mutex_lock(&b->c->bucket_lock);
- list_del_init(&b->list);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 42506367acc1..d52239f4443e 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -46,19 +46,16 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
-
- static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- {
-- unsigned i;
--
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(c, k, i)) {
-- struct cache *ca = PTR_CACHE(c, k, i);
-- size_t bucket = PTR_BUCKET_NR(c, k, i);
-- size_t r = bucket_remainder(c, PTR_OFFSET(k));
--
-- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-- bucket < ca->sb.first_bucket ||
-- bucket >= ca->sb.nbuckets)
-- return true;
-- }
-+ if (ptr_available(c, k, 0)) {
-+ struct cache *ca = PTR_CACHE(c, k, 0);
-+ size_t bucket = PTR_BUCKET_NR(c, k, 0);
-+ size_t r = bucket_remainder(c, PTR_OFFSET(k));
-+
-+ if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-+ bucket < ca->sb.first_bucket ||
-+ bucket >= ca->sb.nbuckets)
-+ return true;
-+ }
-
- return false;
- }
-@@ -67,23 +64,20 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-
- static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- {
-- unsigned i;
--
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(c, k, i)) {
-- struct cache *ca = PTR_CACHE(c, k, i);
-- size_t bucket = PTR_BUCKET_NR(c, k, i);
-- size_t r = bucket_remainder(c, PTR_OFFSET(k));
--
-- if (KEY_SIZE(k) + r > c->sb.bucket_size)
-- return "bad, length too big";
-- if (bucket < ca->sb.first_bucket)
-- return "bad, short offset";
-- if (bucket >= ca->sb.nbuckets)
-- return "bad, offset past end of device";
-- if (ptr_stale(c, k, i))
-- return "stale";
-- }
-+ if (ptr_available(c, k, 0)) {
-+ struct cache *ca = PTR_CACHE(c, k, 0);
-+ size_t bucket = PTR_BUCKET_NR(c, k, 0);
-+ size_t r = bucket_remainder(c, PTR_OFFSET(k));
-+
-+ if (KEY_SIZE(k) + r > c->sb.bucket_size)
-+ return "bad, length too big";
-+ if (bucket < ca->sb.first_bucket)
-+ return "bad, short offset";
-+ if (bucket >= ca->sb.nbuckets)
-+ return "bad, offset past end of device";
-+ if (ptr_stale(c, k, 0))
-+ return "stale";
-+ }
-
- if (!bkey_cmp(k, &ZERO_KEY))
- return "bad, null key";
-@@ -96,23 +90,17 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-
- void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
- {
-- unsigned i = 0;
- char *out = buf, *end = buf + size;
-
- #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
-
- p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
-
-- for (i = 0; i < KEY_PTRS(k); i++) {
-- if (i)
-- p(", ");
--
-- if (PTR_DEV(k) == PTR_CHECK_DEV)
-- p("check dev");
-- else
-- p("%llu:%llu gen %llu", PTR_DEV(k),
-- PTR_OFFSET(k), PTR_GEN(k));
-- }
-+ if (PTR_DEV(k) == PTR_CHECK_DEV)
-+ p("check dev");
-+ else
-+ p("%llu:%llu gen %llu", PTR_DEV(k),
-+ PTR_OFFSET(k), PTR_GEN(k));
-
- p("]");
-
-@@ -126,20 +114,17 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
- static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
- {
- struct btree *b = container_of(keys, struct btree, keys);
-- unsigned j;
- char buf[80];
--
-+ size_t n;
- bch_extent_to_text(buf, sizeof(buf), k);
- printk(" %s", buf);
-
-- for (j = 0; j < KEY_PTRS(k); j++) {
-- size_t n = PTR_BUCKET_NR(b->c, k, j);
-- printk(" bucket %zu", n);
-+ n = PTR_BUCKET_NR(b->c, k, 0);
-+ printk(" bucket %zu", n);
-
-- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-- printk(" prio %i",
-- PTR_BUCKET(b->c, k, j)->prio);
-- }
-+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-+ printk(" prio %i",
-+ PTR_BUCKET(b->c, k, 0)->prio);
-
- printk(" %s\n", bch_ptr_status(b->c, k));
- }
-@@ -171,21 +156,19 @@ static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
-
- static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
- {
-- unsigned i;
- char buf[80];
- struct bucket *g;
-
- if (mutex_trylock(&b->c->bucket_lock)) {
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(b->c, k, i)) {
-- g = PTR_BUCKET(b->c, k, i);
--
-- if (KEY_DIRTY(k) ||
-- g->prio != BTREE_PRIO ||
-- (b->c->gc_mark_valid &&
-- GC_MARK(g) != GC_MARK_METADATA))
-- goto err;
-- }
-+ if (ptr_available(b->c, k, 0)) {
-+ g = PTR_BUCKET(b->c, k, 0);
-+
-+ if (KEY_DIRTY(k) ||
-+ g->prio != BTREE_PRIO ||
-+ (b->c->gc_mark_valid &&
-+ GC_MARK(g) != GC_MARK_METADATA))
-+ goto err;
-+ }
-
- mutex_unlock(&b->c->bucket_lock);
- }
-@@ -196,7 +179,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
- bch_extent_to_text(buf, sizeof(buf), k);
- btree_bug(b,
- "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
-- buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
-+ buf, PTR_BUCKET_NR(b->c, k, 0), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g));
- return true;
- }
-@@ -204,17 +187,15 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
- static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
- {
- struct btree *b = container_of(bk, struct btree, keys);
-- unsigned i;
-
- if (!bkey_cmp(k, &ZERO_KEY) ||
- !KEY_PTRS(k) ||
- bch_ptr_invalid(bk, k))
- return true;
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (!ptr_available(b->c, k, i) ||
-- ptr_stale(b->c, k, i))
-- return true;
-+ if (!ptr_available(b->c, k, 0) ||
-+ ptr_stale(b->c, k, 0))
-+ return true;
-
- if (expensive_debug_checks(b->c) &&
- btree_ptr_bad_expensive(b, k))
-@@ -363,7 +344,6 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
- * k might have been split since we inserted/found the
- * key we're replacing
- */
-- unsigned i;
- uint64_t offset = KEY_START(k) -
- KEY_START(replace_key);
-
-@@ -384,9 +364,8 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
-
- BUG_ON(!KEY_PTRS(replace_key));
-
-- for (i = 0; i < KEY_PTRS(replace_key); i++)
-- if (k->ptr != replace_key->ptr + offset)
-- goto check_failed;
-+ if (k->ptr != replace_key->ptr + offset)
-+ goto check_failed;
-
- sectors_found = KEY_OFFSET(k) - KEY_START(insert);
- }
-@@ -534,36 +513,33 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
- static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
- {
- struct btree *b = container_of(bk, struct btree, keys);
-- unsigned i, stale;
-+ unsigned stale;
-
- if (!KEY_PTRS(k) ||
- bch_extent_invalid(bk, k))
- return true;
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (!ptr_available(b->c, k, i))
-- return true;
-+ if (!ptr_available(b->c, k, 0))
-+ return true;
-
- if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
- return false;
-
-- for (i = 0; i < KEY_PTRS(k); i++) {
-- stale = ptr_stale(b->c, k, i);
-+ stale = ptr_stale(b->c, k, 0);
-
-- btree_bug_on(stale > 96, b,
-- "key too stale: %i, need_gc %u",
-- stale, b->c->need_gc);
-+ btree_bug_on(stale > 96, b,
-+ "key too stale: %i, need_gc %u",
-+ stale, b->c->need_gc);
-
-- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-- b, "stale dirty pointer");
-+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-+ b, "stale dirty pointer");
-
-- if (stale)
-- return true;
-+ if (stale)
-+ return true;
-
-- if (expensive_debug_checks(b->c) &&
-- bch_extent_bad_expensive(b, k, i))
-- return true;
-- }
-+ if (expensive_debug_checks(b->c) &&
-+ bch_extent_bad_expensive(b, k, 0))
-+ return true;
-
- return false;
- }
-@@ -576,15 +552,13 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
- static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
- {
- struct btree *b = container_of(bk, struct btree, keys);
-- unsigned i;
-
- if (key_merging_disabled(b->c))
- return false;
-
-- for (i = 0; i < KEY_PTRS(l); i++)
-- if (l->ptr + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr ||
-- PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
-- return false;
-+ if (l->ptr + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr ||
-+ PTR_BUCKET_NR(b->c, l, 0) != PTR_BUCKET_NR(b->c, r, 0))
-+ return false;
-
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index f9e626f207a5..fca59ed26067 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -304,11 +304,8 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
- k < bset_bkey_last(&i->j);
- k = bkey_next(k))
- if (!__bch_extent_invalid(c, k)) {
-- unsigned j;
--
-- for (j = 0; j < KEY_PTRS(k); j++)
-- if (ptr_available(c, k, j))
-- atomic_inc(&PTR_BUCKET(c, k, j)->pin);
-+ if (ptr_available(c, k, 0))
-+ atomic_inc(&PTR_BUCKET(c, k, 0)->pin);
-
- bch_initial_mark_key(c, 0, k);
- }
-@@ -644,30 +641,28 @@ static void journal_write_unlocked(struct closure *cl)
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-
-- for (i = 0; i < KEY_PTRS(k); i++) {
-- ca = PTR_CACHE(c, k, i);
-- bio = &ca->journal.bio;
-+ ca = PTR_CACHE(c, k, 0);
-+ bio = &ca->journal.bio;
-
-- atomic_long_add(sectors, &ca->meta_sectors_written);
-+ atomic_long_add(sectors, &ca->meta_sectors_written);
-
-- bio_reset(bio);
-- bio->bi_iter.bi_sector = PTR_OFFSET(k);
-- bio_set_dev(bio, ca->bdev);
-- bio->bi_iter.bi_size = sectors << 9;
-+ bio_reset(bio);
-+ bio->bi_iter.bi_sector = PTR_OFFSET(k);
-+ bio_set_dev(bio, ca->bdev);
-+ bio->bi_iter.bi_size = sectors << 9;
-
-- bio->bi_end_io = journal_write_endio;
-- bio->bi_private = w;
-- bio_set_op_attrs(bio, REQ_OP_WRITE,
-- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
-- bch_bio_map(bio, w->data);
-+ bio->bi_end_io = journal_write_endio;
-+ bio->bi_private = w;
-+ bio_set_op_attrs(bio, REQ_OP_WRITE,
-+ REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
-+ bch_bio_map(bio, w->data);
-
-- trace_bcache_journal_write(bio);
-- bio_list_add(&list, bio);
-+ trace_bcache_journal_write(bio);
-+ bio_list_add(&list, bio);
-
-- SET_PTR_OFFSET(k, PTR_OFFSET(k) + sectors);
-+ SET_PTR_OFFSET(k, PTR_OFFSET(k) + sectors);
-
-- ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
-- }
-+ ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
-
- atomic_dec_bug(&fifo_back(&c->journal.pin));
- bch_journal_next(&c->journal);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index a24c3a95b2c0..06fedfc55fd7 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -23,12 +23,10 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
- {
- struct cache_set *c = container_of(buf, struct cache_set,
- moving_gc_keys);
-- unsigned i;
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- if (ptr_available(c, k, i) &&
-- GC_MOVE(PTR_BUCKET(c, k, i)))
-- return true;
-+ if (ptr_available(c, k, 0) &&
-+ GC_MOVE(PTR_BUCKET(c, k, 0)))
-+ return true;
-
- return false;
- }
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index cd4fcae6516c..4a6889fa3f93 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -211,7 +211,6 @@ static void bch_data_insert_start(struct closure *cl)
- bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
-
- do {
-- unsigned i;
- struct bkey *k;
- struct bio_set *split = &op->c->bio_split;
-
-@@ -241,9 +240,8 @@ static void bch_data_insert_start(struct closure *cl)
- if (op->writeback) {
- SET_KEY_DIRTY(k, true);
-
-- for (i = 0; i < KEY_PTRS(k); i++)
-- SET_GC_MARK(PTR_BUCKET(op->c, k, i),
-- GC_MARK_DIRTY);
-+ SET_GC_MARK(PTR_BUCKET(op->c, k, 0),
-+ GC_MARK_DIRTY);
- }
-
- SET_KEY_CSUM(k, op->csum);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index a31e55bcc4e5..51490e9e5628 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -334,29 +334,25 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
- {
- struct closure *cl = &c->uuid_write;
- struct uuid_entry *u;
-- unsigned i;
- char buf[80];
-+ struct bio *bio;
-+
-
- BUG_ON(!parent);
- down(&c->uuid_write_mutex);
- closure_init(cl, parent);
-
-- for (i = 0; i < KEY_PTRS(k); i++) {
-- struct bio *bio = bch_bbio_alloc(c);
--
-- bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
-- bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
-+ bio = bch_bbio_alloc(c);
-
-- bio->bi_end_io = uuid_endio;
-- bio->bi_private = cl;
-- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
-- bch_bio_map(bio, c->uuids);
-+ bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
-+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
-
-- bch_submit_bbio(bio, c, k, i);
-+ bio->bi_end_io = uuid_endio;
-+ bio->bi_private = cl;
-+ bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
-+ bch_bio_map(bio, c->uuids);
-
-- if (op != REQ_OP_WRITE)
-- break;
-- }
-+ bch_submit_bbio(bio, c, k, 0);
-
- bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 3f9a6132e112..17703b79a817 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -211,7 +211,6 @@ static void write_dirty_finish(struct closure *cl)
- /* This is kind of a dumb way of signalling errors. */
- if (KEY_DIRTY(&w->key)) {
- int ret;
-- unsigned i;
- struct keylist keys;
-
- bch_keylist_init(&keys);
-@@ -220,8 +219,7 @@ static void write_dirty_finish(struct closure *cl)
- SET_KEY_DIRTY(keys.top, false);
- bch_keylist_push(&keys);
-
-- for (i = 0; i < KEY_PTRS(&w->key); i++)
-- atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
-+ atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, 0)->pin);
-
- ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
-
---
-2.17.0
-
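The rewrites in the patch above rest on the fact that KEY_PTRS() is at most 1, so a loop over the pointers runs its body at most once and always with index 0, which is why it can be replaced by a single guarded statement. A small toy model (not kernel code) of that equivalence:

    /* Toy model: a 0-or-1 bounded loop equals a single guarded call. */
    #include <assert.h>

    struct toy_bkey { unsigned nr_ptrs; };   /* stands in for KEY_PTRS(k) */

    static unsigned visits_loop(const struct toy_bkey *k)
    {
        unsigned i, visited = 0;

        for (i = 0; i < k->nr_ptrs; i++)     /* old pattern */
            visited++;
        return visited;
    }

    static unsigned visits_collapsed(const struct toy_bkey *k)
    {
        return k->nr_ptrs ? 1 : 0;           /* new pattern: ptr 0 only */
    }

    int main(void)
    {
        struct toy_bkey none = { .nr_ptrs = 0 };
        struct toy_bkey one  = { .nr_ptrs = 1 };

        assert(visits_loop(&none) == visits_collapsed(&none));
        assert(visits_loop(&one)  == visits_collapsed(&one));
        return 0;
    }
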
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0004-bcache-remove-ptr-index-from-PTR_CACHE.patch b/for-test/remove-multiple-cache-devices/original_ideas/0004-bcache-remove-ptr-index-from-PTR_CACHE.patch
deleted file mode 100644
index c562646..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0004-bcache-remove-ptr-index-from-PTR_CACHE.patch
+++ /dev/null
@@ -1,194 +0,0 @@
-From f9fa3c1fe3bf001c4b9a4596e22763d91d1956c7 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 10:51:50 +0800
-Subject: [PATCH 04/14] bcache: remove ptr index from PTR_CACHE()
-
-The ptr index in the macro PTR_CACHE() is used for multiple cache devices
-support, and most of the time its value is just 0 (it references the first
-pointer in the bkey's ptr[] array). Now that we are removing the unfinished
-multiple cache devices support, the ptr index is useless and is removed in
-this patch.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 4 ++--
- drivers/md/bcache/bcache.h | 7 +++----
- drivers/md/bcache/btree.c | 4 ++--
- drivers/md/bcache/debug.c | 2 +-
- drivers/md/bcache/extents.c | 4 ++--
- drivers/md/bcache/io.c | 4 ++--
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/writeback.c | 4 ++--
- 8 files changed, 15 insertions(+), 16 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 47a9199557b9..9f0566a45f59 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -470,7 +470,7 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
-
- void bch_bucket_free(struct cache_set *c, struct bkey *k)
- {
-- __bch_bucket_free(PTR_CACHE(c, k, 0),
-+ __bch_bucket_free(PTR_CACHE(c, k),
- PTR_BUCKET(c, k, 0));
- }
-
-@@ -654,7 +654,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-
- SET_PTR_OFFSET(&b->key, PTR_OFFSET(&b->key) + sectors);
- atomic_long_add(sectors,
-- &PTR_CACHE(c, &b->key, 0)->sectors_written);
-+ &PTR_CACHE(c, &b->key)->sectors_written);
-
- if (b->sectors_free < c->sb.block_size)
- b->sectors_free = 0;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 10b9bbc482eb..f06230b4d574 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -758,8 +758,7 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
- }
-
- static inline struct cache *PTR_CACHE(struct cache_set *c,
-- const struct bkey *k,
-- unsigned ptr)
-+ const struct bkey *k)
- {
- return c->cache[PTR_DEV(k)];
- }
-@@ -775,7 +774,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
- const struct bkey *k,
- unsigned ptr)
- {
-- return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
-+ return PTR_CACHE(c, k)->buckets + PTR_BUCKET_NR(c, k, ptr);
- }
-
- static inline uint8_t gen_after(uint8_t a, uint8_t b)
-@@ -793,7 +792,7 @@ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- unsigned i)
- {
-- return (PTR_DEV(k) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-+ return (PTR_DEV(k) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k);
- }
-
- /* Btree key macros */
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index f2f8fa0f4480..52e10db562ac 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -466,7 +466,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
- do_btree_node_write(b);
-
- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
-- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-+ &PTR_CACHE(b->c, &b->key)->btree_sectors_written);
-
- b->written += set_blocks(i, block_bytes(b->c));
- }
-@@ -1138,7 +1138,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- bkey_copy_key(k, &ZERO_KEY);
-
- SET_PTR_GEN(k,
-- bch_inc_gen(PTR_CACHE(b->c, &b->key, 0),
-+ bch_inc_gen(PTR_CACHE(b->c, &b->key),
- PTR_BUCKET(b->c, &b->key, 0)));
-
- mutex_unlock(&b->c->bucket_lock);
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 86fd18e77dbf..24555c54f382 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -50,7 +50,7 @@ void bch_btree_verify(struct btree *b)
- v->keys.ops = b->keys.ops;
-
- bio = bch_bbio_alloc(b->c);
-- bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
-+ bio_set_dev(bio, PTR_CACHE(b->c, &b->key)->bdev);
- bio->bi_iter.bi_sector = PTR_OFFSET(&b->key);
- bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
- bio->bi_opf = REQ_OP_READ | REQ_META;
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index d52239f4443e..b9cebb38777c 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -47,7 +47,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
- static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- {
- if (ptr_available(c, k, 0)) {
-- struct cache *ca = PTR_CACHE(c, k, 0);
-+ struct cache *ca = PTR_CACHE(c, k);
- size_t bucket = PTR_BUCKET_NR(c, k, 0);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
-@@ -65,7 +65,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- {
- if (ptr_available(c, k, 0)) {
-- struct cache *ca = PTR_CACHE(c, k, 0);
-+ struct cache *ca = PTR_CACHE(c, k);
- size_t bucket = PTR_BUCKET_NR(c, k, 0);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index bb63855d26b0..40686033f6d5 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -35,7 +35,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
- struct bbio *b = container_of(bio, struct bbio, bio);
-
- bio->bi_iter.bi_sector = PTR_OFFSET(&b->key);
-- bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
-+ bio_set_dev(bio, PTR_CACHE(c, &b->key)->bdev);
-
- b->submit_time_us = local_clock_us();
- closure_bio_submit(c, bio, bio->bi_private);
-@@ -123,7 +123,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
- blk_status_t error, const char *m)
- {
- struct bbio *b = container_of(bio, struct bbio, bio);
-- struct cache *ca = PTR_CACHE(c, &b->key, 0);
-+ struct cache *ca = PTR_CACHE(c, &b->key);
- int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
-
- unsigned threshold = op_is_write(bio_op(bio))
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index fca59ed26067..1204e7fbb116 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -641,7 +641,7 @@ static void journal_write_unlocked(struct closure *cl)
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-
-- ca = PTR_CACHE(c, k, 0);
-+ ca = PTR_CACHE(c, k);
- bio = &ca->journal.bio;
-
- atomic_long_add(sectors, &ca->meta_sectors_written);
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 17703b79a817..d4a0cef1a6ff 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -305,7 +305,7 @@ static void read_dirty_endio(struct bio *bio)
- struct dirty_io *io = w->private;
-
- /* is_read = 1 */
-- bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-+ bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key),
- bio->bi_status, 1,
- "reading dirty data from cache");
-
-@@ -401,7 +401,7 @@ static void read_dirty(struct cached_dev *dc)
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key);
- bio_set_dev(&io->bio,
-- PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
-+ PTR_CACHE(dc->disk.c, &w->key)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0005-bcache-remove-ptr-index-from-PTR_BUCKET-and-PTR_BUCK.patch b/for-test/remove-multiple-cache-devices/original_ideas/0005-bcache-remove-ptr-index-from-PTR_BUCKET-and-PTR_BUCK.patch
deleted file mode 100644
index e121453..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0005-bcache-remove-ptr-index-from-PTR_BUCKET-and-PTR_BUCK.patch
+++ /dev/null
@@ -1,351 +0,0 @@
-From d67235fe8d0ae4b39f4b62b187f6b253ef0aee95 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 12:31:02 +0800
-Subject: [PATCH 05/14] bcache: remove ptr index from PTR_BUCKET() and
- PTR_BUCKET_NR()
-
-The ptr index in the macros PTR_BUCKET() and PTR_BUCKET_NR() is always 0. Now
-we are removing the unfinished multiple cache devices support framework from
-the bcache code, so remove the ptr index from these two macro definitions.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 5 ++---
- drivers/md/bcache/bcache.h | 10 ++++------
- drivers/md/bcache/btree.c | 18 +++++++++---------
- drivers/md/bcache/extents.c | 18 +++++++++---------
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/movinggc.c | 2 +-
- drivers/md/bcache/request.c | 4 ++--
- drivers/md/bcache/writeback.c | 2 +-
- include/trace/events/bcache.h | 8 ++++----
- 9 files changed, 33 insertions(+), 36 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 9f0566a45f59..8a1cc9196c59 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -470,8 +470,7 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
-
- void bch_bucket_free(struct cache_set *c, struct bkey *k)
- {
-- __bch_bucket_free(PTR_CACHE(c, k),
-- PTR_BUCKET(c, k, 0));
-+ __bch_bucket_free(PTR_CACHE(c, k), PTR_BUCKET(c, k));
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
-@@ -665,7 +664,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- * get_data_bucket()'s refcount.
- */
- if (b->sectors_free)
-- atomic_inc(&PTR_BUCKET(c, &b->key, 0)->pin);
-+ atomic_inc(&PTR_BUCKET(c, &b->key)->pin);
-
- spin_unlock(&c->data_bucket_lock);
- return true;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index f06230b4d574..d9bee59195d2 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -764,17 +764,15 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
-- const struct bkey *k,
-- unsigned ptr)
-+ const struct bkey *k)
- {
- return sector_to_bucket(c, PTR_OFFSET(k));
- }
-
- static inline struct bucket *PTR_BUCKET(struct cache_set *c,
-- const struct bkey *k,
-- unsigned ptr)
-+ const struct bkey *k)
- {
-- return PTR_CACHE(c, k)->buckets + PTR_BUCKET_NR(c, k, ptr);
-+ return PTR_CACHE(c, k)->buckets + PTR_BUCKET_NR(c, k);
- }
-
- static inline uint8_t gen_after(uint8_t a, uint8_t b)
-@@ -786,7 +784,7 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
- static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- unsigned i)
- {
-- return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k));
-+ return gen_after(PTR_BUCKET(c, k)->gen, PTR_GEN(k));
- }
-
- static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 52e10db562ac..cc6e86cdd6b1 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -181,7 +181,7 @@ static void bch_btree_init_next(struct btree *b)
- void bkey_put(struct cache_set *c, struct bkey *k)
- {
- if (ptr_available(c, k, 0))
-- atomic_dec_bug(&PTR_BUCKET(c, k, 0)->pin);
-+ atomic_dec_bug(&PTR_BUCKET(c, k)->pin);
- }
-
- /* Btree IO */
-@@ -273,7 +273,7 @@ void bch_btree_node_read_done(struct btree *b)
- err:
- set_btree_node_io_error(b);
- bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
-- err, PTR_BUCKET_NR(b->c, &b->key, 0),
-+ err, PTR_BUCKET_NR(b->c, &b->key),
- bset_block_offset(b, i), i->keys);
- goto out;
- }
-@@ -319,7 +319,7 @@ static void bch_btree_node_read(struct btree *b)
- return;
- err:
- bch_cache_set_error(b->c, "io error reading bucket %zu",
-- PTR_BUCKET_NR(b->c, &b->key, 0));
-+ PTR_BUCKET_NR(b->c, &b->key));
- }
-
- static void btree_complete_write(struct btree *b, struct btree_write *w)
-@@ -1139,7 +1139,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
-
- SET_PTR_GEN(k,
- bch_inc_gen(PTR_CACHE(b->c, &b->key),
-- PTR_BUCKET(b->c, &b->key, 0)));
-+ PTR_BUCKET(b->c, &b->key)));
-
- mutex_unlock(&b->c->bucket_lock);
- }
-@@ -1185,7 +1185,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- if (!ptr_available(c, k, 0))
- goto out;
-
-- g = PTR_BUCKET(c, k, 0);
-+ g = PTR_BUCKET(c, k);
-
- if (gen_after(g->last_gc, PTR_GEN(k)))
- g->last_gc = PTR_GEN(k);
-@@ -1225,7 +1225,7 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
-
- if (ptr_available(c, k, 0) &&
- !ptr_stale(c, k, 0)) {
-- struct bucket *b = PTR_BUCKET(c, k, 0);
-+ struct bucket *b = PTR_BUCKET(c, k);
-
- b->gen = PTR_GEN(k);
-
-@@ -1666,7 +1666,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
- c->gc_mark_valid = 1;
- c->need_gc = 0;
-
-- SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, 0),
-+ SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket),
- GC_MARK_METADATA);
-
- /* don't reclaim buckets to which writeback keys point */
-@@ -1683,7 +1683,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
- spin_lock(&dc->writeback_keys.lock);
- rbtree_postorder_for_each_entry_safe(w, n,
- &dc->writeback_keys.keys, node)
-- SET_GC_MARK(PTR_BUCKET(c, &w->key, 0),
-+ SET_GC_MARK(PTR_BUCKET(c, &w->key),
- GC_MARK_DIRTY);
- spin_unlock(&dc->writeback_keys.lock);
- }
-@@ -2250,7 +2250,7 @@ void bch_btree_set_root(struct btree *b)
-
- BUG_ON(!b->written);
-
-- BUG_ON(PTR_BUCKET(b->c, &b->key, 0)->prio != BTREE_PRIO);
-+ BUG_ON(PTR_BUCKET(b->c, &b->key)->prio != BTREE_PRIO);
-
- mutex_lock(&b->c->bucket_lock);
- list_del_init(&b->list);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index b9cebb38777c..44bc5e3b7e48 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -48,7 +48,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- {
- if (ptr_available(c, k, 0)) {
- struct cache *ca = PTR_CACHE(c, k);
-- size_t bucket = PTR_BUCKET_NR(c, k, 0);
-+ size_t bucket = PTR_BUCKET_NR(c, k);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-@@ -66,7 +66,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- {
- if (ptr_available(c, k, 0)) {
- struct cache *ca = PTR_CACHE(c, k);
-- size_t bucket = PTR_BUCKET_NR(c, k, 0);
-+ size_t bucket = PTR_BUCKET_NR(c, k);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
-@@ -119,12 +119,12 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
- bch_extent_to_text(buf, sizeof(buf), k);
- printk(" %s", buf);
-
-- n = PTR_BUCKET_NR(b->c, k, 0);
-+ n = PTR_BUCKET_NR(b->c, k);
- printk(" bucket %zu", n);
-
- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- printk(" prio %i",
-- PTR_BUCKET(b->c, k, 0)->prio);
-+ PTR_BUCKET(b->c, k)->prio);
-
- printk(" %s\n", bch_ptr_status(b->c, k));
- }
-@@ -161,7 +161,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
-
- if (mutex_trylock(&b->c->bucket_lock)) {
- if (ptr_available(b->c, k, 0)) {
-- g = PTR_BUCKET(b->c, k, 0);
-+ g = PTR_BUCKET(b->c, k);
-
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
-@@ -179,7 +179,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
- bch_extent_to_text(buf, sizeof(buf), k);
- btree_bug(b,
- "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
-- buf, PTR_BUCKET_NR(b->c, k, 0), atomic_read(&g->pin),
-+ buf, PTR_BUCKET_NR(b->c, k), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g));
- return true;
- }
-@@ -483,7 +483,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
- static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
- unsigned ptr)
- {
-- struct bucket *g = PTR_BUCKET(b->c, k, ptr);
-+ struct bucket *g = PTR_BUCKET(b->c, k);
- char buf[80];
-
- if (mutex_trylock(&b->c->bucket_lock)) {
-@@ -505,7 +505,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
- bch_extent_to_text(buf, sizeof(buf), k);
- btree_bug(b,
- "inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
-- buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
-+ buf, PTR_BUCKET_NR(b->c, k), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g));
- return true;
- }
-@@ -557,7 +557,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
- return false;
-
- if (l->ptr + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr ||
-- PTR_BUCKET_NR(b->c, l, 0) != PTR_BUCKET_NR(b->c, r, 0))
-+ PTR_BUCKET_NR(b->c, l) != PTR_BUCKET_NR(b->c, r))
- return false;
-
- /* Keys with no pointers aren't restricted to one bucket and could
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 1204e7fbb116..4ef86f81bf1e 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -305,7 +305,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
- k = bkey_next(k))
- if (!__bch_extent_invalid(c, k)) {
- if (ptr_available(c, k, 0))
-- atomic_inc(&PTR_BUCKET(c, k, 0)->pin);
-+ atomic_inc(&PTR_BUCKET(c, k)->pin);
-
- bch_initial_mark_key(c, 0, k);
- }
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 06fedfc55fd7..0486b3ef4450 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -25,7 +25,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
- moving_gc_keys);
-
- if (ptr_available(c, k, 0) &&
-- GC_MOVE(PTR_BUCKET(c, k, 0)))
-+ GC_MOVE(PTR_BUCKET(c, k)))
- return true;
-
- return false;
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 4a6889fa3f93..00f8e70c4a4d 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -240,7 +240,7 @@ static void bch_data_insert_start(struct closure *cl)
- if (op->writeback) {
- SET_KEY_DIRTY(k, true);
-
-- SET_GC_MARK(PTR_BUCKET(op->c, k, 0),
-+ SET_GC_MARK(PTR_BUCKET(op->c, k),
- GC_MARK_DIRTY);
- }
-
-@@ -539,7 +539,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
- /* XXX: figure out best pointer - for multiple cache devices */
- ptr = 0;
-
-- PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-+ PTR_BUCKET(b->c, k)->prio = INITIAL_PRIO;
-
- if (KEY_DIRTY(k))
- s->read_dirty_data = true;
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index d4a0cef1a6ff..8ea299469ffe 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -219,7 +219,7 @@ static void write_dirty_finish(struct closure *cl)
- SET_KEY_DIRTY(keys.top, false);
- bch_keylist_push(&keys);
-
-- atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, 0)->pin);
-+ atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key)->pin);
-
- ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
-
-diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
-index 2cbd6e42ad83..ba040d06404e 100644
---- a/include/trace/events/bcache.h
-+++ b/include/trace/events/bcache.h
-@@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(btree_node,
- ),
-
- TP_fast_assign(
-- __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
-+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key);
- ),
-
- TP_printk("bucket %zu", __entry->bucket)
-@@ -249,7 +249,7 @@ TRACE_EVENT(bcache_btree_write,
- ),
-
- TP_fast_assign(
-- __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
-+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key);
- __entry->block = b->written;
- __entry->keys = b->keys.set[b->keys.nsets].data->keys;
- ),
-@@ -323,7 +323,7 @@ TRACE_EVENT(bcache_btree_insert_key,
- ),
-
- TP_fast_assign(
-- __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
-+ __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key);
- __entry->btree_level = b->level;
- __entry->inode = KEY_INODE(k);
- __entry->offset = KEY_OFFSET(k);
-@@ -350,7 +350,7 @@ DECLARE_EVENT_CLASS(btree_split,
- ),
-
- TP_fast_assign(
-- __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
-+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key);
- __entry->keys = keys;
- ),
-
---
-2.17.0
-
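A minimal, self-contained sketch of the pattern applied by the patch above: an always-zero pointer index is dropped from the bucket-lookup helpers. The struct layouts and the sector-to-bucket arithmetic here are simplified stand-ins chosen for illustration, not the real bcache definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the real bcache structures. */
struct bucket { unsigned char gen; };

struct cache {
        struct bucket *buckets;
};

struct cache_set {
        struct cache *cache;        /* the single cache device in the set */
        size_t bucket_sectors;      /* sectors per bucket (illustrative) */
};

struct bkey { unsigned long long offset; };

static inline unsigned long long PTR_OFFSET(const struct bkey *k)
{
        return k->offset;
}

/* After the patch: no 'unsigned ptr' argument, pointer 0 is implied. */
static inline size_t PTR_BUCKET_NR(struct cache_set *c, const struct bkey *k)
{
        return PTR_OFFSET(k) / c->bucket_sectors;
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c, const struct bkey *k)
{
        return c->cache->buckets + PTR_BUCKET_NR(c, k);
}

int main(void)
{
        struct bucket buckets[8] = { { 0 } };
        struct cache ca = { .buckets = buckets };
        struct cache_set cs = { .cache = &ca, .bucket_sectors = 1024 };
        struct bkey k = { .offset = 3000 };

        printf("bucket nr %zu, gen %u\n", PTR_BUCKET_NR(&cs, &k),
               (unsigned)PTR_BUCKET(&cs, &k)->gen);
        return 0;
}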
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0006-bcache-remove-ptr-index-from-ptr_available-and-ptr_s.patch b/for-test/remove-multiple-cache-devices/original_ideas/0006-bcache-remove-ptr-index-from-ptr_available-and-ptr_s.patch
deleted file mode 100644
index 5f2fccf..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0006-bcache-remove-ptr-index-from-ptr_available-and-ptr_s.patch
+++ /dev/null
@@ -1,244 +0,0 @@
-From 73017c80bda60ab9945992ee9a1175a150b11afb Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 13:00:47 +0800
-Subject: [PATCH 06/14] bcache: remove ptr index from ptr_available() and
- ptr_stale()
-
-The ptr index of ptr_available() and ptr_stale() was added for the multiple
-cache devices code framework. Now we are removing that unfinished and useless
-framework, so the ptr index is useless and should be removed too.
-
-Indeed, after the changes from previous patches the ptr index is already the
-constant value 0, so this patch just removes the parameter from the inline
-function definitions and updates all the locations where they are referenced.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/bcache.h | 6 ++----
- drivers/md/bcache/btree.c | 12 ++++++------
- drivers/md/bcache/extents.c | 16 ++++++++--------
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/movinggc.c | 6 +++---
- drivers/md/bcache/request.c | 2 +-
- drivers/md/bcache/writeback.c | 2 +-
- 8 files changed, 23 insertions(+), 25 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 8a1cc9196c59..a182c1cddb4c 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -628,7 +628,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- if (KEY_PTRS(&alloc.key))
- bkey_put(c, &alloc.key);
-
-- EBUG_ON(ptr_stale(c, &b->key, 0));
-+ EBUG_ON(ptr_stale(c, &b->key));
-
- /* Set up the pointer to the space we're allocating: */
- k->ptr = b->key.ptr;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index d9bee59195d2..1b1ec9dc8869 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -781,14 +781,12 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
- return r > 128U ? 0 : r;
- }
-
--static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-- unsigned i)
-+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k)
- {
- return gen_after(PTR_BUCKET(c, k)->gen, PTR_GEN(k));
- }
-
--static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-- unsigned i)
-+static inline bool ptr_available(struct cache_set *c, const struct bkey *k)
- {
- return (PTR_DEV(k) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k);
- }
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index cc6e86cdd6b1..9c7a468c12a2 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -180,7 +180,7 @@ static void bch_btree_init_next(struct btree *b)
-
- void bkey_put(struct cache_set *c, struct bkey *k)
- {
-- if (ptr_available(c, k, 0))
-+ if (ptr_available(c, k))
- atomic_dec_bug(&PTR_BUCKET(c, k)->pin);
- }
-
-@@ -1182,7 +1182,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- if (!bkey_cmp(k, &ZERO_KEY))
- return stale;
-
-- if (!ptr_available(c, k, 0))
-+ if (!ptr_available(c, k))
- goto out;
-
- g = PTR_BUCKET(c, k);
-@@ -1190,8 +1190,8 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- if (gen_after(g->last_gc, PTR_GEN(k)))
- g->last_gc = PTR_GEN(k);
-
-- if (ptr_stale(c, k, 0)) {
-- stale = max(stale, ptr_stale(c, k, 0));
-+ if (ptr_stale(c, k)) {
-+ stale = max(stale, ptr_stale(c, k));
- goto out;
- }
-
-@@ -1223,8 +1223,8 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
- void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
- {
-
-- if (ptr_available(c, k, 0) &&
-- !ptr_stale(c, k, 0)) {
-+ if (ptr_available(c, k) &&
-+ !ptr_stale(c, k)) {
- struct bucket *b = PTR_BUCKET(c, k);
-
- b->gen = PTR_GEN(k);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 44bc5e3b7e48..f390c7a6da32 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
-
- static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- {
-- if (ptr_available(c, k, 0)) {
-+ if (ptr_available(c, k)) {
- struct cache *ca = PTR_CACHE(c, k);
- size_t bucket = PTR_BUCKET_NR(c, k);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-@@ -64,7 +64,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-
- static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- {
-- if (ptr_available(c, k, 0)) {
-+ if (ptr_available(c, k)) {
- struct cache *ca = PTR_CACHE(c, k);
- size_t bucket = PTR_BUCKET_NR(c, k);
- size_t r = bucket_remainder(c, PTR_OFFSET(k));
-@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- return "bad, short offset";
- if (bucket >= ca->sb.nbuckets)
- return "bad, offset past end of device";
-- if (ptr_stale(c, k, 0))
-+ if (ptr_stale(c, k))
- return "stale";
- }
-
-@@ -160,7 +160,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
- struct bucket *g;
-
- if (mutex_trylock(&b->c->bucket_lock)) {
-- if (ptr_available(b->c, k, 0)) {
-+ if (ptr_available(b->c, k)) {
- g = PTR_BUCKET(b->c, k);
-
- if (KEY_DIRTY(k) ||
-@@ -193,8 +193,8 @@ static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
- bch_ptr_invalid(bk, k))
- return true;
-
-- if (!ptr_available(b->c, k, 0) ||
-- ptr_stale(b->c, k, 0))
-+ if (!ptr_available(b->c, k) ||
-+ ptr_stale(b->c, k))
- return true;
-
- if (expensive_debug_checks(b->c) &&
-@@ -519,13 +519,13 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
- bch_extent_invalid(bk, k))
- return true;
-
-- if (!ptr_available(b->c, k, 0))
-+ if (!ptr_available(b->c, k))
- return true;
-
- if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
- return false;
-
-- stale = ptr_stale(b->c, k, 0);
-+ stale = ptr_stale(b->c, k);
-
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 4ef86f81bf1e..ddcd80072b1d 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -304,7 +304,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
- k < bset_bkey_last(&i->j);
- k = bkey_next(k))
- if (!__bch_extent_invalid(c, k)) {
-- if (ptr_available(c, k, 0))
-+ if (ptr_available(c, k))
- atomic_inc(&PTR_BUCKET(c, k)->pin);
-
- bch_initial_mark_key(c, 0, k);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 0486b3ef4450..5dceac6709b1 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -24,7 +24,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
- struct cache_set *c = container_of(buf, struct cache_set,
- moving_gc_keys);
-
-- if (ptr_available(c, k, 0) &&
-+ if (ptr_available(c, k) &&
- GC_MOVE(PTR_BUCKET(c, k)))
- return true;
-
-@@ -65,7 +65,7 @@ static void read_moving_endio(struct bio *bio)
- if (bio->bi_status)
- io->op.status = bio->bi_status;
- else if (!KEY_DIRTY(&b->key) &&
-- ptr_stale(io->op.c, &b->key, 0)) {
-+ ptr_stale(io->op.c, &b->key)) {
- io->op.status = BLK_STS_IOERR;
- }
-
-@@ -137,7 +137,7 @@ static void read_moving(struct cache_set *c)
- if (!w)
- break;
-
-- if (ptr_stale(c, &w->key, 0)) {
-+ if (ptr_stale(c, &w->key)) {
- bch_keybuf_del(&c->moving_gc_keys, w);
- continue;
- }
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 00f8e70c4a4d..245ada0c186f 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -495,7 +495,7 @@ static void bch_cache_read_endio(struct bio *bio)
- if (bio->bi_status)
- s->iop.status = bio->bi_status;
- else if (!KEY_DIRTY(&b->key) &&
-- ptr_stale(s->iop.c, &b->key, 0)) {
-+ ptr_stale(s->iop.c, &b->key)) {
- atomic_long_inc(&s->iop.c->cache_read_races);
- s->iop.status = BLK_STS_IOERR;
- }
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 8ea299469ffe..ea99166db88c 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -349,7 +349,7 @@ static void read_dirty(struct cached_dev *dc)
- nk = 0;
-
- do {
-- BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
-+ BUG_ON(ptr_stale(dc->disk.c, &next->key));
-
- /*
- * Don't combine too many operations, even if they
---
-2.17.0
-
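Another small standalone model, this time of the staleness check that the patch above simplifies: bucket generations are 8-bit counters that wrap around, so gen_after() compares them modulo 256 rather than with a plain '>'. The helper below takes bare generation values because the bucket lookup is stubbed out; only gen_after() itself mirrors the bcache.h hunk above.

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe generation comparison, as in the bcache.h hunk above. */
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
        uint8_t r = a - b;

        return r > 128U ? 0 : r;
}

/*
 * After the patch the caller passes no pointer index; here the bucket
 * lookup is replaced by passing the two generations directly.
 */
static inline uint8_t ptr_stale(uint8_t bucket_gen, uint8_t key_gen)
{
        return gen_after(bucket_gen, key_gen);
}

int main(void)
{
        /* The bucket was reused twice since the key was written. */
        printf("stale by %u\n", (unsigned)ptr_stale(7, 5));
        /* Wrapped counters: gen 1 is still only two steps after gen 255. */
        printf("stale by %u\n", (unsigned)ptr_stale(1, 255));
        return 0;
}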
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0007-bcache-remove-ptr-index-from-bch_submit_bbio.patch b/for-test/remove-multiple-cache-devices/original_ideas/0007-bcache-remove-ptr-index-from-bch_submit_bbio.patch
deleted file mode 100644
index 7e44295..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0007-bcache-remove-ptr-index-from-bch_submit_bbio.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From d13907bc593a2ac3e765795c2080adcaa47d71f1 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 13:27:53 +0800
-Subject: [PATCH 07/14] bcache: remove ptr index from bch_submit_bbio()
-
-With the unfinished and useless multiple cache devices support code framework
-being removed, the ptr index of bch_submit_bbio() is useless too. Indeed, when
-bch_submit_bbio() is called the value of 'unsigned i' is always 0, so it is
-safe to remove it from the parameter list of bch_submit_bbio().
-
-This patch also updates the parameter list at every location where
-bch_submit_bbio() is referenced.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/btree.c | 6 +++---
- drivers/md/bcache/io.c | 4 ++--
- drivers/md/bcache/movinggc.c | 2 +-
- drivers/md/bcache/request.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- 6 files changed, 9 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 1b1ec9dc8869..05e888094b35 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -926,7 +926,7 @@ void bch_bbio_free(struct bio *, struct cache_set *);
- struct bio *bch_bbio_alloc(struct cache_set *);
-
- void __bch_submit_bbio(struct bio *, struct cache_set *);
--void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
-+void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *);
-
- uint8_t bch_inc_gen(struct cache *, struct bucket *);
- void bch_rescale_priorities(struct cache_set *, int);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 9c7a468c12a2..642fe4444173 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -302,7 +302,7 @@ static void bch_btree_node_read(struct btree *b)
-
- bch_bio_map(bio, b->keys.set[0].data);
-
-- bch_submit_bbio(bio, b->c, &b->key, 0);
-+ bch_submit_bbio(bio, b->c, &b->key);
- closure_sync(&cl);
-
- if (bio->bi_status)
-@@ -425,7 +425,7 @@ static void do_btree_node_write(struct btree *b)
- memcpy(page_address(bv->bv_page),
- base + j * PAGE_SIZE, PAGE_SIZE);
-
-- bch_submit_bbio(b->bio, b->c, &k.key, 0);
-+ bch_submit_bbio(b->bio, b->c, &k.key);
-
- continue_at(cl, btree_node_write_done, NULL);
- } else {
-@@ -433,7 +433,7 @@ static void do_btree_node_write(struct btree *b)
- b->bio->bi_vcnt = 0;
- bch_bio_map(b->bio, i);
-
-- bch_submit_bbio(b->bio, b->c, &k.key, 0);
-+ bch_submit_bbio(b->bio, b->c, &k.key);
-
- closure_sync(cl);
- continue_at_nobarrier(cl, __btree_node_write_done, NULL);
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index 40686033f6d5..3d078c50e624 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -42,10 +42,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
- }
-
- void bch_submit_bbio(struct bio *bio, struct cache_set *c,
-- struct bkey *k, unsigned ptr)
-+ struct bkey *k)
- {
- struct bbio *b = container_of(bio, struct bbio, bio);
-- bch_bkey_copy_single_ptr(&b->key, k, ptr);
-+ bch_bkey_copy_single_ptr(&b->key, k, 0);
- __bch_submit_bbio(bio, c);
- }
-
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 5dceac6709b1..52acb632aab0 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -115,7 +115,7 @@ static void read_moving_submit(struct closure *cl)
- struct moving_io *io = container_of(cl, struct moving_io, cl);
- struct bio *bio = &io->bio.bio;
-
-- bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
-+ bch_submit_bbio(bio, io->op.c, &io->w->key);
-
- continue_at(cl, write_moving, io->op.wq);
- }
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 245ada0c186f..7cd55912f314 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -252,7 +252,7 @@ static void bch_data_insert_start(struct closure *cl)
- bch_keylist_push(&op->insert_keys);
-
- bio_set_op_attrs(n, REQ_OP_WRITE, 0);
-- bch_submit_bbio(n, op->c, k, 0);
-+ bch_submit_bbio(n, op->c, k);
- } while (n != bio);
-
- op->insert_data_done = true;
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 51490e9e5628..cc3d16d2cffc 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -352,7 +352,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
- bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
- bch_bio_map(bio, c->uuids);
-
-- bch_submit_bbio(bio, c, k, 0);
-+ bch_submit_bbio(bio, c, k);
-
- bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0008-bcache-remove-ptr-index-from-bch_bkey_copy_single_pt.patch b/for-test/remove-multiple-cache-devices/original_ideas/0008-bcache-remove-ptr-index-from-bch_bkey_copy_single_pt.patch
deleted file mode 100644
index 2f9d956..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0008-bcache-remove-ptr-index-from-bch_bkey_copy_single_pt.patch
+++ /dev/null
@@ -1,101 +0,0 @@
-From 2708314df8597248594c99ee711d97cc786c0c83 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 13:40:04 +0800
-Subject: [PATCH 08/14] bcache: remove ptr index from
- bch_bkey_copy_single_ptr()
-
-bch_bkey_copy_single_ptr() copies the header, key and a single pointer from
-the source key to the destination key. The parameter 'unsigned i' is used as
-a ptr index for the multiple cache devices code framework, although its value
-is always the constant 0.
-
-Now that the unfinished and useless multiple cache devices code framework is
-being removed from the bcache code, the ptr index can be removed from
-bch_bkey_copy_single_ptr() as well. This patch also updates all the locations
-where this function is referenced.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bset.c | 5 +----
- drivers/md/bcache/bset.h | 3 +--
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/request.c | 6 +-----
- 4 files changed, 4 insertions(+), 12 deletions(-)
-
-diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
-index 00c883536d54..194c9e16d483 100644
---- a/drivers/md/bcache/bset.c
-+++ b/drivers/md/bcache/bset.c
-@@ -179,11 +179,8 @@ void bch_keylist_pop_front(struct keylist *l)
-
- /* Key/pointer manipulation */
-
--void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
-- unsigned i)
-+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src)
- {
-- BUG_ON(i > KEY_PTRS(src));
--
- /* Only copy the header, key, and one pointer. */
- memcpy(dest, src, 2 * sizeof(uint64_t));
- dest->ptr = src->ptr;
-diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
-index b867f2200495..bf8ddf5df392 100644
---- a/drivers/md/bcache/bset.h
-+++ b/drivers/md/bcache/bset.h
-@@ -401,8 +401,7 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
- : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
- }
-
--void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-- unsigned);
-+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *);
- bool __bch_cut_front(const struct bkey *, struct bkey *);
- bool __bch_cut_back(const struct bkey *, struct bkey *);
-
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index 3d078c50e624..b42009b85f29 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -45,7 +45,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
- struct bkey *k)
- {
- struct bbio *b = container_of(bio, struct bbio, bio);
-- bch_bkey_copy_single_ptr(&b->key, k, 0);
-+ bch_bkey_copy_single_ptr(&b->key, k);
- __bch_submit_bbio(bio, c);
- }
-
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 7cd55912f314..62d8fb807dd6 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -512,7 +512,6 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
- struct search *s = container_of(op, struct search, op);
- struct bio *n, *bio = &s->bio.bio;
- struct bkey *bio_key;
-- unsigned ptr;
-
- if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
- return MAP_CONTINUE;
-@@ -536,9 +535,6 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
- if (!KEY_SIZE(k))
- return MAP_CONTINUE;
-
-- /* XXX: figure out best pointer - for multiple cache devices */
-- ptr = 0;
--
- PTR_BUCKET(b->c, k)->prio = INITIAL_PRIO;
-
- if (KEY_DIRTY(k))
-@@ -549,7 +545,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
- GFP_NOIO, &s->d->bio_split);
-
- bio_key = &container_of(n, struct bbio, bio)->key;
-- bch_bkey_copy_single_ptr(bio_key, k, ptr);
-+ bch_bkey_copy_single_ptr(bio_key, k);
-
- bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
- bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
---
-2.17.0
-
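For readers who want to try the single-pointer copy in isolation, here is a toy version of what bch_bkey_copy_single_ptr() reduces to once the index argument is gone. The bkey layout below (two 64-bit header words followed by one pointer) is only a loose, hypothetical model of the real on-disk format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Loose model of a bkey: a two-u64 header followed by a single pointer. */
struct bkey {
        uint64_t high;
        uint64_t low;
        uint64_t ptr[1];
};

/* After the patch: no 'unsigned i', pointer 0 is always the one copied. */
static void bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src)
{
        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[0];
}

int main(void)
{
        struct bkey src = { .high = 1, .low = 2, .ptr = { 42 } };
        struct bkey dst = { 0 };

        bkey_copy_single_ptr(&dst, &src);
        printf("%llu %llu %llu\n",
               (unsigned long long)dst.high,
               (unsigned long long)dst.low,
               (unsigned long long)dst.ptr[0]);
        return 0;
}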
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0009-bcache-remove-ptr-index-from-bch_extent_bad_expensiv.patch b/for-test/remove-multiple-cache-devices/original_ideas/0009-bcache-remove-ptr-index-from-bch_extent_bad_expensiv.patch
deleted file mode 100644
index 233cb94..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0009-bcache-remove-ptr-index-from-bch_extent_bad_expensiv.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 7e1fd54c8656a8da8decd8ab3187de1289a78740 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 13:53:45 +0800
-Subject: [PATCH 09/14] bcache: remove ptr index from
- bch_extent_bad_expensive()
-
-The ptr index parameter was prepared for the unfinished and useless multiple
-cache devices code framework. Now we are removing that framework, and indeed
-when bch_extent_bad_expensive() is called the ptr value is always the
-constant 0. It is safe to remove the ptr parameter from
-bch_extent_bad_expensive(). This patch also updates the parameter list where
-bch_extent_bad_expensive() is referenced.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/extents.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index f390c7a6da32..4acb407c4594 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -480,8 +480,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
- return __bch_extent_invalid(b->c, k);
- }
-
--static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
-- unsigned ptr)
-+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k)
- {
- struct bucket *g = PTR_BUCKET(b->c, k);
- char buf[80];
-@@ -538,7 +537,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
- return true;
-
- if (expensive_debug_checks(b->c) &&
-- bch_extent_bad_expensive(b, k, 0))
-+ bch_extent_bad_expensive(b, k))
- return true;
-
- return false;
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0010-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/original_ideas/0010-bcache-remove-for_each_cache.patch
deleted file mode 100644
index e2e3e76..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0010-bcache-remove-for_each_cache.patch
+++ /dev/null
@@ -1,899 +0,0 @@
-From 2fb4521998e63b4a89d4e2db67e5731a13b7350e Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 22:12:31 +0800
-Subject: [PATCH 10/14] bcache: remove for_each_cache()
-
-Previous patches removed the multiple cache devices framework, which was
-unfinished and useless. Now there is only one cache device in each cache
-set, so we don't need for_each_cache() anymore.
-
-This patch removes the definition of for_each_cache(), and in all the
-locations where for_each_cache() was referenced, c->cache[0] is used to
-directly access the only cache of the cache set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 17 ++-
- drivers/md/bcache/bcache.h | 9 +-
- drivers/md/bcache/btree.c | 102 ++++++++---------
- drivers/md/bcache/journal.c | 212 ++++++++++++++++-------------------
- drivers/md/bcache/movinggc.c | 61 +++++-----
- drivers/md/bcache/request.c | 18 +--
- drivers/md/bcache/super.c | 104 ++++++++---------
- 7 files changed, 236 insertions(+), 287 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index a182c1cddb4c..64b40b439259 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- struct cache *ca;
- struct bucket *b;
- unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
-- unsigned i;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
-
- c->min_prio = USHRT_MAX;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca)
-- if (b->prio &&
-- b->prio != BTREE_PRIO &&
-- !atomic_read(&b->pin)) {
-- b->prio--;
-- c->min_prio = min(c->min_prio, b->prio);
-- }
-+ ca = c->cache[0];
-+ for_each_bucket(b, ca)
-+ if (b->prio &&
-+ b->prio != BTREE_PRIO &&
-+ !atomic_read(&b->pin)) {
-+ b->prio--;
-+ c->min_prio = min(c->min_prio, b->prio);
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 05e888094b35..cfbf10365dea 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -836,9 +836,6 @@ do { \
-
- /* Looping macros */
-
--#define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
--
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
- b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-@@ -880,11 +877,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
-
- static inline void wake_up_allocators(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned i;
-+ struct cache *ca = c->cache[0];
-
-- for_each_cache(ca, c, i)
-- wake_up_process(ca->alloc_thread);
-+ wake_up_process(ca->alloc_thread);
- }
-
- static inline void closure_bio_submit(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 642fe4444173..107a91cdd66b 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1147,19 +1147,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- static int btree_check_reserve(struct btree *b, struct btree_op *op)
- {
- struct cache_set *c = b->c;
-- struct cache *ca;
-- unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
-+ struct cache *ca = c->cache[0];
-+ unsigned reserve = (c->root->level - b->level) * 2 + 1;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i)
-- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-- if (op)
-- prepare_to_wait(&c->btree_cache_wait, &op->wait,
-- TASK_UNINTERRUPTIBLE);
-- mutex_unlock(&c->bucket_lock);
-- return -EINTR;
-- }
-+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-+ if (op)
-+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
-+ TASK_UNINTERRUPTIBLE);
-+ mutex_unlock(&c->bucket_lock);
-+ return -EINTR;
-+ }
-
- mutex_unlock(&c->bucket_lock);
-
-@@ -1630,9 +1629,8 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
-
- static void btree_gc_start(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache[0];
- struct bucket *b;
-- unsigned i;
-
- if (!c->gc_mark_valid)
- return;
-@@ -1642,14 +1640,13 @@ static void btree_gc_start(struct cache_set *c)
- c->gc_mark_valid = 0;
- c->gc_done = ZERO_KEY;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca) {
-- b->last_gc = b->gen;
-- if (!atomic_read(&b->pin)) {
-- SET_GC_MARK(b, 0);
-- SET_GC_SECTORS_USED(b, 0);
-- }
-+ for_each_bucket(b, ca) {
-+ b->last_gc = b->gen;
-+ if (!atomic_read(&b->pin)) {
-+ SET_GC_MARK(b, 0);
-+ SET_GC_SECTORS_USED(b, 0);
- }
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-@@ -1659,6 +1656,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
- struct bucket *b;
- struct cache *ca;
- unsigned i;
-+ uint64_t *j;
-
- mutex_lock(&c->bucket_lock);
-
-@@ -1690,29 +1688,26 @@ static void bch_btree_gc_finish(struct cache_set *c)
- rcu_read_unlock();
-
- c->avail_nbuckets = 0;
-- for_each_cache(ca, c, i) {
-- uint64_t *i;
--
-- ca->invalidate_needs_gc = 0;
-+ ca = c->cache[0];
-+ ca->invalidate_needs_gc = 0;
-
-- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (j = ca->sb.d; j < ca->sb.d + ca->sb.keys; j++)
-+ SET_GC_MARK(ca->buckets + *j, GC_MARK_METADATA);
-
-- for (i = ca->prio_buckets;
-- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (j = ca->prio_buckets;
-+ j < ca->prio_buckets + prio_buckets(ca) * 2; j++)
-+ SET_GC_MARK(ca->buckets + *j, GC_MARK_METADATA);
-
-- for_each_bucket(b, ca) {
-- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-+ for_each_bucket(b, ca) {
-+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-
-- if (atomic_read(&b->pin))
-- continue;
-+ if (atomic_read(&b->pin))
-+ continue;
-
-- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-
-- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-- c->avail_nbuckets++;
-- }
-+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-+ c->avail_nbuckets++;
- }
-
- mutex_unlock(&c->bucket_lock);
-@@ -1761,12 +1756,10 @@ static void bch_btree_gc(struct cache_set *c)
-
- static bool gc_should_run(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned i;
-+ struct cache *ca = c->cache[0];
-
-- for_each_cache(ca, c, i)
-- if (ca->invalidate_needs_gc)
-- return true;
-+ if (ca->invalidate_needs_gc)
-+ return true;
-
- if (atomic_read(&c->sectors_to_gc) < 0)
- return true;
-@@ -1845,9 +1838,8 @@ int bch_btree_check(struct cache_set *c)
-
- void bch_initial_gc_finish(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache[0];
- struct bucket *b;
-- unsigned i;
-
- bch_btree_gc_finish(c);
-
-@@ -1862,20 +1854,18 @@ void bch_initial_gc_finish(struct cache_set *c)
- * This is only safe for buckets that have no live data in them, which
- * there should always be some of.
- */
-- for_each_cache(ca, c, i) {
-- for_each_bucket(b, ca) {
-- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-- fifo_full(&ca->free[RESERVE_BTREE]))
-- break;
-+ for_each_bucket(b, ca) {
-+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-+ fifo_full(&ca->free[RESERVE_BTREE]))
-+ break;
-
-- if (bch_can_invalidate_bucket(ca, b) &&
-- !GC_MARK(b)) {
-- __bch_invalidate_one_bucket(ca, b);
-- if (!fifo_push(&ca->free[RESERVE_PRIO],
-- b - ca->buckets))
-- fifo_push(&ca->free[RESERVE_BTREE],
-- b - ca->buckets);
-- }
-+ if (bch_can_invalidate_bucket(ca, b) &&
-+ !GC_MARK(b)) {
-+ __bch_invalidate_one_bucket(ca, b);
-+ if (!fifo_push(&ca->free[RESERVE_PRIO],
-+ b - ca->buckets))
-+ fifo_push(&ca->free[RESERVE_BTREE],
-+ b - ca->buckets);
- }
- }
-
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index ddcd80072b1d..d5222fcde412 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -153,113 +153,111 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
- ret; \
- })
-
-- struct cache *ca;
-- unsigned iter;
--
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-- unsigned i, l, r, m;
-- uint64_t seq;
-+ struct cache *ca = c->cache[0];
-+ struct journal_device *ja = &ca->journal;
-+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-+ unsigned i, l, r, m;
-+ uint64_t seq;
-
-- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-- pr_debug("%u journal buckets", ca->sb.njournal_buckets);
-+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-+ pr_debug("%u journal buckets", ca->sb.njournal_buckets);
-
-+ /*
-+ * Read journal buckets ordered by golden ratio hash to quickly
-+ * find a sequence of buckets with valid journal entries
-+ */
-+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
- /*
-- * Read journal buckets ordered by golden ratio hash to quickly
-- * find a sequence of buckets with valid journal entries
-+ * We must try the index l with ZERO first for
-+ * correctness due to the scenario that the journal
-+ * bucket is circular buffer which might have wrapped
- */
-- for (i = 0; i < ca->sb.njournal_buckets; i++) {
-- /*
-- * We must try the index l with ZERO first for
-- * correctness due to the scenario that the journal
-- * bucket is circular buffer which might have wrapped
-- */
-- l = (i * 2654435769U) % ca->sb.njournal_buckets;
-+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
-
-- if (test_bit(l, bitmap))
-- break;
-+ if (test_bit(l, bitmap))
-+ break;
-
-- if (read_bucket(l))
-- goto bsearch;
-- }
-+ if (read_bucket(l))
-+ goto bsearch;
-+ }
-
-- /*
-- * If that fails, check all the buckets we haven't checked
-- * already
-- */
-- pr_debug("falling back to linear search");
-+ /*
-+ * If that fails, check all the buckets we haven't checked
-+ * already
-+ */
-+ pr_debug("falling back to linear search");
-
-- for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
-- l < ca->sb.njournal_buckets;
-- l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
-- if (read_bucket(l))
-- goto bsearch;
-+ for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
-+ l < ca->sb.njournal_buckets;
-+ l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
-+ if (read_bucket(l))
-+ goto bsearch;
-+
-+ /* no journal entries on this device? */
-+ if (l == ca->sb.njournal_buckets)
-+ goto no_journal_entry;
-
-- /* no journal entries on this device? */
-- if (l == ca->sb.njournal_buckets)
-- continue;
- bsearch:
-- BUG_ON(list_empty(list));
-+ BUG_ON(list_empty(list));
-
-- /* Binary search */
-- m = l;
-- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-- pr_debug("starting binary search, l %u r %u", l, r);
-+ /* Binary search */
-+ m = l;
-+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-+ pr_debug("starting binary search, l %u r %u", l, r);
-
-- while (l + 1 < r) {
-- seq = list_entry(list->prev, struct journal_replay,
-- list)->j.seq;
-+ while (l + 1 < r) {
-+ seq = list_entry(list->prev, struct journal_replay,
-+ list)->j.seq;
-
-- m = (l + r) >> 1;
-- read_bucket(m);
-+ m = (l + r) >> 1;
-+ read_bucket(m);
-
-- if (seq != list_entry(list->prev, struct journal_replay,
-- list)->j.seq)
-- l = m;
-- else
-- r = m;
-- }
-+ if (seq != list_entry(list->prev, struct journal_replay,
-+ list)->j.seq)
-+ l = m;
-+ else
-+ r = m;
-+ }
-
-- /*
-- * Read buckets in reverse order until we stop finding more
-- * journal entries
-- */
-- pr_debug("finishing up: m %u njournal_buckets %u",
-- m, ca->sb.njournal_buckets);
-- l = m;
-+ /*
-+ * Read buckets in reverse order until we stop finding more
-+ * journal entries
-+ */
-+ pr_debug("finishing up: m %u njournal_buckets %u",
-+ m, ca->sb.njournal_buckets);
-+ l = m;
-
-- while (1) {
-- if (!l--)
-- l = ca->sb.njournal_buckets - 1;
-+ while (1) {
-+ if (!l--)
-+ l = ca->sb.njournal_buckets - 1;
-
-- if (l == m)
-- break;
-+ if (l == m)
-+ break;
-
-- if (test_bit(l, bitmap))
-- continue;
-+ if (test_bit(l, bitmap))
-+ continue;
-
-- if (!read_bucket(l))
-- break;
-- }
-+ if (!read_bucket(l))
-+ break;
-+ }
-
-- seq = 0;
-+ seq = 0;
-
-- for (i = 0; i < ca->sb.njournal_buckets; i++)
-- if (ja->seq[i] > seq) {
-- seq = ja->seq[i];
-- /*
-- * When journal_reclaim() goes to allocate for
-- * the first time, it'll use the bucket after
-- * ja->cur_idx
-- */
-- ja->cur_idx = i;
-- ja->last_idx = ja->discard_idx = (i + 1) %
-- ca->sb.njournal_buckets;
-+ for (i = 0; i < ca->sb.njournal_buckets; i++)
-+ if (ja->seq[i] > seq) {
-+ seq = ja->seq[i];
-+ /*
-+ * When journal_reclaim() goes to allocate for
-+ * the first time, it'll use the bucket after
-+ * ja->cur_idx
-+ */
-+ ja->cur_idx = i;
-+ ja->last_idx = ja->discard_idx = (i + 1) %
-+ ca->sb.njournal_buckets;
-
-- }
-- }
-+ }
-
-+no_journal_entry:
- if (!list_empty(list))
- c->journal.seq = list_entry(list->prev,
- struct journal_replay,
-@@ -487,10 +485,12 @@ static void do_journal_discard(struct cache *ca)
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
-- struct cache *ca;
-+ struct cache *ca = c->cache[0];
- uint64_t last_seq;
-- unsigned iter, n = 0;
-+ unsigned n = 0;
- atomic_t p __maybe_unused;
-+ struct journal_device *ja;
-+ unsigned next;
-
- atomic_long_inc(&c->reclaim);
-
-@@ -500,35 +500,22 @@ static void journal_reclaim(struct cache_set *c)
- last_seq = last_seq(&c->journal);
-
- /* Update last_idx */
-+ ja = &ca->journal;
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
--
-- while (ja->last_idx != ja->cur_idx &&
-- ja->seq[ja->last_idx] < last_seq)
-- ja->last_idx = (ja->last_idx + 1) %
-- ca->sb.njournal_buckets;
-- }
-+ while (ja->last_idx != ja->cur_idx &&
-+ ja->seq[ja->last_idx] < last_seq)
-+ ja->last_idx = (ja->last_idx + 1) %
-+ ca->sb.njournal_buckets;
-
-- for_each_cache(ca, c, iter)
-- do_journal_discard(ca);
-+ do_journal_discard(ca);
-
- if (c->journal.blocks_free)
- goto out;
-
-- /*
-- * Allocate:
-- * XXX: Sort by free journal space
-- */
--
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
--
-- /* No space available on this device */
-- if (next == ja->discard_idx)
-- continue;
-+ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-
-+ /* Space available on this device */
-+ if (next != ja->discard_idx) {
- ja->cur_idx = next;
- k->ptr = MAKE_PTR(0,
- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-@@ -607,8 +594,8 @@ static void journal_write_unlocked(struct closure *cl)
- struct cache *ca;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
-- unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
-- c->sb.block_size;
-+ unsigned sectors = set_blocks(w->data, block_bytes(c)) *
-+ c->sb.block_size;
-
- struct bio *bio;
- struct bio_list list;
-@@ -633,15 +620,14 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->btree_root, &c->root->key);
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
-- for_each_cache(ca, c, i)
-- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-+ ca = PTR_CACHE(c, k);
-+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-
- w->data->magic = jset_magic(&c->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-
-- ca = PTR_CACHE(c, k);
- bio = &ca->journal.bio;
-
- atomic_long_add(sectors, &ca->meta_sectors_written);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 52acb632aab0..89ed74a23da1 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -192,50 +192,47 @@ static unsigned bucket_heap_top(struct cache *ca)
-
- void bch_moving_gc(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache[0];
- struct bucket *b;
-- unsigned i;
-+ unsigned sectors_to_move, reserve_sectors;
-
- if (!c->copy_gc_enabled)
- return;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i) {
-- unsigned sectors_to_move = 0;
-- unsigned reserve_sectors = ca->sb.bucket_size *
-- fifo_used(&ca->free[RESERVE_MOVINGGC]);
--
-- ca->heap.used = 0;
--
-- for_each_bucket(b, ca) {
-- if (GC_MARK(b) == GC_MARK_METADATA ||
-- !GC_SECTORS_USED(b) ||
-- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-- atomic_read(&b->pin))
-- continue;
--
-- if (!heap_full(&ca->heap)) {
-- sectors_to_move += GC_SECTORS_USED(b);
-- heap_add(&ca->heap, b, bucket_cmp);
-- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-- sectors_to_move -= bucket_heap_top(ca);
-- sectors_to_move += GC_SECTORS_USED(b);
--
-- ca->heap.data[0] = b;
-- heap_sift(&ca->heap, 0, bucket_cmp);
-- }
-- }
-+ sectors_to_move = 0;
-+ reserve_sectors = ca->sb.bucket_size *
-+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
-+
-+ ca->heap.used = 0;
-+ for_each_bucket(b, ca) {
-+ if (GC_MARK(b) == GC_MARK_METADATA ||
-+ !GC_SECTORS_USED(b) ||
-+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-+ atomic_read(&b->pin))
-+ continue;
-
-- while (sectors_to_move > reserve_sectors) {
-- heap_pop(&ca->heap, b, bucket_cmp);
-- sectors_to_move -= GC_SECTORS_USED(b);
-+ if (!heap_full(&ca->heap)) {
-+ sectors_to_move += GC_SECTORS_USED(b);
-+ heap_add(&ca->heap, b, bucket_cmp);
-+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-+ sectors_to_move -= bucket_heap_top(ca);
-+ sectors_to_move += GC_SECTORS_USED(b);
-+
-+ ca->heap.data[0] = b;
-+ heap_sift(&ca->heap, 0, bucket_cmp);
- }
-+ }
-
-- while (heap_pop(&ca->heap, b, bucket_cmp))
-- SET_GC_MOVE(b, 1);
-+ while (sectors_to_move > reserve_sectors) {
-+ heap_pop(&ca->heap, b, bucket_cmp);
-+ sectors_to_move -= GC_SECTORS_USED(b);
- }
-
-+ while (heap_pop(&ca->heap, b, bucket_cmp))
-+ SET_GC_MOVE(b, 1);
-+
- mutex_unlock(&c->bucket_lock);
-
- c->moving_gc_keys.last_scanned = ZERO_KEY;
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 62d8fb807dd6..5958a2e8b274 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -1164,13 +1164,10 @@ static int cached_dev_congested(void *data, int bits)
- return 1;
-
- if (cached_dev_get(dc)) {
-- unsigned i;
-- struct cache *ca;
-+ struct cache *ca = d->c->cache[0];
-
-- for_each_cache(ca, d->c, i) {
-- q = bdev_get_queue(ca->bdev);
-- ret |= bdi_congested(q->backing_dev_info, bits);
-- }
-+ q = bdev_get_queue(ca->bdev);
-+ ret |= bdi_congested(q->backing_dev_info, bits);
-
- cached_dev_put(dc);
- }
-@@ -1276,14 +1273,11 @@ static int flash_dev_congested(void *data, int bits)
- {
- struct bcache_device *d = data;
- struct request_queue *q;
-- struct cache *ca;
-- unsigned i;
-+ struct cache *ca = d->c->cache[0];
- int ret = 0;
-
-- for_each_cache(ca, d->c, i) {
-- q = bdev_get_queue(ca->bdev);
-- ret |= bdi_congested(q->backing_dev_info, bits);
-- }
-+ q = bdev_get_queue(ca->bdev);
-+ ret |= bdi_congested(q->backing_dev_info, bits);
-
- return ret;
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index cc3d16d2cffc..7b3b61f7ce21 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -281,31 +281,29 @@ static void bcache_write_super_unlock(struct closure *cl)
- void bcache_write_super(struct cache_set *c)
- {
- struct closure *cl = &c->sb_write;
-- struct cache *ca;
-- unsigned i;
-+ struct cache *ca = c->cache[0];
-+ struct bio *bio;
-
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-
- c->sb.seq++;
-
-- for_each_cache(ca, c, i) {
-- struct bio *bio = &ca->sb_bio;
-+ bio = &ca->sb_bio;
-
-- ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
-+ ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-+ ca->sb.seq = c->sb.seq;
-+ ca->sb.last_mount = c->sb.last_mount;
-
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-
-- bio_reset(bio);
-- bio_set_dev(bio, ca->bdev);
-- bio->bi_end_io = write_super_endio;
-- bio->bi_private = ca;
-+ bio_reset(bio);
-+ bio_set_dev(bio, ca->bdev);
-+ bio->bi_end_io = write_super_endio;
-+ bio->bi_private = ca;
-
-- closure_get(cl);
-- __write_super(&ca->sb, bio);
-- }
-+ closure_get(cl);
-+ __write_super(&ca->sb, bio);
-
- closure_return_with_destructor(cl, bcache_write_super_unlock);
- }
-@@ -658,25 +656,23 @@ static void bcache_device_unlink(struct bcache_device *d)
- lockdep_assert_held(&bch_register_lock);
-
- if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
-- unsigned i;
- struct cache *ca;
-
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
-
-- for_each_cache(ca, d->c, i)
-- bd_unlink_disk_holder(ca->bdev, d->disk);
-+ ca = d->c->cache[0];
-+ bd_unlink_disk_holder(ca->bdev, d->disk);
- }
- }
-
- static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- const char *name)
- {
-- unsigned i;
- struct cache *ca;
-
-- for_each_cache(ca, d->c, i)
-- bd_link_disk_holder(ca->bdev, d->disk);
-+ ca = d->c->cache[0];
-+ bd_link_disk_holder(ca->bdev, d->disk);
-
- snprintf(d->name, BCACHEDEVNAME_SIZE,
- "%s%u", name, d->id);
-@@ -1473,7 +1469,6 @@ static void cache_set_free(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, cl);
- struct cache *ca;
-- unsigned i;
-
- if (!IS_ERR_OR_NULL(c->debug))
- debugfs_remove(c->debug);
-@@ -1482,12 +1477,12 @@ static void cache_set_free(struct closure *cl)
- bch_btree_cache_free(c);
- bch_journal_free(c);
-
-- for_each_cache(ca, c, i)
-- if (ca) {
-- ca->set = NULL;
-- c->cache[ca->sb.nr_this_dev] = NULL;
-- kobject_put(&ca->kobj);
-- }
-+ ca = c->cache[0];
-+ if (ca) {
-+ ca->set = NULL;
-+ c->cache[ca->sb.nr_this_dev] = NULL;
-+ kobject_put(&ca->kobj);
-+ }
-
- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
-@@ -1516,7 +1511,6 @@ static void cache_set_flush(struct closure *cl)
- struct cache_set *c = container_of(cl, struct cache_set, caching);
- struct cache *ca;
- struct btree *b;
-- unsigned i;
-
- bch_cache_accounting_destroy(&c->accounting);
-
-@@ -1537,9 +1531,9 @@ static void cache_set_flush(struct closure *cl)
- mutex_unlock(&b->write_lock);
- }
-
-- for_each_cache(ca, c, i)
-- if (ca->alloc_thread)
-- kthread_stop(ca->alloc_thread);
-+ ca = c->cache[0];
-+ if (ca->alloc_thread)
-+ kthread_stop(ca->alloc_thread);
-
- if (c->journal.cur) {
- cancel_delayed_work_sync(&c->journal.work);
-@@ -1745,12 +1739,11 @@ static void run_cache_set(struct cache_set *c)
- struct cached_dev *dc, *t;
- struct cache *ca;
- struct closure cl;
-- unsigned i;
-
- closure_init_stack(&cl);
-
-- for_each_cache(ca, c, i)
-- c->nbuckets += ca->sb.nbuckets;
-+ ca = c->cache[0];
-+ c->nbuckets += ca->sb.nbuckets;
- set_gc_sectors(c);
-
- if (CACHE_SYNC(&c->sb)) {
-@@ -1771,8 +1764,7 @@ static void run_cache_set(struct cache_set *c)
- j = &list_entry(journal.prev, struct journal_replay, list)->j;
-
- err = "IO error reading priorities";
-- for_each_cache(ca, c, i)
-- prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
-+ prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
-
- /*
- * If prio_read() fails it'll call cache_set_error and we'll
-@@ -1814,9 +1806,8 @@ static void run_cache_set(struct cache_set *c)
- bch_journal_next(&c->journal);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- /*
- * First place it's safe to allocate: btree_check() and
-@@ -1833,28 +1824,24 @@ static void run_cache_set(struct cache_set *c)
-
- bch_journal_replay(c, &journal);
- } else {
-+ unsigned j;
-+
- pr_notice("invalidating existing data");
-
-- for_each_cache(ca, c, i) {
-- unsigned j;
-+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-+ 2, SB_JOURNAL_BUCKETS);
-
-- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-- 2, SB_JOURNAL_BUCKETS);
--
-- for (j = 0; j < ca->sb.keys; j++)
-- ca->sb.d[j] = ca->sb.first_bucket + j;
-- }
-+ for (j = 0; j < ca->sb.keys; j++)
-+ ca->sb.d[j] = ca->sb.first_bucket + j;
-
- bch_initial_gc_finish(c);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- mutex_lock(&c->bucket_lock);
-- for_each_cache(ca, c, i)
-- bch_prio_write(ca);
-+ bch_prio_write(ca);
- mutex_unlock(&c->bucket_lock);
-
- err = "cannot allocate new UUID bucket";
-@@ -2132,12 +2119,13 @@ static bool bch_is_open_backing(struct block_device *bdev) {
- static bool bch_is_open_cache(struct block_device *bdev) {
- struct cache_set *c, *tc;
- struct cache *ca;
-- unsigned i;
-
-- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
-- for_each_cache(ca, c, i)
-- if (ca->bdev == bdev)
-- return true;
-+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-+ ca = c->cache[0];
-+ if (ca->bdev == bdev)
-+ return true;
-+ }
-+
- return false;
- }
-
---
-2.17.0
-
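The transformation in the patch above is mechanical: since a cache set now holds exactly one cache, every for_each_cache() loop body runs once against c->cache[0]. Below is a minimal standalone sketch of that pattern, using invented toy structs rather than the real bcache definitions:

/* Toy stand-ins for illustration only; not the real bcache structs. */
struct toy_cache {
	unsigned long nbuckets;
};

struct toy_cache_set {
	struct toy_cache *cache[1];	/* single cache per set */
};

/* Before: iterate over all caches of the set (for_each_cache style). */
static unsigned long toy_nbuckets_loop(struct toy_cache_set *c)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < 1; i++) {
		struct toy_cache *ca = c->cache[i];

		total += ca->nbuckets;
	}

	return total;
}

/* After: the loop collapses to a single direct access. */
static unsigned long toy_nbuckets_single(struct toy_cache_set *c)
{
	struct toy_cache *ca = c->cache[0];

	return ca->nbuckets;
}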
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0011-bcache-do-not-use-nr_in_set-and-nr_this_dev-of-struc.patch b/for-test/remove-multiple-cache-devices/original_ideas/0011-bcache-do-not-use-nr_in_set-and-nr_this_dev-of-struc.patch
deleted file mode 100644
index f5eca11..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0011-bcache-do-not-use-nr_in_set-and-nr_this_dev-of-struc.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From 28bd52ebc86d3d2d92e9c0c4aba527a3d3033d7e Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 22:26:38 +0800
-Subject: [PATCH 11/14] bcache: do not use nr_in_set and nr_this_dev of struct
- cache_sb
-
-After removing the unfinished and useless multiple cache devices framework,
-each cache set only has one cache device for now. Therefore, in struct
-cache_sb, nr_in_set is always 1 and nr_this_dev is always 0 (they were
-constant values even before the multiple cache devices framework was
-removed).
-
-This patch removes all references to nr_in_set and nr_this_dev of struct
-cache_sb, but still keeps them in include/uapi/linux/bcache.h so that
-user space tools can still be compiled with these dummy struct members.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/journal.c | 4 ++--
- drivers/md/bcache/super.c | 28 ++++++++--------------------
- 3 files changed, 11 insertions(+), 23 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 64b40b439259..1dd54a77dc4b 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -493,7 +493,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
-
- k->ptr = MAKE_PTR(ca->buckets[b].gen,
- bucket_to_sector(c, b),
-- ca->sb.nr_this_dev);
-+ 0);
-
- SET_KEY_PTRS(k, i + 1);
- }
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index d5222fcde412..102c2a508d74 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -519,7 +519,7 @@ static void journal_reclaim(struct cache_set *c)
- ja->cur_idx = next;
- k->ptr = MAKE_PTR(0,
- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-- ca->sb.nr_this_dev);
-+ 0);
- n++;
- }
-
-@@ -621,7 +621,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
- ca = PTR_CACHE(c, k);
-- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-+ w->data->prio_bucket[0] = ca->prio_buckets[0];
-
- w->data->magic = jset_magic(&c->sb);
- w->data->version = BCACHE_JSET_VERSION;
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 7b3b61f7ce21..3d3547aa6f04 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -130,9 +130,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- sb->nbuckets = le64_to_cpu(s->nbuckets);
- sb->bucket_size = le16_to_cpu(s->bucket_size);
-
-- sb->nr_in_set = le16_to_cpu(s->nr_in_set);
-- sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
--
- err = "Too many buckets";
- if (sb->nbuckets > LONG_MAX)
- goto err;
-@@ -156,12 +153,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- if (bch_is_zero(sb->set_uuid, 16))
- goto err;
-
-- err = "Bad cache device number in set";
-- if (!sb->nr_in_set ||
-- sb->nr_in_set <= sb->nr_this_dev ||
-- sb->nr_in_set > MAX_CACHES_PER_SET)
-- goto err;
--
- err = "Journal buckets not sequential";
- for (i = 0; i < sb->keys; i++)
- if (sb->d[i] != sb->first_bucket + i)
-@@ -1480,7 +1471,7 @@ static void cache_set_free(struct closure *cl)
- ca = c->cache[0];
- if (ca) {
- ca->set = NULL;
-- c->cache[ca->sb.nr_this_dev] = NULL;
-+ c->cache[0] = NULL;
- kobject_put(&ca->kobj);
- }
-
-@@ -1671,7 +1662,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
-- c->sb.nr_in_set = sb->nr_in_set;
- c->sb.last_mount = sb->last_mount;
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
-@@ -1764,7 +1754,7 @@ static void run_cache_set(struct cache_set *c)
- j = &list_entry(journal.prev, struct journal_replay, list)->j;
-
- err = "IO error reading priorities";
-- prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
-+ prio_read(ca, j->prio_bucket[0]);
-
- /*
- * If prio_read() fails it'll call cache_set_error and we'll
-@@ -1896,8 +1886,7 @@ static void run_cache_set(struct cache_set *c)
- static bool can_attach_cache(struct cache *ca, struct cache_set *c)
- {
- return ca->sb.block_size == c->sb.block_size &&
-- ca->sb.bucket_size == c->sb.bucket_size &&
-- ca->sb.nr_in_set == c->sb.nr_in_set;
-+ ca->sb.bucket_size == c->sb.bucket_size;
- }
-
- static const char *register_cache_set(struct cache *ca)
-@@ -1908,7 +1897,7 @@ static const char *register_cache_set(struct cache *ca)
-
- list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-- if (c->cache[ca->sb.nr_this_dev])
-+ if (c->cache[0])
- return "duplicate cache set member";
-
- if (!can_attach_cache(ca, c))
-@@ -1936,7 +1925,6 @@ static const char *register_cache_set(struct cache *ca)
-
- list_add(&c->list, &bch_cache_sets);
- found:
-- sprintf(buf, "cache%i", ca->sb.nr_this_dev);
- if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
- sysfs_create_link(&c->kobj, &ca->kobj, buf))
- goto err;
-@@ -1951,10 +1939,10 @@ static const char *register_cache_set(struct cache *ca)
-
- kobject_get(&ca->kobj);
- ca->set = c;
-- ca->set->cache[ca->sb.nr_this_dev] = ca;
-+ ca->set->cache[0] = ca;
- c->cache_by_alloc[c->caches_loaded++] = ca;
-
-- if (c->caches_loaded == c->sb.nr_in_set)
-+ if (c->caches_loaded == 1)
- run_cache_set(c);
-
- return NULL;
-@@ -1971,8 +1959,8 @@ void bch_cache_release(struct kobject *kobj)
- unsigned i;
-
- if (ca->set) {
-- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
-- ca->set->cache[ca->sb.nr_this_dev] = NULL;
-+ BUG_ON(ca->set->cache[0] != ca);
-+ ca->set->cache[0] = NULL;
- }
-
- free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0012-bcache-change-cache_set.cache-and-cache_set.cache_by.patch b/for-test/remove-multiple-cache-devices/original_ideas/0012-bcache-change-cache_set.cache-and-cache_set.cache_by.patch
deleted file mode 100644
index 0a46e66..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0012-bcache-change-cache_set.cache-and-cache_set.cache_by.patch
+++ /dev/null
@@ -1,292 +0,0 @@
-From 382edf663fc4669c67dc82bca119c0c6c118a1fb Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Tue, 17 Apr 2018 23:08:27 +0800
-Subject: [PATCH 12/14] bcache: change cache_set.cache and
- cache_set.cache_by_alloc to pointers
-
-struct cache_set members cache and cache_by_alloc are defined as arrays
-for now. After the unfinished and useless multiple cache devices
-framework is removed, each cache set only has one cache device, so these
-two arrays should be changed to two pointers as well.
-
-This patch changes struct cache_set members cache and cache_by_alloc from
-arrays to pointers, and updates all the locations where they are used.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 4 ++--
- drivers/md/bcache/bcache.h | 8 ++++----
- drivers/md/bcache/btree.c | 10 +++++-----
- drivers/md/bcache/journal.c | 4 ++--
- drivers/md/bcache/movinggc.c | 2 +-
- drivers/md/bcache/request.c | 4 ++--
- drivers/md/bcache/super.c | 27 ++++++++++++++-------------
- 7 files changed, 30 insertions(+), 29 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 1dd54a77dc4b..fbf11afb4ab4 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -103,7 +103,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
-
- c->min_prio = USHRT_MAX;
-
-- ca = c->cache[0];
-+ ca = c->cache;
- for_each_bucket(b, ca)
- if (b->prio &&
- b->prio != BTREE_PRIO &&
-@@ -485,7 +485,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
- /* sort by free space/prio of oldest data in caches */
-
- for (i = 0; i < n; i++) {
-- struct cache *ca = c->cache_by_alloc[i];
-+ struct cache *ca = c->cache_by_alloc;
- long b = bch_bucket_alloc(ca, reserve, wait);
-
- if (b == -1)
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index cfbf10365dea..254a10c6bb41 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -517,8 +517,8 @@ struct cache_set {
-
- struct cache_sb sb;
-
-- struct cache *cache[MAX_CACHES_PER_SET];
-- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
-+ struct cache *cache;
-+ struct cache *cache_by_alloc;
- int caches_loaded;
-
- struct bcache_device **devices;
-@@ -760,7 +760,7 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
- static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k)
- {
-- return c->cache[PTR_DEV(k)];
-+ return c->cache;
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
-@@ -877,7 +877,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
-
- static inline void wake_up_allocators(struct cache_set *c)
- {
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
-
- wake_up_process(ca->alloc_thread);
- }
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 107a91cdd66b..7dddaa8ca767 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1147,7 +1147,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- static int btree_check_reserve(struct btree *b, struct btree_op *op)
- {
- struct cache_set *c = b->c;
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- unsigned reserve = (c->root->level - b->level) * 2 + 1;
-
- mutex_lock(&c->bucket_lock);
-@@ -1629,7 +1629,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
-
- static void btree_gc_start(struct cache_set *c)
- {
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- struct bucket *b;
-
- if (!c->gc_mark_valid)
-@@ -1688,7 +1688,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
- rcu_read_unlock();
-
- c->avail_nbuckets = 0;
-- ca = c->cache[0];
-+ ca = c->cache;
- ca->invalidate_needs_gc = 0;
-
- for (j = ca->sb.d; j < ca->sb.d + ca->sb.keys; j++)
-@@ -1756,7 +1756,7 @@ static void bch_btree_gc(struct cache_set *c)
-
- static bool gc_should_run(struct cache_set *c)
- {
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
-
- if (ca->invalidate_needs_gc)
- return true;
-@@ -1838,7 +1838,7 @@ int bch_btree_check(struct cache_set *c)
-
- void bch_initial_gc_finish(struct cache_set *c)
- {
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- struct bucket *b;
-
- bch_btree_gc_finish(c);
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 102c2a508d74..05722adb96a1 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -153,7 +153,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
- ret; \
- })
-
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- struct journal_device *ja = &ca->journal;
- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
- unsigned i, l, r, m;
-@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- uint64_t last_seq;
- unsigned n = 0;
- atomic_t p __maybe_unused;
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 89ed74a23da1..72d47326f967 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -192,7 +192,7 @@ static unsigned bucket_heap_top(struct cache *ca)
-
- void bch_moving_gc(struct cache_set *c)
- {
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- struct bucket *b;
- unsigned sectors_to_move, reserve_sectors;
-
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 5958a2e8b274..df6d51ae5fa5 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -1164,7 +1164,7 @@ static int cached_dev_congested(void *data, int bits)
- return 1;
-
- if (cached_dev_get(dc)) {
-- struct cache *ca = d->c->cache[0];
-+ struct cache *ca = d->c->cache;
-
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
-@@ -1273,7 +1273,7 @@ static int flash_dev_congested(void *data, int bits)
- {
- struct bcache_device *d = data;
- struct request_queue *q;
-- struct cache *ca = d->c->cache[0];
-+ struct cache *ca = d->c->cache;
- int ret = 0;
-
- q = bdev_get_queue(ca->bdev);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 3d3547aa6f04..29f136098ea6 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -272,7 +272,7 @@ static void bcache_write_super_unlock(struct closure *cl)
- void bcache_write_super(struct cache_set *c)
- {
- struct closure *cl = &c->sb_write;
-- struct cache *ca = c->cache[0];
-+ struct cache *ca = c->cache;
- struct bio *bio;
-
- down(&c->sb_write_mutex);
-@@ -652,7 +652,7 @@ static void bcache_device_unlink(struct bcache_device *d)
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
-
-- ca = d->c->cache[0];
-+ ca = d->c->cache;
- bd_unlink_disk_holder(ca->bdev, d->disk);
- }
- }
-@@ -662,7 +662,7 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- {
- struct cache *ca;
-
-- ca = d->c->cache[0];
-+ ca = d->c->cache;
- bd_link_disk_holder(ca->bdev, d->disk);
-
- snprintf(d->name, BCACHEDEVNAME_SIZE,
-@@ -1468,10 +1468,10 @@ static void cache_set_free(struct closure *cl)
- bch_btree_cache_free(c);
- bch_journal_free(c);
-
-- ca = c->cache[0];
-+ ca = c->cache;
- if (ca) {
- ca->set = NULL;
-- c->cache[0] = NULL;
-+ c->cache = NULL;
- kobject_put(&ca->kobj);
- }
-
-@@ -1522,7 +1522,7 @@ static void cache_set_flush(struct closure *cl)
- mutex_unlock(&b->write_lock);
- }
-
-- ca = c->cache[0];
-+ ca = c->cache;
- if (ca->alloc_thread)
- kthread_stop(ca->alloc_thread);
-
-@@ -1732,7 +1732,7 @@ static void run_cache_set(struct cache_set *c)
-
- closure_init_stack(&cl);
-
-- ca = c->cache[0];
-+ ca = c->cache;
- c->nbuckets += ca->sb.nbuckets;
- set_gc_sectors(c);
-
-@@ -1897,7 +1897,7 @@ static const char *register_cache_set(struct cache *ca)
-
- list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-- if (c->cache[0])
-+ if (c->cache)
- return "duplicate cache set member";
-
- if (!can_attach_cache(ca, c))
-@@ -1939,8 +1939,9 @@ static const char *register_cache_set(struct cache *ca)
-
- kobject_get(&ca->kobj);
- ca->set = c;
-- ca->set->cache[0] = ca;
-- c->cache_by_alloc[c->caches_loaded++] = ca;
-+ ca->set->cache = ca;
-+ c->cache_by_alloc = ca;
-+ c->caches_loaded++;
-
- if (c->caches_loaded == 1)
- run_cache_set(c);
-@@ -1959,8 +1960,8 @@ void bch_cache_release(struct kobject *kobj)
- unsigned i;
-
- if (ca->set) {
-- BUG_ON(ca->set->cache[0] != ca);
-- ca->set->cache[0] = NULL;
-+ BUG_ON(ca->set->cache != ca);
-+ ca->set->cache = NULL;
- }
-
- free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
-@@ -2109,7 +2110,7 @@ static bool bch_is_open_cache(struct block_device *bdev) {
- struct cache *ca;
-
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-- ca = c->cache[0];
-+ ca = c->cache;
- if (ca->bdev == bdev)
- return true;
- }
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0013-bcache-remove-ptr-index-from-bch_bucket_alloc_set.patch b/for-test/remove-multiple-cache-devices/original_ideas/0013-bcache-remove-ptr-index-from-bch_bucket_alloc_set.patch
deleted file mode 100644
index b40cf78..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0013-bcache-remove-ptr-index-from-bch_bucket_alloc_set.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From aa2b861b9aa8144078238def1271e3e019d55919 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Wed, 25 Apr 2018 10:17:39 +0800
-Subject: [PATCH 13/14] bcache: remove ptr index from bch_bucket_alloc_set()
-
-Since the code framework of multiple cache devices support is now
-removed, the ptr index in the parameter list of bch_bucket_alloc_set()
-is unnecessary. This patch removes it from this function and from its
-sub-function __bch_bucket_alloc_set(). All the locations where these
-two functions are referenced are updated too.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 34 ++++++++++++++--------------------
- drivers/md/bcache/bcache.h | 4 ++--
- drivers/md/bcache/btree.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- 4 files changed, 18 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index fbf11afb4ab4..e0f38e2d1fa1 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -473,30 +473,24 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
-- int i;
-+ struct cache *ca;
-+ long b;
-
- lockdep_assert_held(&c->bucket_lock);
-- BUG_ON(!n || n > c->caches_loaded || n > 8);
--
- bkey_init(k);
-
-- /* sort by free space/prio of oldest data in caches */
-+ ca = c->cache_by_alloc;
-+ b = bch_bucket_alloc(ca, reserve, wait);
-+ if (b == -1)
-+ goto err;
-
-- for (i = 0; i < n; i++) {
-- struct cache *ca = c->cache_by_alloc;
-- long b = bch_bucket_alloc(ca, reserve, wait);
-+ k->ptr = MAKE_PTR(ca->buckets[b].gen,
-+ bucket_to_sector(c, b),
-+ 0);
-
-- if (b == -1)
-- goto err;
--
-- k->ptr = MAKE_PTR(ca->buckets[b].gen,
-- bucket_to_sector(c, b),
-- 0);
--
-- SET_KEY_PTRS(k, i + 1);
-- }
-+ SET_KEY_PTRS(k, 1);
-
- return 0;
- err:
-@@ -506,11 +500,11 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
- }
-
- int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
- int ret;
- mutex_lock(&c->bucket_lock);
-- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
-+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-@@ -613,7 +607,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-
- spin_unlock(&c->data_bucket_lock);
-
-- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
-+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
- return false;
-
- spin_lock(&c->data_bucket_lock);
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 254a10c6bb41..4bd11e84181f 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -934,9 +934,9 @@ void bch_bucket_free(struct cache_set *, struct bkey *);
-
- long bch_bucket_alloc(struct cache *, unsigned, bool);
- int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-- struct bkey *, int, bool);
-+ struct bkey *, bool);
- int bch_bucket_alloc_set(struct cache_set *, unsigned,
-- struct bkey *, int, bool);
-+ struct bkey *, bool);
- bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
- unsigned, unsigned, bool);
- bool bch_cached_dev_error(struct cached_dev *dc);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 7dddaa8ca767..714d3c47f47a 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1074,7 +1074,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
-
- mutex_lock(&c->bucket_lock);
- retry:
-- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
-+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
- goto err;
-
- bkey_put(c, &k.key);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 29f136098ea6..3edd8f54436c 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -404,7 +404,7 @@ static int __uuid_write(struct cache_set *c)
-
- lockdep_assert_held(&bch_register_lock);
-
-- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
-+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
- SET_KEY_SIZE(&k.key, c->sb.bucket_size);
---
-2.17.0
-
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/0014-bcache-remove-MAX_CACHES_PER_SET-from-ptr_available.patch b/for-test/remove-multiple-cache-devices/original_ideas/0014-bcache-remove-MAX_CACHES_PER_SET-from-ptr_available.patch
deleted file mode 100644
index 47a3c0e..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/0014-bcache-remove-MAX_CACHES_PER_SET-from-ptr_available.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From da1e2224dde50cca018e96c634efd2efb16e1a4c Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Wed, 25 Apr 2018 10:27:19 +0800
-Subject: [PATCH 14/14] bcache: remove MAX_CACHES_PER_SET from ptr_available()
-
-Since the unfinished and useless code framework of multiple cache
-devices support is now removed, there is only one cache in each cache
-set. We don't need to reference MAX_CACHES_PER_SET to check whether a
-bkey pointer is available; a valid PTR_DEV(k) should be 0.
-
-But MAX_CACHES_PER_SET is still referenced by the on-disk data structure
-in include/uapi/linux/bcache.h, so in order to keep on-disk format
-consistency, it is not removed from the uapi header file.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 4bd11e84181f..86260df7819b 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -788,7 +788,7 @@ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k)
-
- static inline bool ptr_available(struct cache_set *c, const struct bkey *k)
- {
-- return (PTR_DEV(k) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k);
-+ return (PTR_DEV(k) == 0) && PTR_CACHE(c, k);
- }
-
- /* Btree key macros */
---
-2.17.0
-
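To see why the bounds check above degenerates, note that with one cache per set the only device index ever stored in a bkey pointer is 0. A standalone toy sketch of that check follows; the pointer layout, macro names, and bit widths are invented for illustration and are NOT the bcache on-disk format:

/* Illustration only: an invented 64-bit pointer layout. */
#include <stdbool.h>
#include <stdint.h>

#define TOY_DEV_BITS	4
#define TOY_DEV_MASK	((UINT64_C(1) << TOY_DEV_BITS) - 1)

/* Device index assumed to sit in the low bits of the toy pointer. */
static inline unsigned int toy_ptr_dev(uint64_t ptr)
{
	return (unsigned int)(ptr & TOY_DEV_MASK);
}

/*
 * Old-style check: any device index below some maximum could be valid.
 * New-style check: with a single cache, only index 0 is valid.
 */
static inline bool toy_ptr_available(uint64_t ptr)
{
	return toy_ptr_dev(ptr) == 0;
}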
diff --git a/for-test/remove-multiple-cache-devices/original_ideas/debug_info_0001.patch b/for-test/remove-multiple-cache-devices/original_ideas/debug_info_0001.patch
deleted file mode 100644
index 91b700f..0000000
--- a/for-test/remove-multiple-cache-devices/original_ideas/debug_info_0001.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-Index: linux/drivers/md/bcache/bcache.mod.c
-===================================================================
---- linux.orig/drivers/md/bcache/bcache.mod.c
-+++ linux/drivers/md/bcache/bcache.mod.c
-@@ -223,4 +223,4 @@ __attribute__((section(".modinfo"))) =
- "depends=";
-
-
--MODULE_INFO(srcversion, "8B6A3E10020471EB473203C");
-+MODULE_INFO(srcversion, "713065597BC89F261A1A15D");
-Index: linux/drivers/md/bcache/io.c
-===================================================================
---- linux.orig/drivers/md/bcache/io.c
-+++ linux/drivers/md/bcache/io.c
-@@ -37,8 +37,9 @@ void __bch_submit_bbio(struct bio *bio,
- bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
- bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
-
-- pr_err("%s: bio->bi_iter.bi_sector: %lu", __func__,
-- bio->bi_iter.bi_sector);
-+ if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
-+ pr_err("%s: bio->bi_iter.bi_sector: %lu", __func__,
-+ bio->bi_iter.bi_sector);
- b->submit_time_us = local_clock_us();
- closure_bio_submit(c, bio, bio->bi_private);
- }
-Index: linux/drivers/md/bcache/journal.c
-===================================================================
---- linux.orig/drivers/md/bcache/journal.c
-+++ linux/drivers/md/bcache/journal.c
-@@ -162,8 +162,9 @@ int bch_journal_read(struct cache_set *c
- unsigned i, l, r, m;
- uint64_t seq;
-
-+ pr_err("%s read journal for cache %u\n", __func__, iter);
- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-- pr_debug("%u journal buckets", ca->sb.njournal_buckets);
-+ pr_err("%u journal buckets", ca->sb.njournal_buckets);
-
- /*
- * Read journal buckets ordered by golden ratio hash to quickly
-Index: linux/drivers/md/bcache/super.c
-===================================================================
---- linux.orig/drivers/md/bcache/super.c
-+++ linux/drivers/md/bcache/super.c
-@@ -209,6 +209,7 @@ static void __write_super(struct cache_s
- struct cache_sb *out = page_address(bio_first_page_all(bio));
- unsigned i;
-
-+ dump_stack();
- bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_iter.bi_size = SB_SIZE;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
-@@ -297,7 +298,8 @@ void bcache_write_super(struct cache_set
-
- c->sb.seq++;
-
-- pr_err("start to write super");
-+ pr_err("start to write cache super");
-+ dump_stack();
- for_each_cache(ca, c, i) {
- struct bio *bio = &ca->sb_bio;
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/0000-cover-letter.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/0000-cover-letter.patch
deleted file mode 100644
index 8b05636..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/0000-cover-letter.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From 3821a8cd1196fdf155605f0747b1a9d4bf6d8cff Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 00:55:48 +0800
-Subject: [PATCH 00/14] bcache: remove multiple caches code framework
-
-The multiple caches code framework in bcache is meant to store multiple
-copies of the cached data among multiple caches of the cache set. The
-current code framework just does a simple data write to each cache
-without any extra condition handling (e.g. device failure, slow
-devices). This code framework is not and will never be completed.
-Considering people may use md raid1 for the same data duplication
-purpose, the multiple caches framework is useless dead code indeed.
-
-Due to the multiple caches code framework, bcache has two data
-structures, struct cache and struct cache_set, to manage the cache
-device. Indeed, since bcache was merged into the mainline kernel in
-Linux v3.10, a cache set has only ever had one cache. The unnecessary
-two-level abstraction takes extra effort to maintain redundant
-information between struct cache and struct cache_set, for example the
-in-memory super block struct cache_sb.
-
-This is the first wave of effort to remove the multiple caches framework
-and make the code and data structure relation clearer. This series
-explicitly makes each cache set only have a single cache, removes the
-embedded partial super block in struct cache_set and directly references
-the cache's in-memory super block, and finally moves struct cache_sb
-from include/uapi/linux/bcache.h to drivers/md/bcache/bcache.h since it
-isn't part of the uapi anymore.
-
-The patch set has only passed compiling; I post this series early for
-your review and comments. More fixes after testing will follow up soon.
-
-Thanks in advance.
-
-Coly Li
-----
-
-Coly Li (14):
- bcache: remove 'int n' from parameter list of bch_bucket_alloc_set()
- bcache: explicitly make cache_set only have single cache
- bcache: remove for_each_cache()
- bcache: add set_uuid in struct cache_set
- bcache: only use block_bytes() on struct cache
- bcache: remove useless alloc_bucket_pages()
- bcache: remove useless bucket_pages()
- bcache: only use bucket_bytes() on struct cache
- bcache: avoid data copy between cache_set->sb and cache->sb
- bcache: don't check seq numbers in register_cache_set()
- bcache: remove can_attach_cache()
- bcache: check and set sync status on cache's in-memory super block
- bcache: remove embedded struct cache_sb from struct cache_set
- bcache: move struct cache_sb out of uapi bcache.h
-
- drivers/md/bcache/alloc.c | 60 ++++-----
- drivers/md/bcache/bcache.h | 128 +++++++++++++++---
- drivers/md/bcache/btree.c | 144 ++++++++++----------
- drivers/md/bcache/btree.h | 2 +-
- drivers/md/bcache/debug.c | 10 +-
- drivers/md/bcache/extents.c | 6 +-
- drivers/md/bcache/features.c | 4 +-
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 246 ++++++++++++++++------------------
- drivers/md/bcache/movinggc.c | 58 ++++----
- drivers/md/bcache/request.c | 6 +-
- drivers/md/bcache/super.c | 225 +++++++++++--------------------
- drivers/md/bcache/sysfs.c | 10 +-
- drivers/md/bcache/writeback.c | 2 +-
- include/trace/events/bcache.h | 4 +-
- include/uapi/linux/bcache.h | 98 --------------
- 16 files changed, 445 insertions(+), 560 deletions(-)
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
deleted file mode 100644
index d55c183..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 9260c7e003b7652c9a8208fa479ff4c5d72a6737 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:07:05 +0800
-Subject: [PATCH v1 01/14] bcache: remove 'int n' from parameter list of
- bch_bucket_alloc_set()
-
-The parameter 'int n' of bch_bucket_alloc_set() is not clearly
-defined. From the code comments n is the number of buckets to allocate,
-but from the code itself 'n' is the maximum number of caches to iterate.
-Indeed in all the locations where bch_bucket_alloc_set() is called, 'n'
-is always 1.
-
-This patch removes the confusing and unnecessary 'int n' from the
-parameter list of bch_bucket_alloc_set(), and explicitly allocates only
-1 bucket for its caller.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
- drivers/md/bcache/bcache.h | 4 ++--
- drivers/md/bcache/btree.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- 4 files changed, 19 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 52035a78d836..4493ff57476d 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -49,7 +49,7 @@
- *
- * bch_bucket_alloc() allocates a single bucket from a specific cache.
- *
-- * bch_bucket_alloc_set() allocates one or more buckets from different caches
-+ * bch_bucket_alloc_set() allocates one bucket from different caches
- * out of a cache set.
- *
- * free_some_buckets() drives all the processes described above. It's called
-@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
-- int i;
-+ struct cache *ca;
-+ long b;
-
- /* No allocation if CACHE_SET_IO_DISABLE bit is set */
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return -1;
-
- lockdep_assert_held(&c->bucket_lock);
-- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
-
- bkey_init(k);
-
-- /* sort by free space/prio of oldest data in caches */
--
-- for (i = 0; i < n; i++) {
-- struct cache *ca = c->cache_by_alloc[i];
-- long b = bch_bucket_alloc(ca, reserve, wait);
-+ ca = c->cache_by_alloc[0];
-+ b = bch_bucket_alloc(ca, reserve, wait);
-+ if (b == -1)
-+ goto err;
-
-- if (b == -1)
-- goto err;
-+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
-+ bucket_to_sector(c, b),
-+ ca->sb.nr_this_dev);
-
-- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-- bucket_to_sector(c, b),
-- ca->sb.nr_this_dev);
--
-- SET_KEY_PTRS(k, i + 1);
-- }
-+ SET_KEY_PTRS(k, 1);
-
- return 0;
- err:
-@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- }
-
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
- int ret;
-
- mutex_lock(&c->bucket_lock);
-- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
-+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
-
- spin_unlock(&c->data_bucket_lock);
-
-- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
-+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
- return false;
-
- spin_lock(&c->data_bucket_lock);
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 4fd03d2496d8..5ff6e9573935 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
-
- long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
- unsigned int sectors, unsigned int write_point,
- unsigned int write_prio, bool wait);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 3d8bd0692af3..e2a719fed53b 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
-
- mutex_lock(&c->bucket_lock);
- retry:
-- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
-+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
- goto err;
-
- bkey_put(c, &k.key);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 1bbdc410ee3c..7057ec48f3d1 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
- closure_init_stack(&cl);
- lockdep_assert_held(&bch_register_lock);
-
-- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
-+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0002-bcache-explicitly-make-cache_set-only-have-single.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0002-bcache-explicitly-make-cache_set-only-have-single.patch
deleted file mode 100644
index 89c492c..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0002-bcache-explicitly-make-cache_set-only-have-single.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From da9ff41f507337ce4797935e8ba9b70da361d59d Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:30:59 +0800
-Subject: [PATCH v1 02/14] bcache: explicitly make cache_set only have single
- cache
-
-Although the bcache code currently has a framework for multiple caches
-in a cache set, the multiple caches support was never completed, and
-users use md raid1 for multiple copies of the cached data.
-
-This patch makes the following changes in struct cache_set, to
-explicitly make a cache_set only have a single cache:
-- Change pointer array "*cache[MAX_CACHES_PER_SET]" to a single pointer
- "*cache".
-- Remove pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
-- Remove "caches_loaded".
-
-Now the code looks exactly like what it does in practice: only one cache
-is used in the cache set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/bcache.h | 8 +++-----
- drivers/md/bcache/super.c | 19 ++++++++-----------
- 3 files changed, 12 insertions(+), 17 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 4493ff57476d..3385f6add6df 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-
- bkey_init(k);
-
-- ca = c->cache_by_alloc[0];
-+ ca = c->cache;
- b = bch_bucket_alloc(ca, reserve, wait);
- if (b == -1)
- goto err;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 5ff6e9573935..aa112c1adba1 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -519,9 +519,7 @@ struct cache_set {
-
- struct cache_sb sb;
-
-- struct cache *cache[MAX_CACHES_PER_SET];
-- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
-- int caches_loaded;
-+ struct cache *cache;
-
- struct bcache_device **devices;
- unsigned int devices_max_used;
-@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k,
- unsigned int ptr)
- {
-- return c->cache[PTR_DEV(k, ptr)];
-+ return c->cache;
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
-@@ -890,7 +888,7 @@ do { \
- /* Looping macros */
-
- #define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
-+ for (iter = 0; ca = cs->cache, iter < 1; iter++)
-
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 7057ec48f3d1..e9ccfa17beb8 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1675,7 +1675,7 @@ static void cache_set_free(struct closure *cl)
- for_each_cache(ca, c, i)
- if (ca) {
- ca->set = NULL;
-- c->cache[ca->sb.nr_this_dev] = NULL;
-+ c->cache = NULL;
- kobject_put(&ca->kobj);
- }
-
-@@ -2166,7 +2166,7 @@ static const char *register_cache_set(struct cache *ca)
-
- list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-- if (c->cache[ca->sb.nr_this_dev])
-+ if (c->cache)
- return "duplicate cache set member";
-
- if (!can_attach_cache(ca, c))
-@@ -2216,14 +2216,11 @@ static const char *register_cache_set(struct cache *ca)
-
- kobject_get(&ca->kobj);
- ca->set = c;
-- ca->set->cache[ca->sb.nr_this_dev] = ca;
-- c->cache_by_alloc[c->caches_loaded++] = ca;
-+ ca->set->cache = ca;
-
-- if (c->caches_loaded == c->sb.nr_in_set) {
-- err = "failed to run cache set";
-- if (run_cache_set(c) < 0)
-- goto err;
-- }
-+ err = "failed to run cache set";
-+ if (run_cache_set(c) < 0)
-+ goto err;
-
- return NULL;
- err:
-@@ -2240,8 +2237,8 @@ void bch_cache_release(struct kobject *kobj)
- unsigned int i;
-
- if (ca->set) {
-- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
-- ca->set->cache[ca->sb.nr_this_dev] = NULL;
-+ BUG_ON(ca->set->cache != ca);
-+ ca->set->cache = NULL;
- }
-
- free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
---
-2.26.2
-
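Conceptually, after the patch above the registration path no longer counts loaded caches; the one cache that registers starts the set immediately. A hedged standalone sketch of that flow follows, with struct and function names invented for illustration (not the real bcache API):

/* Toy sketch of the single-cache registration flow. */
#include <stddef.h>

struct toy_cache;

struct toy_cache_set {
	struct toy_cache *cache;	/* was an array plus a caches_loaded counter */
};

static int toy_run_cache_set(struct toy_cache_set *c)
{
	(void)c;
	return 0;	/* pretend the cache set starts successfully */
}

static const char *toy_register_cache(struct toy_cache_set *c,
				      struct toy_cache *ca)
{
	if (c->cache)
		return "duplicate cache set member";

	c->cache = ca;

	/* Single cache: run the set right away instead of waiting for more. */
	if (toy_run_cache_set(c) < 0)
		return "failed to run cache set";

	return NULL;
}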
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0003-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0003-bcache-remove-for_each_cache.patch
deleted file mode 100644
index 2968637..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0003-bcache-remove-for_each_cache.patch
+++ /dev/null
@@ -1,895 +0,0 @@
-From 50516df3a606a49a170bb14e26ed595aff4c84d0 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 01:26:09 +0800
-Subject: [PATCH v1 03/14] bcache: remove for_each_cache()
-
-Since each cache_set now explicitly has a single cache, for_each_cache()
-is unnecessary. This patch removes this macro, updates all locations
-where it is used, and makes sure all code logic is still consistent.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 17 ++-
- drivers/md/bcache/bcache.h | 9 +-
- drivers/md/bcache/btree.c | 103 +++++++---------
- drivers/md/bcache/journal.c | 229 ++++++++++++++++-------------------
- drivers/md/bcache/movinggc.c | 58 +++++----
- drivers/md/bcache/super.c | 115 ++++++++----------
- 6 files changed, 237 insertions(+), 294 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 3385f6add6df..1b8310992dd0 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- struct cache *ca;
- struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-- unsigned int i;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
-
- c->min_prio = USHRT_MAX;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca)
-- if (b->prio &&
-- b->prio != BTREE_PRIO &&
-- !atomic_read(&b->pin)) {
-- b->prio--;
-- c->min_prio = min(c->min_prio, b->prio);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca)
-+ if (b->prio &&
-+ b->prio != BTREE_PRIO &&
-+ !atomic_read(&b->pin)) {
-+ b->prio--;
-+ c->min_prio = min(c->min_prio, b->prio);
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index aa112c1adba1..7ffe6b2d179b 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -887,9 +887,6 @@ do { \
-
- /* Looping macros */
-
--#define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache, iter < 1; iter++)
--
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
- b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
-
- static inline void wake_up_allocators(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- wake_up_process(ca->alloc_thread);
-+ wake_up_process(ca->alloc_thread);
- }
-
- static inline void closure_bio_submit(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index e2a719fed53b..0817ad510d9f 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- static int btree_check_reserve(struct btree *b, struct btree_op *op)
- {
- struct cache_set *c = b->c;
-- struct cache *ca;
-- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
-+ struct cache *ca = c->cache;
-+ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i)
-- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-- if (op)
-- prepare_to_wait(&c->btree_cache_wait, &op->wait,
-- TASK_UNINTERRUPTIBLE);
-- mutex_unlock(&c->bucket_lock);
-- return -EINTR;
-- }
-+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-+ if (op)
-+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
-+ TASK_UNINTERRUPTIBLE);
-+ mutex_unlock(&c->bucket_lock);
-+ return -EINTR;
-+ }
-
- mutex_unlock(&c->bucket_lock);
-
-@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned int i;
-
- if (!c->gc_mark_valid)
- return;
-@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
- c->gc_mark_valid = 0;
- c->gc_done = ZERO_KEY;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca) {
-- b->last_gc = b->gen;
-- if (!atomic_read(&b->pin)) {
-- SET_GC_MARK(b, 0);
-- SET_GC_SECTORS_USED(b, 0);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca) {
-+ b->last_gc = b->gen;
-+ if (!atomic_read(&b->pin)) {
-+ SET_GC_MARK(b, 0);
-+ SET_GC_SECTORS_USED(b, 0);
- }
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
- {
- struct bucket *b;
- struct cache *ca;
-- unsigned int i;
-+ unsigned int i, j;
-+ uint64_t *k;
-
- mutex_lock(&c->bucket_lock);
-
-@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
- struct bcache_device *d = c->devices[i];
- struct cached_dev *dc;
- struct keybuf_key *w, *n;
-- unsigned int j;
-
- if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
-@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
- rcu_read_unlock();
-
- c->avail_nbuckets = 0;
-- for_each_cache(ca, c, i) {
-- uint64_t *i;
-
-- ca->invalidate_needs_gc = 0;
-+ ca = c->cache;
-+ ca->invalidate_needs_gc = 0;
-
-- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for (i = ca->prio_buckets;
-- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->prio_buckets;
-+ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for_each_bucket(b, ca) {
-- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-+ for_each_bucket(b, ca) {
-+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-
-- if (atomic_read(&b->pin))
-- continue;
-+ if (atomic_read(&b->pin))
-+ continue;
-
-- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-
-- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-- c->avail_nbuckets++;
-- }
-+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-+ c->avail_nbuckets++;
- }
-
- mutex_unlock(&c->bucket_lock);
-@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
-
- static bool gc_should_run(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- if (ca->invalidate_needs_gc)
-- return true;
-+ if (ca->invalidate_needs_gc)
-+ return true;
-
- if (atomic_read(&c->sectors_to_gc) < 0)
- return true;
-@@ -2081,9 +2075,8 @@ int bch_btree_check(struct cache_set *c)
-
- void bch_initial_gc_finish(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-
- bch_btree_gc_finish(c);
-
-@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
- * This is only safe for buckets that have no live data in them, which
- * there should always be some of.
- */
-- for_each_cache(ca, c, i) {
-- for_each_bucket(b, ca) {
-- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-- fifo_full(&ca->free[RESERVE_BTREE]))
-- break;
-+ for_each_bucket(b, ca) {
-+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-+ fifo_full(&ca->free[RESERVE_BTREE]))
-+ break;
-
-- if (bch_can_invalidate_bucket(ca, b) &&
-- !GC_MARK(b)) {
-- __bch_invalidate_one_bucket(ca, b);
-- if (!fifo_push(&ca->free[RESERVE_PRIO],
-- b - ca->buckets))
-- fifo_push(&ca->free[RESERVE_BTREE],
-- b - ca->buckets);
-- }
-+ if (bch_can_invalidate_bucket(ca, b) &&
-+ !GC_MARK(b)) {
-+ __bch_invalidate_one_bucket(ca, b);
-+ if (!fifo_push(&ca->free[RESERVE_PRIO],
-+ b - ca->buckets))
-+ fifo_push(&ca->free[RESERVE_BTREE],
-+ b - ca->buckets);
- }
- }
-
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 77fbfd52edcf..027d0f8c4daf 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
- ret; \
- })
-
-- struct cache *ca;
-- unsigned int iter;
-+ struct cache *ca = c->cache;
- int ret = 0;
-+ struct journal_device *ja = &ca->journal;
-+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-+ unsigned int i, l, r, m;
-+ uint64_t seq;
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-- unsigned int i, l, r, m;
-- uint64_t seq;
--
-- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-
-+ /*
-+ * Read journal buckets ordered by golden ratio hash to quickly
-+ * find a sequence of buckets with valid journal entries
-+ */
-+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
- /*
-- * Read journal buckets ordered by golden ratio hash to quickly
-- * find a sequence of buckets with valid journal entries
-+ * We must try the index l with ZERO first for
-+ * correctness due to the scenario that the journal
-+ * bucket is circular buffer which might have wrapped
- */
-- for (i = 0; i < ca->sb.njournal_buckets; i++) {
-- /*
-- * We must try the index l with ZERO first for
-- * correctness due to the scenario that the journal
-- * bucket is circular buffer which might have wrapped
-- */
-- l = (i * 2654435769U) % ca->sb.njournal_buckets;
-+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
-
-- if (test_bit(l, bitmap))
-- break;
-+ if (test_bit(l, bitmap))
-+ break;
-
-- if (read_bucket(l))
-- goto bsearch;
-- }
-+ if (read_bucket(l))
-+ goto bsearch;
-+ }
-
-- /*
-- * If that fails, check all the buckets we haven't checked
-- * already
-- */
-- pr_debug("falling back to linear search\n");
-+ /*
-+ * If that fails, check all the buckets we haven't checked
-+ * already
-+ */
-+ pr_debug("falling back to linear search\n");
-
-- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-- if (read_bucket(l))
-- goto bsearch;
-+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-+ if (read_bucket(l))
-+ goto bsearch;
-
-- /* no journal entries on this device? */
-- if (l == ca->sb.njournal_buckets)
-- continue;
-+ /* no journal entries on this device? */
-+ if (l == ca->sb.njournal_buckets)
-+ goto out;
- bsearch:
-- BUG_ON(list_empty(list));
-+ BUG_ON(list_empty(list));
-
-- /* Binary search */
-- m = l;
-- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-- pr_debug("starting binary search, l %u r %u\n", l, r);
-+ /* Binary search */
-+ m = l;
-+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-+ pr_debug("starting binary search, l %u r %u\n", l, r);
-
-- while (l + 1 < r) {
-- seq = list_entry(list->prev, struct journal_replay,
-- list)->j.seq;
-+ while (l + 1 < r) {
-+ seq = list_entry(list->prev, struct journal_replay,
-+ list)->j.seq;
-
-- m = (l + r) >> 1;
-- read_bucket(m);
-+ m = (l + r) >> 1;
-+ read_bucket(m);
-
-- if (seq != list_entry(list->prev, struct journal_replay,
-- list)->j.seq)
-- l = m;
-- else
-- r = m;
-- }
-+ if (seq != list_entry(list->prev, struct journal_replay,
-+ list)->j.seq)
-+ l = m;
-+ else
-+ r = m;
-+ }
-
-- /*
-- * Read buckets in reverse order until we stop finding more
-- * journal entries
-- */
-- pr_debug("finishing up: m %u njournal_buckets %u\n",
-- m, ca->sb.njournal_buckets);
-- l = m;
-+ /*
-+ * Read buckets in reverse order until we stop finding more
-+ * journal entries
-+ */
-+ pr_debug("finishing up: m %u njournal_buckets %u\n",
-+ m, ca->sb.njournal_buckets);
-+ l = m;
-
-- while (1) {
-- if (!l--)
-- l = ca->sb.njournal_buckets - 1;
-+ while (1) {
-+ if (!l--)
-+ l = ca->sb.njournal_buckets - 1;
-
-- if (l == m)
-- break;
-+ if (l == m)
-+ break;
-
-- if (test_bit(l, bitmap))
-- continue;
-+ if (test_bit(l, bitmap))
-+ continue;
-
-- if (!read_bucket(l))
-- break;
-- }
-+ if (!read_bucket(l))
-+ break;
-+ }
-
-- seq = 0;
-+ seq = 0;
-
-- for (i = 0; i < ca->sb.njournal_buckets; i++)
-- if (ja->seq[i] > seq) {
-- seq = ja->seq[i];
-- /*
-- * When journal_reclaim() goes to allocate for
-- * the first time, it'll use the bucket after
-- * ja->cur_idx
-- */
-- ja->cur_idx = i;
-- ja->last_idx = ja->discard_idx = (i + 1) %
-- ca->sb.njournal_buckets;
-+ for (i = 0; i < ca->sb.njournal_buckets; i++)
-+ if (ja->seq[i] > seq) {
-+ seq = ja->seq[i];
-+ /*
-+ * When journal_reclaim() goes to allocate for
-+ * the first time, it'll use the bucket after
-+ * ja->cur_idx
-+ */
-+ ja->cur_idx = i;
-+ ja->last_idx = ja->discard_idx = (i + 1) %
-+ ca->sb.njournal_buckets;
-
-- }
-- }
-+ }
-
-+out:
- if (!list_empty(list))
- c->journal.seq = list_entry(list->prev,
- struct journal_replay,
-@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
-
- static bool is_discard_enabled(struct cache_set *s)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = s->cache;
-
-- for_each_cache(ca, s, i)
-- if (ca->discard)
-- return true;
-+ if (ca->discard)
-+ return true;
-
- return false;
- }
-@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- uint64_t last_seq;
-- unsigned int iter, n = 0;
-+ unsigned int next;
-+ struct journal_device *ja = &ca->journal;
- atomic_t p __maybe_unused;
-
- atomic_long_inc(&c->reclaim);
-@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
-
- /* Update last_idx */
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
--
-- while (ja->last_idx != ja->cur_idx &&
-- ja->seq[ja->last_idx] < last_seq)
-- ja->last_idx = (ja->last_idx + 1) %
-- ca->sb.njournal_buckets;
-- }
-+ while (ja->last_idx != ja->cur_idx &&
-+ ja->seq[ja->last_idx] < last_seq)
-+ ja->last_idx = (ja->last_idx + 1) %
-+ ca->sb.njournal_buckets;
-
-- for_each_cache(ca, c, iter)
-- do_journal_discard(ca);
-+ do_journal_discard(ca);
-
- if (c->journal.blocks_free)
- goto out;
-
-- /*
-- * Allocate:
-- * XXX: Sort by free journal space
-- */
--
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ /* No space available on this device */
-+ if (next == ja->discard_idx)
-+ goto out;
-
-- /* No space available on this device */
-- if (next == ja->discard_idx)
-- continue;
-+ ja->cur_idx = next;
-+ k->ptr[0] = MAKE_PTR(0,
-+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-+ ca->sb.nr_this_dev);
-+ atomic_long_inc(&c->reclaimed_journal_buckets);
-
-- ja->cur_idx = next;
-- k->ptr[n++] = MAKE_PTR(0,
-- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-- ca->sb.nr_this_dev);
-- atomic_long_inc(&c->reclaimed_journal_buckets);
-- }
-+ bkey_init(k);
-+ SET_KEY_PTRS(k, 1);
-+ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
-- if (n) {
-- bkey_init(k);
-- SET_KEY_PTRS(k, n);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-- }
- out:
- if (!journal_full(&c->journal))
- __closure_wake_up(&c->journal.wait);
-@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
- __releases(c->journal.lock)
- {
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->btree_root, &c->root->key);
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
-- for_each_cache(ca, c, i)
-- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
--
-+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(&c->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 5872d6470470..b9c3d27ec093 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
-
- void bch_moving_gc(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-+ unsigned long sectors_to_move, reserve_sectors;
-
- if (!c->copy_gc_enabled)
- return;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i) {
-- unsigned long sectors_to_move = 0;
-- unsigned long reserve_sectors = ca->sb.bucket_size *
-+ sectors_to_move = 0;
-+ reserve_sectors = ca->sb.bucket_size *
- fifo_used(&ca->free[RESERVE_MOVINGGC]);
-
-- ca->heap.used = 0;
--
-- for_each_bucket(b, ca) {
-- if (GC_MARK(b) == GC_MARK_METADATA ||
-- !GC_SECTORS_USED(b) ||
-- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-- atomic_read(&b->pin))
-- continue;
--
-- if (!heap_full(&ca->heap)) {
-- sectors_to_move += GC_SECTORS_USED(b);
-- heap_add(&ca->heap, b, bucket_cmp);
-- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-- sectors_to_move -= bucket_heap_top(ca);
-- sectors_to_move += GC_SECTORS_USED(b);
--
-- ca->heap.data[0] = b;
-- heap_sift(&ca->heap, 0, bucket_cmp);
-- }
-- }
-+ ca->heap.used = 0;
-+
-+ for_each_bucket(b, ca) {
-+ if (GC_MARK(b) == GC_MARK_METADATA ||
-+ !GC_SECTORS_USED(b) ||
-+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-+ atomic_read(&b->pin))
-+ continue;
-
-- while (sectors_to_move > reserve_sectors) {
-- heap_pop(&ca->heap, b, bucket_cmp);
-- sectors_to_move -= GC_SECTORS_USED(b);
-+ if (!heap_full(&ca->heap)) {
-+ sectors_to_move += GC_SECTORS_USED(b);
-+ heap_add(&ca->heap, b, bucket_cmp);
-+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-+ sectors_to_move -= bucket_heap_top(ca);
-+ sectors_to_move += GC_SECTORS_USED(b);
-+
-+ ca->heap.data[0] = b;
-+ heap_sift(&ca->heap, 0, bucket_cmp);
- }
-+ }
-
-- while (heap_pop(&ca->heap, b, bucket_cmp))
-- SET_GC_MOVE(b, 1);
-+ while (sectors_to_move > reserve_sectors) {
-+ heap_pop(&ca->heap, b, bucket_cmp);
-+ sectors_to_move -= GC_SECTORS_USED(b);
- }
-
-+ while (heap_pop(&ca->heap, b, bucket_cmp))
-+ SET_GC_MOVE(b, 1);
-+
- mutex_unlock(&c->bucket_lock);
-
- c->moving_gc_keys.last_scanned = ZERO_KEY;
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index e9ccfa17beb8..91883d5c4b62 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
- void bcache_write_super(struct cache_set *c)
- {
- struct closure *cl = &c->sb_write;
-- struct cache *ca;
-- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-+ struct cache *ca = c->cache;
-+ struct bio *bio = &ca->sb_bio;
-+ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
- if (c->sb.version > version)
- version = c->sb.version;
-
-- for_each_cache(ca, c, i) {
-- struct bio *bio = &ca->sb_bio;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
-+ ca->sb.version = version;
-+ ca->sb.seq = c->sb.seq;
-+ ca->sb.last_mount = c->sb.last_mount;
-
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-
-- bio_init(bio, ca->sb_bv, 1);
-- bio_set_dev(bio, ca->bdev);
-- bio->bi_end_io = write_super_endio;
-- bio->bi_private = ca;
-+ bio_init(bio, ca->sb_bv, 1);
-+ bio_set_dev(bio, ca->bdev);
-+ bio->bi_end_io = write_super_endio;
-+ bio->bi_private = ca;
-
-- closure_get(cl);
-- __write_super(&ca->sb, ca->sb_disk, bio);
-- }
-+ closure_get(cl);
-+ __write_super(&ca->sb, ca->sb_disk, bio);
-
- closure_return_with_destructor(cl, bcache_write_super_unlock);
- }
-@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
- lockdep_assert_held(&bch_register_lock);
-
- if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = d->c->cache;
-
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
-
-- for_each_cache(ca, d->c, i)
-- bd_unlink_disk_holder(ca->bdev, d->disk);
-+ bd_unlink_disk_holder(ca->bdev, d->disk);
- }
- }
-
- static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- const char *name)
- {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- int ret;
-
-- for_each_cache(ca, d->c, i)
-- bd_link_disk_holder(ca->bdev, d->disk);
-+ bd_link_disk_holder(ca->bdev, d->disk);
-
- snprintf(d->name, BCACHEDEVNAME_SIZE,
- "%s%u", name, d->id);
-@@ -1663,7 +1656,6 @@ static void cache_set_free(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, cl);
- struct cache *ca;
-- unsigned int i;
-
- debugfs_remove(c->debug);
-
-@@ -1672,12 +1664,12 @@ static void cache_set_free(struct closure *cl)
- bch_journal_free(c);
-
- mutex_lock(&bch_register_lock);
-- for_each_cache(ca, c, i)
-- if (ca) {
-- ca->set = NULL;
-- c->cache = NULL;
-- kobject_put(&ca->kobj);
-- }
-+ ca = c->cache;
-+ if (ca) {
-+ ca->set = NULL;
-+ c->cache = NULL;
-+ kobject_put(&ca->kobj);
-+ }
-
- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-@@ -1703,9 +1695,8 @@ static void cache_set_free(struct closure *cl)
- static void cache_set_flush(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, caching);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct btree *b;
-- unsigned int i;
-
- bch_cache_accounting_destroy(&c->accounting);
-
-@@ -1730,9 +1721,8 @@ static void cache_set_flush(struct closure *cl)
- mutex_unlock(&b->write_lock);
- }
-
-- for_each_cache(ca, c, i)
-- if (ca->alloc_thread)
-- kthread_stop(ca->alloc_thread);
-+ if (ca->alloc_thread)
-+ kthread_stop(ca->alloc_thread);
-
- if (c->journal.cur) {
- cancel_delayed_work_sync(&c->journal.work);
-@@ -1973,16 +1963,14 @@ static int run_cache_set(struct cache_set *c)
- {
- const char *err = "cannot allocate memory";
- struct cached_dev *dc, *t;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct closure cl;
-- unsigned int i;
- LIST_HEAD(journal);
- struct journal_replay *l;
-
- closure_init_stack(&cl);
-
-- for_each_cache(ca, c, i)
-- c->nbuckets += ca->sb.nbuckets;
-+ c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
- if (CACHE_SYNC(&c->sb)) {
-@@ -2002,10 +1990,8 @@ static int run_cache_set(struct cache_set *c)
- j = &list_entry(journal.prev, struct journal_replay, list)->j;
-
- err = "IO error reading priorities";
-- for_each_cache(ca, c, i) {
-- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-- goto err;
-- }
-+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-+ goto err;
-
- /*
- * If prio_read() fails it'll call cache_set_error and we'll
-@@ -2049,9 +2035,8 @@ static int run_cache_set(struct cache_set *c)
- bch_journal_next(&c->journal);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- /*
- * First place it's safe to allocate: btree_check() and
-@@ -2070,28 +2055,23 @@ static int run_cache_set(struct cache_set *c)
- if (bch_journal_replay(c, &journal))
- goto err;
- } else {
-- pr_notice("invalidating existing data\n");
--
-- for_each_cache(ca, c, i) {
-- unsigned int j;
-+ unsigned int j;
-
-- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-- 2, SB_JOURNAL_BUCKETS);
-+ pr_notice("invalidating existing data\n");
-+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-+ 2, SB_JOURNAL_BUCKETS);
-
-- for (j = 0; j < ca->sb.keys; j++)
-- ca->sb.d[j] = ca->sb.first_bucket + j;
-- }
-+ for (j = 0; j < ca->sb.keys; j++)
-+ ca->sb.d[j] = ca->sb.first_bucket + j;
-
- bch_initial_gc_finish(c);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- mutex_lock(&c->bucket_lock);
-- for_each_cache(ca, c, i)
-- bch_prio_write(ca, true);
-+ bch_prio_write(ca, true);
- mutex_unlock(&c->bucket_lock);
-
- err = "cannot allocate new UUID bucket";
-@@ -2467,13 +2447,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
- static bool bch_is_open_cache(struct block_device *bdev)
- {
- struct cache_set *c, *tc;
-- struct cache *ca;
-- unsigned int i;
-
-- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
-- for_each_cache(ca, c, i)
-- if (ca->bdev == bdev)
-- return true;
-+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-+ struct cache *ca = c->cache;
-+
-+ if (ca->bdev == bdev)
-+ return true;
-+ }
-+
- return false;
- }
-
---
-2.26.2
-
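Every hunk in the patch above applies the same transformation: the for_each_cache() loop over the set's cache array collapses into a direct use of the set's single cache. As a minimal illustration, this is the shape of the converted is_discard_enabled() restated on its own (taken from the hunk above, not new kernel code):

/*
 * Illustration of the single-cache pattern used throughout this series:
 * after the conversion a cache_set holds exactly one cache, so the old
 * per-device loop becomes a single read of s->cache.
 */
static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}
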
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0004-bcache-add-set_uuid-in-struct-cache_set.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0004-bcache-add-set_uuid-in-struct-cache_set.patch
deleted file mode 100644
index a735c3d..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0004-bcache-add-set_uuid-in-struct-cache_set.patch
+++ /dev/null
@@ -1,172 +0,0 @@
-From 5f709f50fb5302b446ab136dd4673a68051b9299 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 20:12:07 +0800
-Subject: [PATCH v1 04/14] bcache: add set_uuid in struct cache_set
-
-This patch adds a separate set_uuid[16] to struct cache_set, to store
-the uuid of the cache set. This is a preparation to remove the
-embedded struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 1 +
- drivers/md/bcache/debug.c | 2 +-
- drivers/md/bcache/super.c | 24 ++++++++++++------------
- include/trace/events/bcache.h | 4 ++--
- 4 files changed, 16 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 7ffe6b2d179b..94a62acac4fc 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -668,6 +668,7 @@ struct cache_set {
- struct mutex verify_lock;
- #endif
-
-+ uint8_t set_uuid[16];
- unsigned int nr_uuids;
- struct uuid_entry *uuids;
- BKEY_PADDED(uuid_bucket);
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 336f43910383..0ccc1b0baa42 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
- if (!IS_ERR_OR_NULL(bcache_debug)) {
- char name[50];
-
-- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
-+ snprintf(name, 50, "bcache-%pU", c->set_uuid);
- c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
- &cache_set_debug_ops);
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 91883d5c4b62..90a419ad6445 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1189,8 +1189,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- struct cached_dev *exist_dc, *t;
- int ret = 0;
-
-- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
-- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
-+ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
-+ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
- return -ENOENT;
-
- if (dc->disk.c) {
-@@ -1262,7 +1262,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- u->first_reg = u->last_reg = rtime;
- bch_uuid_write(c);
-
-- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
-+ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-
- bch_write_bdev_super(dc, &cl);
-@@ -1324,7 +1324,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
- dc->disk.disk->disk_name,
-- dc->disk.c->sb.set_uuid);
-+ dc->disk.c->set_uuid);
- return 0;
- }
-
-@@ -1632,7 +1632,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
- vaf.va = &args;
-
- pr_err("error on %pU: %pV, disabling caching\n",
-- c->sb.set_uuid, &vaf);
-+ c->set_uuid, &vaf);
-
- va_end(args);
-
-@@ -1685,7 +1685,7 @@ static void cache_set_free(struct closure *cl)
- list_del(&c->list);
- mutex_unlock(&bch_register_lock);
-
-- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
-+ pr_info("Cache set %pU unregistered\n", c->set_uuid);
- wake_up(&unregister_wait);
-
- closure_debug_destroy(&c->cl);
-@@ -1755,7 +1755,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
- {
- if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
- pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
-- d->disk->disk_name, c->sb.set_uuid);
-+ d->disk->disk_name, c->set_uuid);
- bcache_device_stop(d);
- } else if (atomic_read(&dc->has_dirty)) {
- /*
-@@ -1862,7 +1862,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
-- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
-+ memcpy(c->set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
- c->sb.nr_in_set = sb->nr_in_set;
-@@ -2145,7 +2145,7 @@ static const char *register_cache_set(struct cache *ca)
- struct cache_set *c;
-
- list_for_each_entry(c, &bch_cache_sets, list)
-- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-+ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
- if (c->cache)
- return "duplicate cache set member";
-
-@@ -2163,7 +2163,7 @@ static const char *register_cache_set(struct cache *ca)
- return err;
-
- err = "error creating kobject";
-- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
-+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
- kobject_add(&c->internal, &c->kobj, "internal"))
- goto err;
-
-@@ -2188,7 +2188,7 @@ static const char *register_cache_set(struct cache *ca)
- */
- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
- c->sb.version = ca->sb.version;
-- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
-+ memcpy(c->set_uuid, ca->sb.set_uuid, 16);
- c->sb.flags = ca->sb.flags;
- c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu\n", c->sb.version);
-@@ -2698,7 +2698,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
- list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
-- char *set_uuid = c->sb.uuid;
-+ char *set_uuid = c->set_uuid;
-
- if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
- list_del(&pdev->list);
-diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
-index 0bddea663b3b..e41c611d6d3b 100644
---- a/include/trace/events/bcache.h
-+++ b/include/trace/events/bcache.h
-@@ -164,7 +164,7 @@ TRACE_EVENT(bcache_write,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- __entry->inode = inode;
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio->bi_iter.bi_size >> 9;
-@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- ),
-
- TP_printk("%pU", __entry->uuid)
---
-2.26.2
-
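With the uuid held directly in cache_set->set_uuid, code that used to read c->sb.set_uuid now compares against the new field, as the register_cache_set() hunk shows. A minimal sketch of such a lookup, assuming a hypothetical helper name and a caller-supplied list head (the real code open-codes this over bch_cache_sets):

/* hypothetical helper, illustration only: match a cache set by the
 * 16-byte uuid now stored in cache_set->set_uuid */
static struct cache_set *lookup_cache_set(struct list_head *sets,
					  const __u8 uuid[16])
{
	struct cache_set *c;

	list_for_each_entry(c, sets, list)
		if (!memcmp(c->set_uuid, uuid, 16))
			return c;

	return NULL;
}
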
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0005-bcache-only-use-block_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0005-bcache-only-use-block_bytes-on-struct-cache.patch
deleted file mode 100644
index b814ca7..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0005-bcache-only-use-block_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,257 +0,0 @@
-From 178fa57c56550568bf0d4140d8dc689cc6c11682 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:25:58 +0800
-Subject: [PATCH v1 05/14] bcache: only use block_bytes() on struct cache
-
-Because struct cache_set and struct cache both have struct cache_sb,
-macro block_bytes() can be used on both of them. When removing the
-embedded struct cache_sb from struct cache_set, this macro won't be
-used on struct cache_set anymore.
-
-This patch unifies all block_bytes() usage to struct cache only; this is
-one of the preparations to remove the embedded struct cache_sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/btree.c | 24 ++++++++++++------------
- drivers/md/bcache/debug.c | 8 ++++----
- drivers/md/bcache/journal.c | 8 ++++----
- drivers/md/bcache/request.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 7 files changed, 24 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94a62acac4fc..29bec61cafbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -759,7 +759,7 @@ struct bbio {
-
- #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
--#define block_bytes(c) ((c)->sb.block_size << 9)
-+#define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
- {
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 0817ad510d9f..c91b4d58a5b3 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -104,7 +104,7 @@
-
- static inline struct bset *write_block(struct btree *b)
- {
-- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
-+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
- }
-
- static void bch_btree_init_next(struct btree *b)
-@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad btree header";
-- if (b->written + set_blocks(i, block_bytes(b->c)) >
-+ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
- btree_blocks(b))
- goto err;
-
-@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
-
- bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- err = "corrupted btree";
- for (i = write_block(b);
- bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
-- i = ((void *) i) + block_bytes(b->c))
-+ i = ((void *) i) + block_bytes(b->c->cache))
- if (i->seq == b->keys.set[0].data->seq)
- goto err;
-
-@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
-
- b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = cl;
-- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
- b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
- bch_bio_map(b->bio, i);
-
-@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- void bch_btree_node_write(struct btree *b, struct closure *parent)
-@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
-
- if (nodes < 2 ||
- __set_blocks(b->keys.set[0].data, keys,
-- block_bytes(b->c)) > blocks * (nodes - 1))
-+ block_bytes(b->c->cache)) > blocks * (nodes - 1))
- return 0;
-
- for (i = 0; i < nodes; i++) {
-@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k),
-- block_bytes(b->c)) > blocks)
-+ block_bytes(b->c->cache)) > blocks)
- break;
-
- last = k;
-@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- * though)
- */
- if (__set_blocks(n1, n1->keys + n2->keys,
-- block_bytes(b->c)) >
-+ block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]))
- goto out_unlock_nocoalesce;
-
-@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- last = &r->b->key;
- }
-
-- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
-+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]));
-
- if (last)
-@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
- goto err;
-
- split = set_blocks(btree_bset_first(n1),
-- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
-+ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
-
- if (split) {
- unsigned int keys = 0;
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 0ccc1b0baa42..b00fd08d696b 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
- for (i = (start); \
- (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
- i->seq == (start)->seq; \
-- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
-- block_bytes(b->c))
-+ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
-+ block_bytes(b->c->cache))
-
- void bch_btree_verify(struct btree *b)
- {
-@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
-
- for_each_written_bset(b, ondisk, i) {
- unsigned int block = ((void *) i - (void *) ondisk) /
-- block_bytes(b->c);
-+ block_bytes(b->c->cache);
-
- pr_err("*** on disk block %u:\n", block);
- bch_dump_bset(&b->keys, i, block);
- }
-
- pr_err("*** block %zu not written\n",
-- ((void *) i - (void *) ondisk) / block_bytes(b->c));
-+ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
-
- for (j = 0; j < inmemory->keys; j++)
- if (inmemory->d[j] != sorted->d[j])
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 027d0f8c4daf..ccd5de0ab0fe 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
- return ret;
- }
-
-- blocks = set_blocks(j, block_bytes(ca->set));
-+ blocks = set_blocks(j, block_bytes(ca));
-
- /*
- * Nodes in 'list' are in linear increasing order of
-@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
-- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
- c->sb.block_size;
-
- struct bio *bio;
-@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
- return;
- }
-
-- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
-+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
-
- w->data->btree_level = c->root->level;
-
-@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c)) * c->sb.block_size;
-+ block_bytes(c->cache)) * c->sb.block_size;
-
- if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index c7cadaafa947..02408fdbf5bb 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
- * bch_data_insert_keys() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
-- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
- return -ENOMEM;
-
- return __bch_keylist_realloc(l, u64s);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 90a419ad6445..36a538c2e960 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1528,7 +1528,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
-
- kobject_init(&d->kobj, &bch_flash_dev_ktype);
-
-- if (bcache_device_init(d, block_bytes(c), u->sectors,
-+ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
- NULL, &bcache_flash_ops))
- goto err;
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index ac06c0bc3c0a..b9f524ab5cc8 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c));
-- sysfs_hprint(block_size, block_bytes(c));
-+ sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
-
---
-2.26.2
-
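After this patch block_bytes() only makes sense on a struct cache, so cache_set code reaches it through c->cache, exactly as the hunks above do. A small sketch, with a hypothetical wrapper name, of the resulting call pattern:

#define block_bytes(ca)		((ca)->sb.block_size << 9)

/* hypothetical wrapper, illustration only: cache_set callers obtain the
 * block size in bytes via the set's single cache */
static inline unsigned int set_block_bytes(struct cache_set *c)
{
	return block_bytes(c->cache);
}
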
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0006-bcache-remove-useless-alloc_bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0006-bcache-remove-useless-alloc_bucket_pages.patch
deleted file mode 100644
index 2057ff6..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0006-bcache-remove-useless-alloc_bucket_pages.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 811f8198f1d5337729bbd855bf0e381e60eeeca3 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:28:23 +0800
-Subject: [PATCH v1 06/14] bcache: remove useless alloc_bucket_pages()
-
-Now no one uses alloc_bucket_pages() anymore; remove it from super.c.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 3 ---
- 1 file changed, 3 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 36a538c2e960..28257f11d835 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1832,9 +1832,6 @@ void bch_cache_set_unregister(struct cache_set *c)
- bch_cache_set_stop(c);
- }
-
--#define alloc_bucket_pages(gfp, c) \
-- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
--
- #define alloc_meta_bucket_pages(gfp, sb) \
- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0007-bcache-remove-useless-bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0007-bcache-remove-useless-bucket_pages.patch
deleted file mode 100644
index b7b40a9..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0007-bcache-remove-useless-bucket_pages.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From a34562e8f936f77d726fcd94746a467db5f2bf04 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:15:28 +0800
-Subject: [PATCH v1 07/14] bcache: remove useless bucket_pages()
-
-alloc_bucket_pages() was the only user of bucket_pages(). Now that
-alloc_bucket_pages() has been removed from the bcache code, it is safe
-to remove the useless macro bucket_pages() as well.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 29bec61cafbb..48a2585b6bbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,6 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
deleted file mode 100644
index 225cd5e..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 964012dfcb5e4ae91630c5d92b51cfba698dc41d Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:20:48 +0800
-Subject: [PATCH v1 08/14] bcache: only use bucket_bytes() on struct cache
-
-Because struct cache_set and struct cache both have struct cache_sb,
-macro bucket_bytes() is currently used on both of them. When removing
-the embedded struct cache_sb from struct cache_set, this macro won't be
-used on struct cache_set anymore.
-
-This patch unifies all bucket_bytes() usage to struct cache only; this is
-one of the preparations to remove the embedded struct cache_sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 48a2585b6bbb..94d4baf4c405 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,7 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
-+#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index b9f524ab5cc8..4bfe98faadcc 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -713,7 +713,7 @@ SHOW(__bch_cache_set)
-
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
-- sysfs_hprint(bucket_size, bucket_bytes(c));
-+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
deleted file mode 100644
index f0f0dcc..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From 78c5a3367fe79f81efa030ef2cb2fc171009fc14 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:18:45 +0800
-Subject: [PATCH v1 09/14] bcache: avoid data copy between cache_set->sb and
- cache->sb
-
-The struct cache_sb embedded in struct cache_set is only partially used
-and is not a real copy of the cache's in-memory super block. When
-removing the embedded cache_set->sb, it is unnecessary to copy data
-between these two in-memory super blocks (cache_set->sb and cache->sb);
-it is sufficient to just use cache->sb.
-
-This patch removes the data copy between these two in-memory super
-blocks in bch_cache_set_alloc() and bcache_write_super(). In the future,
-except for set_uuid, the cache's super block will be referenced directly
-by the cache set, with no copy any more.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 22 +++-------------------
- 1 file changed, 3 insertions(+), 19 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 28257f11d835..20de004ab2ef 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-
-- c->sb.seq++;
-+ ca->sb.seq++;
-
-- if (c->sb.version > version)
-- version = c->sb.version;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
--
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ if (ca->sb.version < version)
-+ ca->sb.version = version;
-
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
-@@ -1860,16 +1854,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
- memcpy(c->set_uuid, sb->set_uuid, 16);
-- c->sb.block_size = sb->block_size;
-- c->sb.bucket_size = sb->bucket_size;
-- c->sb.nr_in_set = sb->nr_in_set;
-- c->sb.last_mount = sb->last_mount;
-- c->sb.version = sb->version;
-- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
-- c->sb.feature_compat = sb->feature_compat;
-- c->sb.feature_ro_compat = sb->feature_ro_compat;
-- c->sb.feature_incompat = sb->feature_incompat;
-- }
-
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
deleted file mode 100644
index a00c7ad..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 754956b7956b6c08c1d8e3eab0a2bda29e220115 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:28:26 +0800
-Subject: [PATCH v1 10/14] bcache: don't check seq numbers in
- register_cache_set()
-
-In order to update the partial super block of the cache set, the seq
-numbers of the cache and the cache set are checked in
-register_cache_set(). If the cache's seq number is larger than the cache
-set's seq number, the cache set must update its partial super block from
-the cache's super block. This is unnecessary once the embedded struct
-cache_sb is removed from struct cache_set.
-
-This patch removes the seq number check from register_cache_set(),
-because later there will be no such partial super block in struct
-cache_set; the cache set will directly reference the in-memory super
-block of struct cache. This is a preparation patch for removing the
-embedded struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 15 ---------------
- 1 file changed, 15 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 20de004ab2ef..cdc1ebee5044 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2160,21 +2160,6 @@ static const char *register_cache_set(struct cache *ca)
- sysfs_create_link(&c->kobj, &ca->kobj, buf))
- goto err;
-
-- /*
-- * A special case is both ca->sb.seq and c->sb.seq are 0,
-- * such condition happens on a new created cache device whose
-- * super block is never flushed yet. In this case c->sb.version
-- * and other members should be updated too, otherwise we will
-- * have a mistaken super block version in cache set.
-- */
-- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
-- c->sb.version = ca->sb.version;
-- memcpy(c->set_uuid, ca->sb.set_uuid, 16);
-- c->sb.flags = ca->sb.flags;
-- c->sb.seq = ca->sb.seq;
-- pr_debug("set version = %llu\n", c->sb.version);
-- }
--
- kobject_get(&ca->kobj);
- ca->set = c;
- ca->set->cache = ca;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0011-bcache-remove-can_attach_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0011-bcache-remove-can_attach_cache.patch
deleted file mode 100644
index fac8321..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0011-bcache-remove-can_attach_cache.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From aeb61b8c57e542123d0082054e6a65f10848a6f1 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:36:56 +0800
-Subject: [PATCH v1 11/14] bcache: remove can_attach_cache()
-
-After removing the embedded struct cache_sb from struct cache_set, cache
-set will directly reference the in-memory super block of struct cache.
-It is unnecessary to compare block_size, bucket_size and nr_in_set from
-the identical in-memory super block in can_attach_cache().
-
-This is a preparation patch for later removing cache_set->sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 10 ----------
- 1 file changed, 10 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index cdc1ebee5044..80cfb9dfe93e 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2112,13 +2112,6 @@ static int run_cache_set(struct cache_set *c)
- return -EIO;
- }
-
--static bool can_attach_cache(struct cache *ca, struct cache_set *c)
--{
-- return ca->sb.block_size == c->sb.block_size &&
-- ca->sb.bucket_size == c->sb.bucket_size &&
-- ca->sb.nr_in_set == c->sb.nr_in_set;
--}
--
- static const char *register_cache_set(struct cache *ca)
- {
- char buf[12];
-@@ -2130,9 +2123,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!can_attach_cache(ca, c))
-- return "cache sb does not match set";
--
- if (!CACHE_SYNC(&ca->sb))
- SET_CACHE_SYNC(&c->sb, false);
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
deleted file mode 100644
index 96bc7c8..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
+++ /dev/null
@@ -1,109 +0,0 @@
-From 9cbec8384422a47b76db64bfe880e1224893c193 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:53:52 +0800
-Subject: [PATCH v1 12/14] bcache: check and set sync status on cache's
- in-memory super block
-
-Currently the cache's sync status is checked and set on the cache set's
-in-memory partial super block. After removing the embedded struct
-cache_sb from the cache set and referencing the cache's in-memory super
-block from struct cache_set, the sync status can be set and checked
-directly on the cache's super block.
-
-This patch checks and sets the cache sync status directly on cache's
-in-memory super block. This is a preparation for later removing embedded
-struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/super.c | 7 ++-----
- drivers/md/bcache/sysfs.c | 6 +++---
- 4 files changed, 7 insertions(+), 10 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 1b8310992dd0..65fdbdeb5134 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -361,7 +361,7 @@ static int bch_allocator_thread(void *arg)
- * new stuff to them:
- */
- allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
-- if (CACHE_SYNC(&ca->set->sb)) {
-+ if (CACHE_SYNC(&ca->sb)) {
- /*
- * This could deadlock if an allocation with a btree
- * node locked ever blocked - having the btree node
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index ccd5de0ab0fe..e2810668ede3 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -915,7 +915,7 @@ atomic_t *bch_journal(struct cache_set *c,
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return NULL;
-
-- if (!CACHE_SYNC(&c->sb))
-+ if (!CACHE_SYNC(&c->cache->sb))
- return NULL;
-
- w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 80cfb9dfe93e..6b94b396f9e9 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1954,7 +1954,7 @@ static int run_cache_set(struct cache_set *c)
- c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
-- if (CACHE_SYNC(&c->sb)) {
-+ if (CACHE_SYNC(&c->cache->sb)) {
- struct bkey *k;
- struct jset *j;
-
-@@ -2077,7 +2077,7 @@ static int run_cache_set(struct cache_set *c)
- * everything is set up - fortunately journal entries won't be
- * written until the SET_CACHE_SYNC() here:
- */
-- SET_CACHE_SYNC(&c->sb, true);
-+ SET_CACHE_SYNC(&c->cache->sb, true);
-
- bch_journal_next(&c->journal);
- bch_journal_meta(c, &cl);
-@@ -2123,9 +2123,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!CACHE_SYNC(&ca->sb))
-- SET_CACHE_SYNC(&c->sb, false);
--
- goto found;
- }
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index 4bfe98faadcc..554e3afc9b68 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -711,7 +711,7 @@ SHOW(__bch_cache_set)
- {
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
-
-- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
-+ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
-@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
- if (attr == &sysfs_synchronous) {
- bool sync = strtoul_or_return(buf);
-
-- if (sync != CACHE_SYNC(&c->sb)) {
-- SET_CACHE_SYNC(&c->sb, sync);
-+ if (sync != CACHE_SYNC(&c->cache->sb)) {
-+ SET_CACHE_SYNC(&c->cache->sb, sync);
- bcache_write_super(c);
- }
- }
---
-2.26.2
-
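The sync flag now lives only on the cache's in-memory super block, so both the journal path and the sysfs handlers test CACHE_SYNC(&c->cache->sb), as the hunks above show. A one-line sketch of the check behind a hypothetical helper:

/* hypothetical helper, illustration only: is this cache set in sync mode? */
static inline bool cache_set_sync(struct cache_set *c)
{
	return CACHE_SYNC(&c->cache->sb);
}
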
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
deleted file mode 100644
index 693f0d0..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
+++ /dev/null
@@ -1,415 +0,0 @@
-From ae87df3f4f9278077ba811bd6ddc4389edd807dd Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 00:20:00 +0800
-Subject: [PATCH v1 13/14] bcache: remove embedded struct cache_sb from struct
- cache_set
-
-Since the bcache code was merged into the mainline kernel, each cache
-set has only had one single cache in it. The multiple-cache framework
-is there but the code is far from complete. Considering that multiple
-copies of cached data can also be stored on e.g. md raid1 devices, it
-is indeed unnecessary to support multiple caches in one cache set.
-
-The previous preparation patches fix the dependencies of explicitly
-making a cache set have only a single cache. Now we don't have to
-maintain an embedded partial super block in struct cache_set; the
-in-memory super block can be directly referenced from struct cache.
-
-This patch removes the embedded struct cache_sb from struct cache_set,
-and fixes all locations where the super block was referenced from this
-removed copy by referencing the in-memory super block of struct
-cache.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 6 +++---
- drivers/md/bcache/bcache.h | 4 +---
- drivers/md/bcache/btree.c | 17 +++++++++--------
- drivers/md/bcache/btree.h | 2 +-
- drivers/md/bcache/extents.c | 6 +++---
- drivers/md/bcache/features.c | 4 ++--
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 11 ++++++-----
- drivers/md/bcache/request.c | 4 ++--
- drivers/md/bcache/super.c | 19 +++++++++----------
- drivers/md/bcache/writeback.c | 2 +-
- 11 files changed, 38 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 65fdbdeb5134..8c371d5eef8e 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
- struct open_bucket, list);
- found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
-- ret->sectors_free = c->sb.bucket_size;
-+ ret->sectors_free = c->cache->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
- }
-
-- if (b->sectors_free < c->sb.block_size)
-+ if (b->sectors_free < c->cache->sb.block_size)
- b->sectors_free = 0;
-
- /*
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94d4baf4c405..1d57f48307e6 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -517,8 +517,6 @@ struct cache_set {
- atomic_t idle_counter;
- atomic_t at_max_writeback_rate;
-
-- struct cache_sb sb;
--
- struct cache *cache;
-
- struct bcache_device **devices;
-@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
-
- static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
- {
-- return s & (c->sb.bucket_size - 1);
-+ return s & (c->cache->sb.bucket_size - 1);
- }
-
- static inline struct cache *PTR_CACHE(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index c91b4d58a5b3..d09103cc7da5 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
-
- }
-
-@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
- * See the comment arount cache_set->fill_iter.
- */
- iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
-+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
- iter->used = 0;
-
- #ifdef CONFIG_BCACHE_DEBUG
-@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad magic";
-- if (i->magic != bset_magic(&b->c->sb))
-+ if (i->magic != bset_magic(&b->c->cache->sb))
- goto err;
-
- err = "bad checksum";
-@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
- out:
- mempool_free(iter, &b->c->fill_iter);
- return;
-@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
- b->written += set_blocks(i, block_bytes(b->c->cache));
-@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
- if (c->verify_data)
- list_move(&c->verify_data->list, &c->btree_cache);
-
-- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
-+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
- #endif
-
- list_splice(&c->btree_cache_freeable,
-@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
- mutex_init(&c->verify_lock);
-
- c->verify_ondisk = (void *)
-- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
-+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
-+ ilog2(meta_bucket_pages(&c->cache->sb)));
- if (!c->verify_ondisk) {
- /*
- * Don't worry about the mca_rereserve buckets
-@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
- }
-
- b->parent = parent;
-- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
-+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
-
- mutex_unlock(&c->bucket_lock);
-
-diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
-index 257969980c49..50482107134f 100644
---- a/drivers/md/bcache/btree.h
-+++ b/drivers/md/bcache/btree.h
-@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
-
- static inline void set_gc_sectors(struct cache_set *c)
- {
-- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
-+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
- }
-
- void bkey_put(struct cache_set *c, struct bkey *k);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 9162af5bb6ec..f4658a1f37b8 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- return true;
-@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size)
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
- return "bad, length too big";
- if (bucket < ca->sb.first_bucket)
- return "bad, short offset";
-@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
- size_t n = PTR_BUCKET_NR(b->c, k, j);
-
- pr_cont(" bucket %zu", n);
-- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
- pr_cont(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
- }
-diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
-index 4442df48d28c..6469223f0b77 100644
---- a/drivers/md/bcache/features.c
-+++ b/drivers/md/bcache/features.c
-@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
- for (f = &feature_list[0]; f->compat != 0; f++) { \
- if (f->compat != BCH_FEATURE_ ## type) \
- continue; \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
- if (first) { \
- out += snprintf(out, buf + size - out, \
- "["); \
-@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
- \
- out += snprintf(out, buf + size - out, "%s", f->string);\
- \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
- out += snprintf(out, buf + size - out, "]"); \
- \
- first = false; \
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index a14a445618b4..dad71a6b7889 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
- struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
- struct bio *bio = &b->bio;
-
-- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
-+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
-
- return bio;
- }
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index e2810668ede3..c5526e5087ef 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
-
- bkey_init(k);
- SET_KEY_PTRS(k, 1);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
-
- out:
- if (!journal_full(&c->journal))
-@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
-- c->sb.block_size;
-+ ca->sb.block_size;
-
- struct bio *bio;
- struct bio_list list;
-@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-- w->data->magic = jset_magic(&c->sb);
-+ w->data->magic = jset_magic(&ca->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- size_t sectors;
- struct closure cl;
- bool wait = false;
-+ struct cache *ca = c->cache;
-
- closure_init_stack(&cl);
-
-@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c->cache)) * c->sb.block_size;
-+ block_bytes(ca)) * ca->sb.block_size;
-
- if (sectors <= min_t(size_t,
-- c->journal.blocks_free * c->sb.block_size,
-+ c->journal.blocks_free * ca->sb.block_size,
- PAGE_SECTORS << JSET_BITS))
- return w;
-
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 02408fdbf5bb..37e9cf8dbfc1 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
- goto skip;
- }
-
-- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
-- bio_sectors(bio) & (c->sb.block_size - 1)) {
-+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
-+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
- pr_debug("skipping unaligned io\n");
- goto skip;
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 6b94b396f9e9..e03676e1b370 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -471,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
- {
- BKEY_PADDED(key) k;
- struct closure cl;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- unsigned int size;
-
- closure_init_stack(&cl);
-@@ -480,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
-- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
-+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
- SET_KEY_SIZE(&k.key, size);
- uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
- closure_sync(&cl);
-
- /* Only one bucket used for uuid write */
-- ca = PTR_CACHE(c, &k.key, 0);
- atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
-
- bkey_copy(&c->uuid_bucket, &k.key);
-@@ -1199,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- return -EINVAL;
- }
-
-- if (dc->sb.block_size < c->sb.block_size) {
-+ if (dc->sb.block_size < c->cache->sb.block_size) {
- /* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
-@@ -1666,7 +1665,7 @@ static void cache_set_free(struct closure *cl)
- }
-
- bch_bset_sort_state_free(&c->sort);
-- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
-
- if (c->moving_gc_wq)
- destroy_workqueue(c->moving_gc_wq);
-@@ -1857,10 +1856,10 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
-- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
-+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
- c->devices_max_used = 0;
- atomic_set(&c->attached_dev_nr, 0);
-- c->btree_pages = meta_bucket_pages(&c->sb);
-+ c->btree_pages = meta_bucket_pages(sb);
- if (c->btree_pages > BTREE_MAX_PAGES)
- c->btree_pages = max_t(int, c->btree_pages / 4,
- BTREE_MAX_PAGES);
-@@ -1898,7 +1897,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
- sizeof(struct bbio) +
-- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
-+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
- goto err;
-
- if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
-@@ -1908,7 +1907,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
- goto err;
-
-- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
-+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
- if (!c->uuids)
- goto err;
-
-@@ -2088,7 +2087,7 @@ static int run_cache_set(struct cache_set *c)
- goto err;
-
- closure_sync(&cl);
-- c->sb.last_mount = (u32)ktime_get_real_seconds();
-+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
- bcache_write_super(c);
-
- list_for_each_entry_safe(dc, t, &uncached_devices, list)
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 4f4ad6b3d43a..3c74996978da 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
- * This is the size of the cache, minus the amount used for
- * flash-only devices
- */
-- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
-+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
- atomic_long_read(&c->flash_dev_dirty_sectors);
-
- /*
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch
deleted file mode 100644
index f2597c4..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v1/v1-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch
+++ /dev/null
@@ -1,261 +0,0 @@
-From df40e8c0d6f09a9d6216e7ba3bb100b4d8ac8634 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 00:49:17 +0800
-Subject: [PATCH v1 14/14] bcache: move struct cache_sb out of uapi bcache.h
-
-struct cache_sb does not exactly map to cache_sb_disk; it is only for the
-in-memory super block and doesn't belong in uapi bcache.h.
-
-This patch moves the struct cache_sb definition and the dependent
-macros and inline routines from include/uapi/linux/bcache.h to
-drivers/md/bcache/bcache.h, which is the proper location for them.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 99 +++++++++++++++++++++++++++++++++++++
- include/uapi/linux/bcache.h | 98 ------------------------------------
- 2 files changed, 99 insertions(+), 98 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 1d57f48307e6..b755bf7832ac 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -279,6 +279,82 @@ struct bcache_device {
- unsigned int cmd, unsigned long arg);
- };
-
-+/*
-+ * This is for in-memory bcache super block.
-+ * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
-+ * size, ordering and even whole struct size may be different
-+ * from cache_sb_disk.
-+ */
-+struct cache_sb {
-+ __u64 offset; /* sector where this sb was written */
-+ __u64 version;
-+
-+ __u8 magic[16];
-+
-+ __u8 uuid[16];
-+ union {
-+ __u8 set_uuid[16];
-+ __u64 set_magic;
-+ };
-+ __u8 label[SB_LABEL_SIZE];
-+
-+ __u64 flags;
-+ __u64 seq;
-+
-+ __u64 feature_compat;
-+ __u64 feature_incompat;
-+ __u64 feature_ro_compat;
-+
-+ union {
-+ struct {
-+ /* Cache devices */
-+ __u64 nbuckets; /* device size */
-+
-+ __u16 block_size; /* sectors */
-+ __u16 nr_in_set;
-+ __u16 nr_this_dev;
-+ __u32 bucket_size; /* sectors */
-+ };
-+ struct {
-+ /* Backing devices */
-+ __u64 data_offset;
-+
-+ /*
-+ * block_size from the cache device section is still used by
-+ * backing devices, so don't add anything here until we fix
-+ * things to not need it for backing devices anymore
-+ */
-+ };
-+ };
-+
-+ __u32 last_mount; /* time overflow in y2106 */
-+
-+ __u16 first_bucket;
-+ union {
-+ __u16 njournal_buckets;
-+ __u16 keys;
-+ };
-+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-+};
-+
-+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-+#define CACHE_REPLACEMENT_LRU 0U
-+#define CACHE_REPLACEMENT_FIFO 1U
-+#define CACHE_REPLACEMENT_RANDOM 2U
-+
-+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-+#define CACHE_MODE_WRITETHROUGH 0U
-+#define CACHE_MODE_WRITEBACK 1U
-+#define CACHE_MODE_WRITEAROUND 2U
-+#define CACHE_MODE_NONE 3U
-+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-+#define BDEV_STATE_NONE 0U
-+#define BDEV_STATE_CLEAN 1U
-+#define BDEV_STATE_DIRTY 2U
-+#define BDEV_STATE_STALE 3U
-+
- struct io {
- /* Used to track sequential IO so it can be skipped */
- struct hlist_node hash;
-@@ -840,6 +916,13 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
- }
-
-+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
-+{
-+ return sb->version == BCACHE_SB_VERSION_BDEV
-+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
-+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
-+}
-+
- /* Btree key macros */
-
- /*
-@@ -958,6 +1041,22 @@ static inline void wait_for_kthread_stop(void)
- }
- }
-
-+/* generate magic number */
-+static inline __u64 jset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ JSET_MAGIC;
-+}
-+
-+static inline __u64 pset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ PSET_MAGIC;
-+}
-+
-+static inline __u64 bset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ BSET_MAGIC;
-+}
-+
- /* Forward declarations */
-
- void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
-diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
-index 52e8bcb33981..18166a3d8503 100644
---- a/include/uapi/linux/bcache.h
-+++ b/include/uapi/linux/bcache.h
-@@ -216,89 +216,6 @@ struct cache_sb_disk {
- __le16 bucket_size_hi;
- };
-
--/*
-- * This is for in-memory bcache super block.
-- * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
-- * size, ordering and even whole struct size may be different
-- * from cache_sb_disk.
-- */
--struct cache_sb {
-- __u64 offset; /* sector where this sb was written */
-- __u64 version;
--
-- __u8 magic[16];
--
-- __u8 uuid[16];
-- union {
-- __u8 set_uuid[16];
-- __u64 set_magic;
-- };
-- __u8 label[SB_LABEL_SIZE];
--
-- __u64 flags;
-- __u64 seq;
--
-- __u64 feature_compat;
-- __u64 feature_incompat;
-- __u64 feature_ro_compat;
--
-- union {
-- struct {
-- /* Cache devices */
-- __u64 nbuckets; /* device size */
--
-- __u16 block_size; /* sectors */
-- __u16 nr_in_set;
-- __u16 nr_this_dev;
-- __u32 bucket_size; /* sectors */
-- };
-- struct {
-- /* Backing devices */
-- __u64 data_offset;
--
-- /*
-- * block_size from the cache device section is still used by
-- * backing devices, so don't add anything here until we fix
-- * things to not need it for backing devices anymore
-- */
-- };
-- };
--
-- __u32 last_mount; /* time overflow in y2106 */
--
-- __u16 first_bucket;
-- union {
-- __u16 njournal_buckets;
-- __u16 keys;
-- };
-- __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
--};
--
--static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
--{
-- return sb->version == BCACHE_SB_VERSION_BDEV
-- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
-- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
--}
--
--BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
--BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
--BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
--#define CACHE_REPLACEMENT_LRU 0U
--#define CACHE_REPLACEMENT_FIFO 1U
--#define CACHE_REPLACEMENT_RANDOM 2U
--
--BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
--#define CACHE_MODE_WRITETHROUGH 0U
--#define CACHE_MODE_WRITEBACK 1U
--#define CACHE_MODE_WRITEAROUND 2U
--#define CACHE_MODE_NONE 3U
--BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
--#define BDEV_STATE_NONE 0U
--#define BDEV_STATE_CLEAN 1U
--#define BDEV_STATE_DIRTY 2U
--#define BDEV_STATE_STALE 3U
--
- /*
- * Magic numbers
- *
-@@ -310,21 +227,6 @@ BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
- #define PSET_MAGIC 0x6750e15f87337f91ULL
- #define BSET_MAGIC 0x90135c78b99e07f5ULL
-
--static inline __u64 jset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ JSET_MAGIC;
--}
--
--static inline __u64 pset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ PSET_MAGIC;
--}
--
--static inline __u64 bset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ BSET_MAGIC;
--}
--
- /*
- * Journal
- *
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch
deleted file mode 100644
index a013fd3..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From 6400744caa2bdbde213c6b336196ec074f715502 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 22 Aug 2020 19:35:42 +0800
-Subject: [PATCH v2 00/12] bcache: remove multiple caches code framework
-
-The multiple caches code framework in bcache is meant to store multiple
-copies of the cached data among multiple caches of the cache set. The
-current code framework just writes the data to each cache without any
-extra condition handling (e.g. device failure, slow devices). This code
-framework was never completed and never will be. Considering that people
-may use md raid1 for the same data duplication purpose, the multiple
-caches framework is effectively useless dead code.
-
-Due to the multiple caches code framework, bcache has two data structures,
-struct cache and struct cache_set, to manage the cache device. Indeed,
-since bcache was merged into the mainline kernel in Linux v3.10, a cache
-set has only ever had one cache; the unnecessary two-level abstraction
-takes extra effort to maintain redundant information between struct cache
-and struct cache_set, for example the in-memory super block struct cache_sb.
-
-This is the first wave of effort to remove the multiple caches framework
-and make the relation between code and data structures clearer. This
-series explicitly makes each cache set have only a single cache, removes
-the embedded partial super block in struct cache_set, and directly
-references the cache's in-memory super block.
-
-The patch set now passes basic smoke testing; I am posting this series
-early for your review and comments. More fixes will follow soon after
-further testing.
-
-Thanks in advance.
-
-Coly Li
----
-Changelog:
-v2: Add reviewed-by from Hannes Reinecke, update patches by suggestion
- from Hannes and Christoph.
-v1: initial version.
-
-Coly Li (12):
- bcache: remove 'int n' from parameter list of bch_bucket_alloc_set()
- bcache: explicitly make cache_set only have single cache
- bcache: remove for_each_cache()
- bcache: add set_uuid in struct cache_set
- bcache: only use block_bytes() on struct cache
- bcache: remove useless alloc_bucket_pages()
- bcache: remove useless bucket_pages()
- bcache: only use bucket_bytes() on struct cache
- bcache: don't check seq numbers in register_cache_set()
- bcache: remove can_attach_cache()
- bcache: check and set sync status on cache's in-memory super block
- bcache: remove embedded struct cache_sb from struct cache_set
-
- drivers/md/bcache/alloc.c | 60 ++++-----
- drivers/md/bcache/bcache.h | 29 ++--
- drivers/md/bcache/btree.c | 144 ++++++++++----------
- drivers/md/bcache/btree.h | 2 +-
- drivers/md/bcache/debug.c | 10 +-
- drivers/md/bcache/extents.c | 6 +-
- drivers/md/bcache/features.c | 4 +-
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 246 ++++++++++++++++------------------
- drivers/md/bcache/movinggc.c | 58 ++++----
- drivers/md/bcache/request.c | 6 +-
- drivers/md/bcache/super.c | 232 ++++++++++++--------------------
- drivers/md/bcache/sysfs.c | 10 +-
- drivers/md/bcache/writeback.c | 2 +-
- include/trace/events/bcache.h | 4 +-
- 15 files changed, 352 insertions(+), 463 deletions(-)
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
deleted file mode 100644
index b72045a..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
+++ /dev/null
@@ -1,152 +0,0 @@
-From ede5b363b28a12b2481232795aee44bf5344b8ec Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:07:05 +0800
-Subject: [PATCH v2 01/12] bcache: remove 'int n' from parameter list of
- bch_bucket_alloc_set()
-
-The parameter 'int n' of bch_bucket_alloc_set() is not clearly
-defined. From the code comments, n is the number of buckets to allocate,
-but from the code itself 'n' is the maximum number of caches to iterate
-over. Indeed, at all the locations where bch_bucket_alloc_set() is
-called, 'n' is always 1.
-
-This patch removes the confusing and unnecessary 'int n' from the
-parameter list of bch_bucket_alloc_set(), and explicitly allocates only
-1 bucket for its callers.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
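(For quick reference, a condensed sketch of the prototype change described
above, reconstructed from the bcache.h hunk in the diff below; it is
illustrative only, not an additional change.)

    /* before: 'n' is passed in, but is always 1 at every call site */
    int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                             struct bkey *k, int n, bool wait);

    /* after: the parameter is dropped; exactly one bucket is allocated */
    int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                             struct bkey *k, bool wait);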
- drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
- drivers/md/bcache/bcache.h | 4 ++--
- drivers/md/bcache/btree.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- 4 files changed, 19 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 52035a78d836..4493ff57476d 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -49,7 +49,7 @@
- *
- * bch_bucket_alloc() allocates a single bucket from a specific cache.
- *
-- * bch_bucket_alloc_set() allocates one or more buckets from different caches
-+ * bch_bucket_alloc_set() allocates one bucket from different caches
- * out of a cache set.
- *
- * free_some_buckets() drives all the processes described above. It's called
-@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
-- int i;
-+ struct cache *ca;
-+ long b;
-
- /* No allocation if CACHE_SET_IO_DISABLE bit is set */
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return -1;
-
- lockdep_assert_held(&c->bucket_lock);
-- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
-
- bkey_init(k);
-
-- /* sort by free space/prio of oldest data in caches */
--
-- for (i = 0; i < n; i++) {
-- struct cache *ca = c->cache_by_alloc[i];
-- long b = bch_bucket_alloc(ca, reserve, wait);
-+ ca = c->cache_by_alloc[0];
-+ b = bch_bucket_alloc(ca, reserve, wait);
-+ if (b == -1)
-+ goto err;
-
-- if (b == -1)
-- goto err;
-+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
-+ bucket_to_sector(c, b),
-+ ca->sb.nr_this_dev);
-
-- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-- bucket_to_sector(c, b),
-- ca->sb.nr_this_dev);
--
-- SET_KEY_PTRS(k, i + 1);
-- }
-+ SET_KEY_PTRS(k, 1);
-
- return 0;
- err:
-@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- }
-
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
- int ret;
-
- mutex_lock(&c->bucket_lock);
-- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
-+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
-
- spin_unlock(&c->data_bucket_lock);
-
-- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
-+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
- return false;
-
- spin_lock(&c->data_bucket_lock);
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 4fd03d2496d8..5ff6e9573935 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
-
- long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
- unsigned int sectors, unsigned int write_point,
- unsigned int write_prio, bool wait);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 3d8bd0692af3..e2a719fed53b 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
-
- mutex_lock(&c->bucket_lock);
- retry:
-- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
-+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
- goto err;
-
- bkey_put(c, &k.key);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 1bbdc410ee3c..7057ec48f3d1 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
- closure_init_stack(&cl);
- lockdep_assert_held(&bch_register_lock);
-
-- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
-+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
deleted file mode 100644
index bc863af..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From 4463f2bf6406f234972f6cb21413d685d2924dea Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:30:59 +0800
-Subject: [PATCH v2 02/12] bcache: explicitly make cache_set only have single
- cache
-
-Although the bcache code currently has a framework for multiple caches
-in a cache set, the multiple caches support was never completed, and
-users use md raid1 for multiple copies of the cached data.
-
-This patch makes the following changes in struct cache_set, to
-explicitly make a cache_set have only a single cache:
-- Change pointer array "*cache[MAX_CACHES_PER_SET]" to a single pointer
- "*cache".
-- Remove pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
-- Remove "caches_loaded".
-
-Now the code reflects exactly what it does in practice: only one cache
-is used in the cache set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
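(A condensed sketch of the struct cache_set change described above,
reconstructed from the bcache.h hunk in the diff below; shown here only
for illustration.)

    /* before: an array of caches plus allocation-order bookkeeping */
    struct cache *cache[MAX_CACHES_PER_SET];
    struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
    int caches_loaded;

    /* after: a cache_set explicitly holds a single cache */
    struct cache *cache;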
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/bcache.h | 8 +++-----
- drivers/md/bcache/super.c | 19 ++++++++-----------
- 3 files changed, 12 insertions(+), 17 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 4493ff57476d..3385f6add6df 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-
- bkey_init(k);
-
-- ca = c->cache_by_alloc[0];
-+ ca = c->cache;
- b = bch_bucket_alloc(ca, reserve, wait);
- if (b == -1)
- goto err;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 5ff6e9573935..aa112c1adba1 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -519,9 +519,7 @@ struct cache_set {
-
- struct cache_sb sb;
-
-- struct cache *cache[MAX_CACHES_PER_SET];
-- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
-- int caches_loaded;
-+ struct cache *cache;
-
- struct bcache_device **devices;
- unsigned int devices_max_used;
-@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k,
- unsigned int ptr)
- {
-- return c->cache[PTR_DEV(k, ptr)];
-+ return c->cache;
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
-@@ -890,7 +888,7 @@ do { \
- /* Looping macros */
-
- #define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
-+ for (iter = 0; ca = cs->cache, iter < 1; iter++)
-
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 7057ec48f3d1..e9ccfa17beb8 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1675,7 +1675,7 @@ static void cache_set_free(struct closure *cl)
- for_each_cache(ca, c, i)
- if (ca) {
- ca->set = NULL;
-- c->cache[ca->sb.nr_this_dev] = NULL;
-+ c->cache = NULL;
- kobject_put(&ca->kobj);
- }
-
-@@ -2166,7 +2166,7 @@ static const char *register_cache_set(struct cache *ca)
-
- list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-- if (c->cache[ca->sb.nr_this_dev])
-+ if (c->cache)
- return "duplicate cache set member";
-
- if (!can_attach_cache(ca, c))
-@@ -2216,14 +2216,11 @@ static const char *register_cache_set(struct cache *ca)
-
- kobject_get(&ca->kobj);
- ca->set = c;
-- ca->set->cache[ca->sb.nr_this_dev] = ca;
-- c->cache_by_alloc[c->caches_loaded++] = ca;
-+ ca->set->cache = ca;
-
-- if (c->caches_loaded == c->sb.nr_in_set) {
-- err = "failed to run cache set";
-- if (run_cache_set(c) < 0)
-- goto err;
-- }
-+ err = "failed to run cache set";
-+ if (run_cache_set(c) < 0)
-+ goto err;
-
- return NULL;
- err:
-@@ -2240,8 +2237,8 @@ void bch_cache_release(struct kobject *kobj)
- unsigned int i;
-
- if (ca->set) {
-- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
-- ca->set->cache[ca->sb.nr_this_dev] = NULL;
-+ BUG_ON(ca->set->cache != ca);
-+ ca->set->cache = NULL;
- }
-
- free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch
deleted file mode 100644
index 6cb26ed..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch
+++ /dev/null
@@ -1,896 +0,0 @@
-From 938e528cd160cbab1581c53c593572a39fed519a Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 01:26:09 +0800
-Subject: [PATCH v2 03/12] bcache: remove for_each_cache()
-
-Since each cache_set now explicitly has a single cache, for_each_cache()
-is unnecessary. This patch removes the macro, updates all locations
-where it is used, and makes sure all code logic remains consistent.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
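(A condensed sketch of the removed iteration macro described above, taken
from the bcache.h hunk in the diff below, together with the typical
replacement pattern used throughout this patch; illustrative only.)

    /* removed: after the previous patch this loop runs exactly once */
    #define for_each_cache(ca, cs, iter) \
            for (iter = 0; ca = cs->cache, iter < 1; iter++)

    /* replacement pattern: fetch the single cache directly */
    struct cache *ca = c->cache;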
- drivers/md/bcache/alloc.c | 17 ++-
- drivers/md/bcache/bcache.h | 9 +-
- drivers/md/bcache/btree.c | 103 +++++++---------
- drivers/md/bcache/journal.c | 229 ++++++++++++++++-------------------
- drivers/md/bcache/movinggc.c | 58 +++++----
- drivers/md/bcache/super.c | 115 ++++++++----------
- 6 files changed, 237 insertions(+), 294 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 3385f6add6df..1b8310992dd0 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- struct cache *ca;
- struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-- unsigned int i;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
-
- c->min_prio = USHRT_MAX;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca)
-- if (b->prio &&
-- b->prio != BTREE_PRIO &&
-- !atomic_read(&b->pin)) {
-- b->prio--;
-- c->min_prio = min(c->min_prio, b->prio);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca)
-+ if (b->prio &&
-+ b->prio != BTREE_PRIO &&
-+ !atomic_read(&b->pin)) {
-+ b->prio--;
-+ c->min_prio = min(c->min_prio, b->prio);
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index aa112c1adba1..7ffe6b2d179b 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -887,9 +887,6 @@ do { \
-
- /* Looping macros */
-
--#define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache, iter < 1; iter++)
--
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
- b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
-
- static inline void wake_up_allocators(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- wake_up_process(ca->alloc_thread);
-+ wake_up_process(ca->alloc_thread);
- }
-
- static inline void closure_bio_submit(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index e2a719fed53b..0817ad510d9f 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- static int btree_check_reserve(struct btree *b, struct btree_op *op)
- {
- struct cache_set *c = b->c;
-- struct cache *ca;
-- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
-+ struct cache *ca = c->cache;
-+ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i)
-- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-- if (op)
-- prepare_to_wait(&c->btree_cache_wait, &op->wait,
-- TASK_UNINTERRUPTIBLE);
-- mutex_unlock(&c->bucket_lock);
-- return -EINTR;
-- }
-+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-+ if (op)
-+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
-+ TASK_UNINTERRUPTIBLE);
-+ mutex_unlock(&c->bucket_lock);
-+ return -EINTR;
-+ }
-
- mutex_unlock(&c->bucket_lock);
-
-@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned int i;
-
- if (!c->gc_mark_valid)
- return;
-@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
- c->gc_mark_valid = 0;
- c->gc_done = ZERO_KEY;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca) {
-- b->last_gc = b->gen;
-- if (!atomic_read(&b->pin)) {
-- SET_GC_MARK(b, 0);
-- SET_GC_SECTORS_USED(b, 0);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca) {
-+ b->last_gc = b->gen;
-+ if (!atomic_read(&b->pin)) {
-+ SET_GC_MARK(b, 0);
-+ SET_GC_SECTORS_USED(b, 0);
- }
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
- {
- struct bucket *b;
- struct cache *ca;
-- unsigned int i;
-+ unsigned int i, j;
-+ uint64_t *k;
-
- mutex_lock(&c->bucket_lock);
-
-@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
- struct bcache_device *d = c->devices[i];
- struct cached_dev *dc;
- struct keybuf_key *w, *n;
-- unsigned int j;
-
- if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
-@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
- rcu_read_unlock();
-
- c->avail_nbuckets = 0;
-- for_each_cache(ca, c, i) {
-- uint64_t *i;
-
-- ca->invalidate_needs_gc = 0;
-+ ca = c->cache;
-+ ca->invalidate_needs_gc = 0;
-
-- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for (i = ca->prio_buckets;
-- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->prio_buckets;
-+ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for_each_bucket(b, ca) {
-- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-+ for_each_bucket(b, ca) {
-+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-
-- if (atomic_read(&b->pin))
-- continue;
-+ if (atomic_read(&b->pin))
-+ continue;
-
-- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-
-- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-- c->avail_nbuckets++;
-- }
-+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-+ c->avail_nbuckets++;
- }
-
- mutex_unlock(&c->bucket_lock);
-@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
-
- static bool gc_should_run(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- if (ca->invalidate_needs_gc)
-- return true;
-+ if (ca->invalidate_needs_gc)
-+ return true;
-
- if (atomic_read(&c->sectors_to_gc) < 0)
- return true;
-@@ -2081,9 +2075,8 @@ int bch_btree_check(struct cache_set *c)
-
- void bch_initial_gc_finish(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-
- bch_btree_gc_finish(c);
-
-@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
- * This is only safe for buckets that have no live data in them, which
- * there should always be some of.
- */
-- for_each_cache(ca, c, i) {
-- for_each_bucket(b, ca) {
-- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-- fifo_full(&ca->free[RESERVE_BTREE]))
-- break;
-+ for_each_bucket(b, ca) {
-+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-+ fifo_full(&ca->free[RESERVE_BTREE]))
-+ break;
-
-- if (bch_can_invalidate_bucket(ca, b) &&
-- !GC_MARK(b)) {
-- __bch_invalidate_one_bucket(ca, b);
-- if (!fifo_push(&ca->free[RESERVE_PRIO],
-- b - ca->buckets))
-- fifo_push(&ca->free[RESERVE_BTREE],
-- b - ca->buckets);
-- }
-+ if (bch_can_invalidate_bucket(ca, b) &&
-+ !GC_MARK(b)) {
-+ __bch_invalidate_one_bucket(ca, b);
-+ if (!fifo_push(&ca->free[RESERVE_PRIO],
-+ b - ca->buckets))
-+ fifo_push(&ca->free[RESERVE_BTREE],
-+ b - ca->buckets);
- }
- }
-
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 77fbfd52edcf..027d0f8c4daf 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
- ret; \
- })
-
-- struct cache *ca;
-- unsigned int iter;
-+ struct cache *ca = c->cache;
- int ret = 0;
-+ struct journal_device *ja = &ca->journal;
-+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-+ unsigned int i, l, r, m;
-+ uint64_t seq;
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-- unsigned int i, l, r, m;
-- uint64_t seq;
--
-- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-
-+ /*
-+ * Read journal buckets ordered by golden ratio hash to quickly
-+ * find a sequence of buckets with valid journal entries
-+ */
-+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
- /*
-- * Read journal buckets ordered by golden ratio hash to quickly
-- * find a sequence of buckets with valid journal entries
-+ * We must try the index l with ZERO first for
-+ * correctness due to the scenario that the journal
-+ * bucket is circular buffer which might have wrapped
- */
-- for (i = 0; i < ca->sb.njournal_buckets; i++) {
-- /*
-- * We must try the index l with ZERO first for
-- * correctness due to the scenario that the journal
-- * bucket is circular buffer which might have wrapped
-- */
-- l = (i * 2654435769U) % ca->sb.njournal_buckets;
-+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
-
-- if (test_bit(l, bitmap))
-- break;
-+ if (test_bit(l, bitmap))
-+ break;
-
-- if (read_bucket(l))
-- goto bsearch;
-- }
-+ if (read_bucket(l))
-+ goto bsearch;
-+ }
-
-- /*
-- * If that fails, check all the buckets we haven't checked
-- * already
-- */
-- pr_debug("falling back to linear search\n");
-+ /*
-+ * If that fails, check all the buckets we haven't checked
-+ * already
-+ */
-+ pr_debug("falling back to linear search\n");
-
-- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-- if (read_bucket(l))
-- goto bsearch;
-+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-+ if (read_bucket(l))
-+ goto bsearch;
-
-- /* no journal entries on this device? */
-- if (l == ca->sb.njournal_buckets)
-- continue;
-+ /* no journal entries on this device? */
-+ if (l == ca->sb.njournal_buckets)
-+ goto out;
- bsearch:
-- BUG_ON(list_empty(list));
-+ BUG_ON(list_empty(list));
-
-- /* Binary search */
-- m = l;
-- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-- pr_debug("starting binary search, l %u r %u\n", l, r);
-+ /* Binary search */
-+ m = l;
-+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-+ pr_debug("starting binary search, l %u r %u\n", l, r);
-
-- while (l + 1 < r) {
-- seq = list_entry(list->prev, struct journal_replay,
-- list)->j.seq;
-+ while (l + 1 < r) {
-+ seq = list_entry(list->prev, struct journal_replay,
-+ list)->j.seq;
-
-- m = (l + r) >> 1;
-- read_bucket(m);
-+ m = (l + r) >> 1;
-+ read_bucket(m);
-
-- if (seq != list_entry(list->prev, struct journal_replay,
-- list)->j.seq)
-- l = m;
-- else
-- r = m;
-- }
-+ if (seq != list_entry(list->prev, struct journal_replay,
-+ list)->j.seq)
-+ l = m;
-+ else
-+ r = m;
-+ }
-
-- /*
-- * Read buckets in reverse order until we stop finding more
-- * journal entries
-- */
-- pr_debug("finishing up: m %u njournal_buckets %u\n",
-- m, ca->sb.njournal_buckets);
-- l = m;
-+ /*
-+ * Read buckets in reverse order until we stop finding more
-+ * journal entries
-+ */
-+ pr_debug("finishing up: m %u njournal_buckets %u\n",
-+ m, ca->sb.njournal_buckets);
-+ l = m;
-
-- while (1) {
-- if (!l--)
-- l = ca->sb.njournal_buckets - 1;
-+ while (1) {
-+ if (!l--)
-+ l = ca->sb.njournal_buckets - 1;
-
-- if (l == m)
-- break;
-+ if (l == m)
-+ break;
-
-- if (test_bit(l, bitmap))
-- continue;
-+ if (test_bit(l, bitmap))
-+ continue;
-
-- if (!read_bucket(l))
-- break;
-- }
-+ if (!read_bucket(l))
-+ break;
-+ }
-
-- seq = 0;
-+ seq = 0;
-
-- for (i = 0; i < ca->sb.njournal_buckets; i++)
-- if (ja->seq[i] > seq) {
-- seq = ja->seq[i];
-- /*
-- * When journal_reclaim() goes to allocate for
-- * the first time, it'll use the bucket after
-- * ja->cur_idx
-- */
-- ja->cur_idx = i;
-- ja->last_idx = ja->discard_idx = (i + 1) %
-- ca->sb.njournal_buckets;
-+ for (i = 0; i < ca->sb.njournal_buckets; i++)
-+ if (ja->seq[i] > seq) {
-+ seq = ja->seq[i];
-+ /*
-+ * When journal_reclaim() goes to allocate for
-+ * the first time, it'll use the bucket after
-+ * ja->cur_idx
-+ */
-+ ja->cur_idx = i;
-+ ja->last_idx = ja->discard_idx = (i + 1) %
-+ ca->sb.njournal_buckets;
-
-- }
-- }
-+ }
-
-+out:
- if (!list_empty(list))
- c->journal.seq = list_entry(list->prev,
- struct journal_replay,
-@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
-
- static bool is_discard_enabled(struct cache_set *s)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = s->cache;
-
-- for_each_cache(ca, s, i)
-- if (ca->discard)
-- return true;
-+ if (ca->discard)
-+ return true;
-
- return false;
- }
-@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- uint64_t last_seq;
-- unsigned int iter, n = 0;
-+ unsigned int next;
-+ struct journal_device *ja = &ca->journal;
- atomic_t p __maybe_unused;
-
- atomic_long_inc(&c->reclaim);
-@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
-
- /* Update last_idx */
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
--
-- while (ja->last_idx != ja->cur_idx &&
-- ja->seq[ja->last_idx] < last_seq)
-- ja->last_idx = (ja->last_idx + 1) %
-- ca->sb.njournal_buckets;
-- }
-+ while (ja->last_idx != ja->cur_idx &&
-+ ja->seq[ja->last_idx] < last_seq)
-+ ja->last_idx = (ja->last_idx + 1) %
-+ ca->sb.njournal_buckets;
-
-- for_each_cache(ca, c, iter)
-- do_journal_discard(ca);
-+ do_journal_discard(ca);
-
- if (c->journal.blocks_free)
- goto out;
-
-- /*
-- * Allocate:
-- * XXX: Sort by free journal space
-- */
--
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ /* No space available on this device */
-+ if (next == ja->discard_idx)
-+ goto out;
-
-- /* No space available on this device */
-- if (next == ja->discard_idx)
-- continue;
-+ ja->cur_idx = next;
-+ k->ptr[0] = MAKE_PTR(0,
-+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-+ ca->sb.nr_this_dev);
-+ atomic_long_inc(&c->reclaimed_journal_buckets);
-
-- ja->cur_idx = next;
-- k->ptr[n++] = MAKE_PTR(0,
-- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-- ca->sb.nr_this_dev);
-- atomic_long_inc(&c->reclaimed_journal_buckets);
-- }
-+ bkey_init(k);
-+ SET_KEY_PTRS(k, 1);
-+ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
-- if (n) {
-- bkey_init(k);
-- SET_KEY_PTRS(k, n);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-- }
- out:
- if (!journal_full(&c->journal))
- __closure_wake_up(&c->journal.wait);
-@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
- __releases(c->journal.lock)
- {
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->btree_root, &c->root->key);
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
-- for_each_cache(ca, c, i)
-- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
--
-+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(&c->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 5872d6470470..b9c3d27ec093 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
-
- void bch_moving_gc(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-+ unsigned long sectors_to_move, reserve_sectors;
-
- if (!c->copy_gc_enabled)
- return;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i) {
-- unsigned long sectors_to_move = 0;
-- unsigned long reserve_sectors = ca->sb.bucket_size *
-+ sectors_to_move = 0;
-+ reserve_sectors = ca->sb.bucket_size *
- fifo_used(&ca->free[RESERVE_MOVINGGC]);
-
-- ca->heap.used = 0;
--
-- for_each_bucket(b, ca) {
-- if (GC_MARK(b) == GC_MARK_METADATA ||
-- !GC_SECTORS_USED(b) ||
-- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-- atomic_read(&b->pin))
-- continue;
--
-- if (!heap_full(&ca->heap)) {
-- sectors_to_move += GC_SECTORS_USED(b);
-- heap_add(&ca->heap, b, bucket_cmp);
-- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-- sectors_to_move -= bucket_heap_top(ca);
-- sectors_to_move += GC_SECTORS_USED(b);
--
-- ca->heap.data[0] = b;
-- heap_sift(&ca->heap, 0, bucket_cmp);
-- }
-- }
-+ ca->heap.used = 0;
-+
-+ for_each_bucket(b, ca) {
-+ if (GC_MARK(b) == GC_MARK_METADATA ||
-+ !GC_SECTORS_USED(b) ||
-+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-+ atomic_read(&b->pin))
-+ continue;
-
-- while (sectors_to_move > reserve_sectors) {
-- heap_pop(&ca->heap, b, bucket_cmp);
-- sectors_to_move -= GC_SECTORS_USED(b);
-+ if (!heap_full(&ca->heap)) {
-+ sectors_to_move += GC_SECTORS_USED(b);
-+ heap_add(&ca->heap, b, bucket_cmp);
-+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-+ sectors_to_move -= bucket_heap_top(ca);
-+ sectors_to_move += GC_SECTORS_USED(b);
-+
-+ ca->heap.data[0] = b;
-+ heap_sift(&ca->heap, 0, bucket_cmp);
- }
-+ }
-
-- while (heap_pop(&ca->heap, b, bucket_cmp))
-- SET_GC_MOVE(b, 1);
-+ while (sectors_to_move > reserve_sectors) {
-+ heap_pop(&ca->heap, b, bucket_cmp);
-+ sectors_to_move -= GC_SECTORS_USED(b);
- }
-
-+ while (heap_pop(&ca->heap, b, bucket_cmp))
-+ SET_GC_MOVE(b, 1);
-+
- mutex_unlock(&c->bucket_lock);
-
- c->moving_gc_keys.last_scanned = ZERO_KEY;
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index e9ccfa17beb8..91883d5c4b62 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
- void bcache_write_super(struct cache_set *c)
- {
- struct closure *cl = &c->sb_write;
-- struct cache *ca;
-- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-+ struct cache *ca = c->cache;
-+ struct bio *bio = &ca->sb_bio;
-+ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
- if (c->sb.version > version)
- version = c->sb.version;
-
-- for_each_cache(ca, c, i) {
-- struct bio *bio = &ca->sb_bio;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
-+ ca->sb.version = version;
-+ ca->sb.seq = c->sb.seq;
-+ ca->sb.last_mount = c->sb.last_mount;
-
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-
-- bio_init(bio, ca->sb_bv, 1);
-- bio_set_dev(bio, ca->bdev);
-- bio->bi_end_io = write_super_endio;
-- bio->bi_private = ca;
-+ bio_init(bio, ca->sb_bv, 1);
-+ bio_set_dev(bio, ca->bdev);
-+ bio->bi_end_io = write_super_endio;
-+ bio->bi_private = ca;
-
-- closure_get(cl);
-- __write_super(&ca->sb, ca->sb_disk, bio);
-- }
-+ closure_get(cl);
-+ __write_super(&ca->sb, ca->sb_disk, bio);
-
- closure_return_with_destructor(cl, bcache_write_super_unlock);
- }
-@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
- lockdep_assert_held(&bch_register_lock);
-
- if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = d->c->cache;
-
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
-
-- for_each_cache(ca, d->c, i)
-- bd_unlink_disk_holder(ca->bdev, d->disk);
-+ bd_unlink_disk_holder(ca->bdev, d->disk);
- }
- }
-
- static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- const char *name)
- {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- int ret;
-
-- for_each_cache(ca, d->c, i)
-- bd_link_disk_holder(ca->bdev, d->disk);
-+ bd_link_disk_holder(ca->bdev, d->disk);
-
- snprintf(d->name, BCACHEDEVNAME_SIZE,
- "%s%u", name, d->id);
-@@ -1663,7 +1656,6 @@ static void cache_set_free(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, cl);
- struct cache *ca;
-- unsigned int i;
-
- debugfs_remove(c->debug);
-
-@@ -1672,12 +1664,12 @@ static void cache_set_free(struct closure *cl)
- bch_journal_free(c);
-
- mutex_lock(&bch_register_lock);
-- for_each_cache(ca, c, i)
-- if (ca) {
-- ca->set = NULL;
-- c->cache = NULL;
-- kobject_put(&ca->kobj);
-- }
-+ ca = c->cache;
-+ if (ca) {
-+ ca->set = NULL;
-+ c->cache = NULL;
-+ kobject_put(&ca->kobj);
-+ }
-
- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-@@ -1703,9 +1695,8 @@ static void cache_set_free(struct closure *cl)
- static void cache_set_flush(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, caching);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct btree *b;
-- unsigned int i;
-
- bch_cache_accounting_destroy(&c->accounting);
-
-@@ -1730,9 +1721,8 @@ static void cache_set_flush(struct closure *cl)
- mutex_unlock(&b->write_lock);
- }
-
-- for_each_cache(ca, c, i)
-- if (ca->alloc_thread)
-- kthread_stop(ca->alloc_thread);
-+ if (ca->alloc_thread)
-+ kthread_stop(ca->alloc_thread);
-
- if (c->journal.cur) {
- cancel_delayed_work_sync(&c->journal.work);
-@@ -1973,16 +1963,14 @@ static int run_cache_set(struct cache_set *c)
- {
- const char *err = "cannot allocate memory";
- struct cached_dev *dc, *t;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct closure cl;
-- unsigned int i;
- LIST_HEAD(journal);
- struct journal_replay *l;
-
- closure_init_stack(&cl);
-
-- for_each_cache(ca, c, i)
-- c->nbuckets += ca->sb.nbuckets;
-+ c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
- if (CACHE_SYNC(&c->sb)) {
-@@ -2002,10 +1990,8 @@ static int run_cache_set(struct cache_set *c)
- j = &list_entry(journal.prev, struct journal_replay, list)->j;
-
- err = "IO error reading priorities";
-- for_each_cache(ca, c, i) {
-- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-- goto err;
-- }
-+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-+ goto err;
-
- /*
- * If prio_read() fails it'll call cache_set_error and we'll
-@@ -2049,9 +2035,8 @@ static int run_cache_set(struct cache_set *c)
- bch_journal_next(&c->journal);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- /*
- * First place it's safe to allocate: btree_check() and
-@@ -2070,28 +2055,23 @@ static int run_cache_set(struct cache_set *c)
- if (bch_journal_replay(c, &journal))
- goto err;
- } else {
-- pr_notice("invalidating existing data\n");
--
-- for_each_cache(ca, c, i) {
-- unsigned int j;
-+ unsigned int j;
-
-- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-- 2, SB_JOURNAL_BUCKETS);
-+ pr_notice("invalidating existing data\n");
-+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-+ 2, SB_JOURNAL_BUCKETS);
-
-- for (j = 0; j < ca->sb.keys; j++)
-- ca->sb.d[j] = ca->sb.first_bucket + j;
-- }
-+ for (j = 0; j < ca->sb.keys; j++)
-+ ca->sb.d[j] = ca->sb.first_bucket + j;
-
- bch_initial_gc_finish(c);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- mutex_lock(&c->bucket_lock);
-- for_each_cache(ca, c, i)
-- bch_prio_write(ca, true);
-+ bch_prio_write(ca, true);
- mutex_unlock(&c->bucket_lock);
-
- err = "cannot allocate new UUID bucket";
-@@ -2467,13 +2447,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
- static bool bch_is_open_cache(struct block_device *bdev)
- {
- struct cache_set *c, *tc;
-- struct cache *ca;
-- unsigned int i;
-
-- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
-- for_each_cache(ca, c, i)
-- if (ca->bdev == bdev)
-- return true;
-+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-+ struct cache *ca = c->cache;
-+
-+ if (ca->bdev == bdev)
-+ return true;
-+ }
-+
- return false;
- }
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
deleted file mode 100644
index 41c998f..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-From edbe36debb518e5db2ef66cfd6852379d4758684 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 20:12:07 +0800
-Subject: [PATCH v2 04/12] bcache: add set_uuid in struct cache_set
-
-This patch adds a separate set_uuid[16] in struct cache_set, to store
-the uuid of the cache set. This is preparation for removing the
-embedded struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
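(A condensed sketch of the change described above, reconstructed from the
hunks in the diff below; illustrative only.)

    /* new field in struct cache_set, replacing uses of c->sb.set_uuid */
    uint8_t set_uuid[16];

    /* initialized from the cache's super block in bch_cache_set_alloc() */
    memcpy(c->set_uuid, sb->set_uuid, 16);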
- drivers/md/bcache/bcache.h | 1 +
- drivers/md/bcache/debug.c | 2 +-
- drivers/md/bcache/super.c | 24 ++++++++++++------------
- include/trace/events/bcache.h | 4 ++--
- 4 files changed, 16 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 7ffe6b2d179b..94a62acac4fc 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -668,6 +668,7 @@ struct cache_set {
- struct mutex verify_lock;
- #endif
-
-+ uint8_t set_uuid[16];
- unsigned int nr_uuids;
- struct uuid_entry *uuids;
- BKEY_PADDED(uuid_bucket);
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 336f43910383..0ccc1b0baa42 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
- if (!IS_ERR_OR_NULL(bcache_debug)) {
- char name[50];
-
-- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
-+ snprintf(name, 50, "bcache-%pU", c->set_uuid);
- c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
- &cache_set_debug_ops);
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 91883d5c4b62..90a419ad6445 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1189,8 +1189,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- struct cached_dev *exist_dc, *t;
- int ret = 0;
-
-- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
-- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
-+ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
-+ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
- return -ENOENT;
-
- if (dc->disk.c) {
-@@ -1262,7 +1262,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- u->first_reg = u->last_reg = rtime;
- bch_uuid_write(c);
-
-- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
-+ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-
- bch_write_bdev_super(dc, &cl);
-@@ -1324,7 +1324,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
- dc->disk.disk->disk_name,
-- dc->disk.c->sb.set_uuid);
-+ dc->disk.c->set_uuid);
- return 0;
- }
-
-@@ -1632,7 +1632,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
- vaf.va = &args;
-
- pr_err("error on %pU: %pV, disabling caching\n",
-- c->sb.set_uuid, &vaf);
-+ c->set_uuid, &vaf);
-
- va_end(args);
-
-@@ -1685,7 +1685,7 @@ static void cache_set_free(struct closure *cl)
- list_del(&c->list);
- mutex_unlock(&bch_register_lock);
-
-- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
-+ pr_info("Cache set %pU unregistered\n", c->set_uuid);
- wake_up(&unregister_wait);
-
- closure_debug_destroy(&c->cl);
-@@ -1755,7 +1755,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
- {
- if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
- pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
-- d->disk->disk_name, c->sb.set_uuid);
-+ d->disk->disk_name, c->set_uuid);
- bcache_device_stop(d);
- } else if (atomic_read(&dc->has_dirty)) {
- /*
-@@ -1862,7 +1862,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
-- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
-+ memcpy(c->set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
- c->sb.nr_in_set = sb->nr_in_set;
-@@ -2145,7 +2145,7 @@ static const char *register_cache_set(struct cache *ca)
- struct cache_set *c;
-
- list_for_each_entry(c, &bch_cache_sets, list)
-- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-+ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
- if (c->cache)
- return "duplicate cache set member";
-
-@@ -2163,7 +2163,7 @@ static const char *register_cache_set(struct cache *ca)
- return err;
-
- err = "error creating kobject";
-- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
-+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
- kobject_add(&c->internal, &c->kobj, "internal"))
- goto err;
-
-@@ -2188,7 +2188,7 @@ static const char *register_cache_set(struct cache *ca)
- */
- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
- c->sb.version = ca->sb.version;
-- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
-+ memcpy(c->set_uuid, ca->sb.set_uuid, 16);
- c->sb.flags = ca->sb.flags;
- c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu\n", c->sb.version);
-@@ -2698,7 +2698,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
- list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
-- char *set_uuid = c->sb.uuid;
-+ char *set_uuid = c->set_uuid;
-
- if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
- list_del(&pdev->list);
-diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
-index 0bddea663b3b..e41c611d6d3b 100644
---- a/include/trace/events/bcache.h
-+++ b/include/trace/events/bcache.h
-@@ -164,7 +164,7 @@ TRACE_EVENT(bcache_write,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- __entry->inode = inode;
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio->bi_iter.bi_size >> 9;
-@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- ),
-
- TP_printk("%pU", __entry->uuid)
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
deleted file mode 100644
index 74412bb..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,258 +0,0 @@
-From 9708567a39ed76a51bde0065b05c3d30fa58aa32 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:25:58 +0800
-Subject: [PATCH v2 05/12] bcache: only use block_bytes() on struct cache
-
-Because struct cache_set and struct cache both contain a struct cache_sb,
-the macro block_bytes() can be used on both of them. Once the embedded
-struct cache_sb is removed from struct cache_set, this macro won't be
-usable on struct cache_set anymore.
-
-This patch unifies all block_bytes() usage to operate only on struct
-cache; this is one of the preparations for removing the embedded struct
-cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
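(A condensed sketch of the usage change described above, reconstructed
from the bcache.h and btree.c hunks in the diff below; illustrative only.)

    /* the macro itself only changes its parameter name ... */
    #define block_bytes(ca)  ((ca)->sb.block_size << 9)

    /* ... but callers now always pass a struct cache, e.g. */
    block_bytes(b->c)         /* before: struct cache_set */
    block_bytes(b->c->cache)  /* after:  struct cache     */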
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/btree.c | 24 ++++++++++++------------
- drivers/md/bcache/debug.c | 8 ++++----
- drivers/md/bcache/journal.c | 8 ++++----
- drivers/md/bcache/request.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 7 files changed, 24 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94a62acac4fc..29bec61cafbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -759,7 +759,7 @@ struct bbio {
-
- #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
--#define block_bytes(c) ((c)->sb.block_size << 9)
-+#define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
- {
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 0817ad510d9f..c91b4d58a5b3 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -104,7 +104,7 @@
-
- static inline struct bset *write_block(struct btree *b)
- {
-- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
-+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
- }
-
- static void bch_btree_init_next(struct btree *b)
-@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad btree header";
-- if (b->written + set_blocks(i, block_bytes(b->c)) >
-+ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
- btree_blocks(b))
- goto err;
-
-@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
-
- bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- err = "corrupted btree";
- for (i = write_block(b);
- bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
-- i = ((void *) i) + block_bytes(b->c))
-+ i = ((void *) i) + block_bytes(b->c->cache))
- if (i->seq == b->keys.set[0].data->seq)
- goto err;
-
-@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
-
- b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = cl;
-- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
- b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
- bch_bio_map(b->bio, i);
-
-@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- void bch_btree_node_write(struct btree *b, struct closure *parent)
-@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
-
- if (nodes < 2 ||
- __set_blocks(b->keys.set[0].data, keys,
-- block_bytes(b->c)) > blocks * (nodes - 1))
-+ block_bytes(b->c->cache)) > blocks * (nodes - 1))
- return 0;
-
- for (i = 0; i < nodes; i++) {
-@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k),
-- block_bytes(b->c)) > blocks)
-+ block_bytes(b->c->cache)) > blocks)
- break;
-
- last = k;
-@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- * though)
- */
- if (__set_blocks(n1, n1->keys + n2->keys,
-- block_bytes(b->c)) >
-+ block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]))
- goto out_unlock_nocoalesce;
-
-@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- last = &r->b->key;
- }
-
-- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
-+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]));
-
- if (last)
-@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
- goto err;
-
- split = set_blocks(btree_bset_first(n1),
-- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
-+ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
-
- if (split) {
- unsigned int keys = 0;
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 0ccc1b0baa42..b00fd08d696b 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
- for (i = (start); \
- (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
- i->seq == (start)->seq; \
-- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
-- block_bytes(b->c))
-+ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
-+ block_bytes(b->c->cache))
-
- void bch_btree_verify(struct btree *b)
- {
-@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
-
- for_each_written_bset(b, ondisk, i) {
- unsigned int block = ((void *) i - (void *) ondisk) /
-- block_bytes(b->c);
-+ block_bytes(b->c->cache);
-
- pr_err("*** on disk block %u:\n", block);
- bch_dump_bset(&b->keys, i, block);
- }
-
- pr_err("*** block %zu not written\n",
-- ((void *) i - (void *) ondisk) / block_bytes(b->c));
-+ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
-
- for (j = 0; j < inmemory->keys; j++)
- if (inmemory->d[j] != sorted->d[j])
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 027d0f8c4daf..ccd5de0ab0fe 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
- return ret;
- }
-
-- blocks = set_blocks(j, block_bytes(ca->set));
-+ blocks = set_blocks(j, block_bytes(ca));
-
- /*
- * Nodes in 'list' are in linear increasing order of
-@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
-- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
- c->sb.block_size;
-
- struct bio *bio;
-@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
- return;
- }
-
-- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
-+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
-
- w->data->btree_level = c->root->level;
-
-@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c)) * c->sb.block_size;
-+ block_bytes(c->cache)) * c->sb.block_size;
-
- if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index c7cadaafa947..02408fdbf5bb 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
- * bch_data_insert_keys() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
-- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
- return -ENOMEM;
-
- return __bch_keylist_realloc(l, u64s);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 90a419ad6445..36a538c2e960 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1528,7 +1528,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
-
- kobject_init(&d->kobj, &bch_flash_dev_ktype);
-
-- if (bcache_device_init(d, block_bytes(c), u->sectors,
-+ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
- NULL, &bcache_flash_ops))
- goto err;
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index ac06c0bc3c0a..b9f524ab5cc8 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c));
-- sysfs_hprint(block_size, block_bytes(c));
-+ sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
-
---
-2.26.2
-
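As a side note for readers of the deleted patch above, here is a minimal, self-contained userspace sketch of the macro change it describes. The struct and macro names follow the patch; the simplified fields, the sample values and the program itself are illustrative assumptions, not kernel code.

/* Illustrative only: block_bytes() taking a struct cache instead of a
 * cache_set; sizes are in 512-byte sectors, fields heavily simplified. */
#include <stdio.h>

struct cache_sb  { unsigned int block_size; };
struct cache     { struct cache_sb sb; };
struct cache_set { struct cache *cache; };        /* single cache per set */

#define block_bytes(ca) ((ca)->sb.block_size << 9)  /* sectors -> bytes */

int main(void)
{
	struct cache ca = { .sb = { .block_size = 8 } };  /* 8 sectors = 4 KiB */
	struct cache_set c = { .cache = &ca };

	/* call sites now pass c->cache (or b->c->cache) instead of c itself */
	printf("block size: %u bytes\n", block_bytes(c.cache));
	return 0;
}

The point of the change is that the macro's argument type becomes struct cache, so it keeps working once struct cache_set loses its embedded cache_sb.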
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
deleted file mode 100644
index 1194d90..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 395b4ef812e2a4ecf373a28d682a64cbda79ea34 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:28:23 +0800
-Subject: [PATCH v2 06/12] bcache: remove useless alloc_bucket_pages()
-
-Now that no one uses alloc_bucket_pages() anymore, remove it from super.c.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 3 ---
- 1 file changed, 3 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 36a538c2e960..28257f11d835 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1832,9 +1832,6 @@ void bch_cache_set_unregister(struct cache_set *c)
- bch_cache_set_stop(c);
- }
-
--#define alloc_bucket_pages(gfp, c) \
-- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
--
- #define alloc_meta_bucket_pages(gfp, sb) \
- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch
deleted file mode 100644
index b31a46f..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From df9eac9d5410755cf967640457c575b0aabb35b1 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:15:28 +0800
-Subject: [PATCH v2 07/12] bcache: remove useless bucket_pages()
-
-alloc_bucket_pages() was the only user of bucket_pages(). Now that
-alloc_bucket_pages() has been removed from the bcache code, it is safe
-to remove the useless macro bucket_pages() as well.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 29bec61cafbb..48a2585b6bbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,6 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
deleted file mode 100644
index b9cedd8..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 9b54949ab941a8f34d8a70211b3f632db4e193f6 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:20:48 +0800
-Subject: [PATCH v2 08/12] bcache: only use bucket_bytes() on struct cache
-
-Because struct cache_set and struct cache both have struct cache_sb, the
-macro bucket_bytes() is currently used on both of them. When removing
-the embedded struct cache_sb from struct cache_set, this macro won't be
-used on struct cache_set anymore.
-
-This patch unifies all bucket_bytes() usage onto struct cache only; this is
-one of the preparations to remove the embedded struct cache_sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 48a2585b6bbb..94d4baf4c405 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,7 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
-+#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index b9f524ab5cc8..4bfe98faadcc 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -713,7 +713,7 @@ SHOW(__bch_cache_set)
-
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
-- sysfs_hprint(bucket_size, bucket_bytes(c));
-+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch
deleted file mode 100644
index 790d2ce..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From 5b1a516888a054b38038723d40368235faddb5e3 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:28:26 +0800
-Subject: [PATCH v2 09/12] bcache: don't check seq numbers in
- register_cache_set()
-
-In order to update the partial super block of the cache set, the seq numbers
-of the cache and the cache set are checked in register_cache_set(). If the
-cache's seq number is larger than the cache set's seq number, the cache set
-must update its partial super block from the cache's super block. This is
-unnecessary once the embedded struct cache_sb is removed from struct cache_set.
-
-This patch removes the seq number checking from register_cache_set():
-later there will be no such partial super block in struct cache_set, and the
-cache set will directly reference the in-memory super block from
-struct cache. This is a preparation patch for removing the embedded
-struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 15 ---------------
- 1 file changed, 15 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 28257f11d835..3dfe81bf31c8 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2176,21 +2176,6 @@ static const char *register_cache_set(struct cache *ca)
- sysfs_create_link(&c->kobj, &ca->kobj, buf))
- goto err;
-
-- /*
-- * A special case is both ca->sb.seq and c->sb.seq are 0,
-- * such condition happens on a new created cache device whose
-- * super block is never flushed yet. In this case c->sb.version
-- * and other members should be updated too, otherwise we will
-- * have a mistaken super block version in cache set.
-- */
-- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
-- c->sb.version = ca->sb.version;
-- memcpy(c->set_uuid, ca->sb.set_uuid, 16);
-- c->sb.flags = ca->sb.flags;
-- c->sb.seq = ca->sb.seq;
-- pr_debug("set version = %llu\n", c->sb.version);
-- }
--
- kobject_get(&ca->kobj);
- ca->set = c;
- ca->set->cache = ca;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch
deleted file mode 100644
index 4e08eac..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 46164fea1d6e07cbb196e945b4534879b74504bf Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:36:56 +0800
-Subject: [PATCH v2 10/12] bcache: remove can_attach_cache()
-
-After removing the embedded struct cache_sb from struct cache_set, cache
-set will directly reference the in-memory super block of struct cache.
-It is unnecessary to compare block_size, bucket_size and nr_in_set from
-the identical in-memory super block in can_attach_cache().
-
-This is a preparation patch for later removing cache_set->sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 10 ----------
- 1 file changed, 10 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 3dfe81bf31c8..fcfc8f41b0ed 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2128,13 +2128,6 @@ static int run_cache_set(struct cache_set *c)
- return -EIO;
- }
-
--static bool can_attach_cache(struct cache *ca, struct cache_set *c)
--{
-- return ca->sb.block_size == c->sb.block_size &&
-- ca->sb.bucket_size == c->sb.bucket_size &&
-- ca->sb.nr_in_set == c->sb.nr_in_set;
--}
--
- static const char *register_cache_set(struct cache *ca)
- {
- char buf[12];
-@@ -2146,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!can_attach_cache(ca, c))
-- return "cache sb does not match set";
--
- if (!CACHE_SYNC(&ca->sb))
- SET_CACHE_SYNC(&c->sb, false);
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
deleted file mode 100644
index dd534a5..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From 24bb19eb76bea9589158215402aeda67fabba2e9 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:53:52 +0800
-Subject: [PATCH v2 11/12] bcache: check and set sync status on cache's
- in-memory super block
-
-Currently the cache's sync status is checked and set on the cache set's
-in-memory partial super block. After removing the embedded struct cache_sb
-from the cache set and referencing the cache's in-memory super block from
-struct cache_set, the sync status can be set and checked directly on the
-cache's super block.
-
-This patch checks and sets the cache sync status directly on the cache's
-in-memory super block. This is a preparation for later removing the
-embedded struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/super.c | 7 ++-----
- drivers/md/bcache/sysfs.c | 6 +++---
- 4 files changed, 7 insertions(+), 10 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 1b8310992dd0..65fdbdeb5134 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -361,7 +361,7 @@ static int bch_allocator_thread(void *arg)
- * new stuff to them:
- */
- allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
-- if (CACHE_SYNC(&ca->set->sb)) {
-+ if (CACHE_SYNC(&ca->sb)) {
- /*
- * This could deadlock if an allocation with a btree
- * node locked ever blocked - having the btree node
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index ccd5de0ab0fe..e2810668ede3 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -915,7 +915,7 @@ atomic_t *bch_journal(struct cache_set *c,
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return NULL;
-
-- if (!CACHE_SYNC(&c->sb))
-+ if (!CACHE_SYNC(&c->cache->sb))
- return NULL;
-
- w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index fcfc8f41b0ed..18f76d1ea0e3 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1970,7 +1970,7 @@ static int run_cache_set(struct cache_set *c)
- c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
-- if (CACHE_SYNC(&c->sb)) {
-+ if (CACHE_SYNC(&c->cache->sb)) {
- struct bkey *k;
- struct jset *j;
-
-@@ -2093,7 +2093,7 @@ static int run_cache_set(struct cache_set *c)
- * everything is set up - fortunately journal entries won't be
- * written until the SET_CACHE_SYNC() here:
- */
-- SET_CACHE_SYNC(&c->sb, true);
-+ SET_CACHE_SYNC(&c->cache->sb, true);
-
- bch_journal_next(&c->journal);
- bch_journal_meta(c, &cl);
-@@ -2139,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!CACHE_SYNC(&ca->sb))
-- SET_CACHE_SYNC(&c->sb, false);
--
- goto found;
- }
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index 4bfe98faadcc..554e3afc9b68 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -711,7 +711,7 @@ SHOW(__bch_cache_set)
- {
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
-
-- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
-+ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
-@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
- if (attr == &sysfs_synchronous) {
- bool sync = strtoul_or_return(buf);
-
-- if (sync != CACHE_SYNC(&c->sb)) {
-- SET_CACHE_SYNC(&c->sb, sync);
-+ if (sync != CACHE_SYNC(&c->cache->sb)) {
-+ SET_CACHE_SYNC(&c->cache->sb, sync);
- bcache_write_super(c);
- }
- }
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch
deleted file mode 100644
index 69e81ad..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch
+++ /dev/null
@@ -1,469 +0,0 @@
-From 6400744caa2bdbde213c6b336196ec074f715502 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 22 Aug 2020 16:11:38 +0800
-Subject: [PATCH v2 12/12] bcache: remove embedded struct cache_sb from struct
- cache_set
-
-Since the bcache code was merged into the mainline kernel, each cache set
-has only had one single cache in it. The multiple-cache framework is there
-but the code is far from complete. Considering that multiple copies of
-cached data can also be stored on e.g. md raid1 devices, it is indeed
-unnecessary to support multiple caches in one cache set.
-
-The previous preparation patches fix the dependencies of explicitly
-making a cache set have only a single cache. Now that we don't have to
-maintain an embedded partial super block in struct cache_set, the
-in-memory super block can be directly referenced from struct cache.
-
-This patch removes the embedded struct cache_sb from struct cache_set,
-and fixes all locations that referenced the removed super block by
-making them reference the in-memory super block of struct
-cache instead.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 6 ++---
- drivers/md/bcache/bcache.h | 4 +--
- drivers/md/bcache/btree.c | 17 +++++++------
- drivers/md/bcache/btree.h | 2 +-
- drivers/md/bcache/extents.c | 6 ++---
- drivers/md/bcache/features.c | 4 +--
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 11 ++++----
- drivers/md/bcache/request.c | 4 +--
- drivers/md/bcache/super.c | 47 +++++++++++++----------------------
- drivers/md/bcache/writeback.c | 2 +-
- 11 files changed, 46 insertions(+), 59 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 65fdbdeb5134..8c371d5eef8e 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
- struct open_bucket, list);
- found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
-- ret->sectors_free = c->sb.bucket_size;
-+ ret->sectors_free = c->cache->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
- }
-
-- if (b->sectors_free < c->sb.block_size)
-+ if (b->sectors_free < c->cache->sb.block_size)
- b->sectors_free = 0;
-
- /*
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94d4baf4c405..1d57f48307e6 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -517,8 +517,6 @@ struct cache_set {
- atomic_t idle_counter;
- atomic_t at_max_writeback_rate;
-
-- struct cache_sb sb;
--
- struct cache *cache;
-
- struct bcache_device **devices;
-@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
-
- static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
- {
-- return s & (c->sb.bucket_size - 1);
-+ return s & (c->cache->sb.bucket_size - 1);
- }
-
- static inline struct cache *PTR_CACHE(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index c91b4d58a5b3..d09103cc7da5 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
-
- }
-
-@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
- * See the comment arount cache_set->fill_iter.
- */
- iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
-+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
- iter->used = 0;
-
- #ifdef CONFIG_BCACHE_DEBUG
-@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad magic";
-- if (i->magic != bset_magic(&b->c->sb))
-+ if (i->magic != bset_magic(&b->c->cache->sb))
- goto err;
-
- err = "bad checksum";
-@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
- out:
- mempool_free(iter, &b->c->fill_iter);
- return;
-@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
- b->written += set_blocks(i, block_bytes(b->c->cache));
-@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
- if (c->verify_data)
- list_move(&c->verify_data->list, &c->btree_cache);
-
-- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
-+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
- #endif
-
- list_splice(&c->btree_cache_freeable,
-@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
- mutex_init(&c->verify_lock);
-
- c->verify_ondisk = (void *)
-- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
-+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
-+ ilog2(meta_bucket_pages(&c->cache->sb)));
- if (!c->verify_ondisk) {
- /*
- * Don't worry about the mca_rereserve buckets
-@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
- }
-
- b->parent = parent;
-- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
-+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
-
- mutex_unlock(&c->bucket_lock);
-
-diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
-index 257969980c49..50482107134f 100644
---- a/drivers/md/bcache/btree.h
-+++ b/drivers/md/bcache/btree.h
-@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
-
- static inline void set_gc_sectors(struct cache_set *c)
- {
-- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
-+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
- }
-
- void bkey_put(struct cache_set *c, struct bkey *k);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 9162af5bb6ec..f4658a1f37b8 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- return true;
-@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size)
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
- return "bad, length too big";
- if (bucket < ca->sb.first_bucket)
- return "bad, short offset";
-@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
- size_t n = PTR_BUCKET_NR(b->c, k, j);
-
- pr_cont(" bucket %zu", n);
-- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
- pr_cont(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
- }
-diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
-index 4442df48d28c..6469223f0b77 100644
---- a/drivers/md/bcache/features.c
-+++ b/drivers/md/bcache/features.c
-@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
- for (f = &feature_list[0]; f->compat != 0; f++) { \
- if (f->compat != BCH_FEATURE_ ## type) \
- continue; \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
- if (first) { \
- out += snprintf(out, buf + size - out, \
- "["); \
-@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
- \
- out += snprintf(out, buf + size - out, "%s", f->string);\
- \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
- out += snprintf(out, buf + size - out, "]"); \
- \
- first = false; \
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index a14a445618b4..dad71a6b7889 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
- struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
- struct bio *bio = &b->bio;
-
-- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
-+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
-
- return bio;
- }
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index e2810668ede3..c5526e5087ef 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
-
- bkey_init(k);
- SET_KEY_PTRS(k, 1);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
-
- out:
- if (!journal_full(&c->journal))
-@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
-- c->sb.block_size;
-+ ca->sb.block_size;
-
- struct bio *bio;
- struct bio_list list;
-@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-- w->data->magic = jset_magic(&c->sb);
-+ w->data->magic = jset_magic(&ca->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- size_t sectors;
- struct closure cl;
- bool wait = false;
-+ struct cache *ca = c->cache;
-
- closure_init_stack(&cl);
-
-@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c->cache)) * c->sb.block_size;
-+ block_bytes(ca)) * ca->sb.block_size;
-
- if (sectors <= min_t(size_t,
-- c->journal.blocks_free * c->sb.block_size,
-+ c->journal.blocks_free * ca->sb.block_size,
- PAGE_SECTORS << JSET_BITS))
- return w;
-
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 02408fdbf5bb..37e9cf8dbfc1 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
- goto skip;
- }
-
-- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
-- bio_sectors(bio) & (c->sb.block_size - 1)) {
-+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
-+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
- pr_debug("skipping unaligned io\n");
- goto skip;
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 18f76d1ea0e3..d06ea4a3e500 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-
-- c->sb.seq++;
-+ ca->sb.seq++;
-
-- if (c->sb.version > version)
-- version = c->sb.version;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
--
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ if (ca->sb.version < version)
-+ ca->sb.version = version;
-
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
-@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
- {
- BKEY_PADDED(key) k;
- struct closure cl;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- unsigned int size;
-
- closure_init_stack(&cl);
-@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
-- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
-+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
- SET_KEY_SIZE(&k.key, size);
- uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
- closure_sync(&cl);
-
- /* Only one bucket used for uuid write */
-- ca = PTR_CACHE(c, &k.key, 0);
- atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
-
- bkey_copy(&c->uuid_bucket, &k.key);
-@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- return -EINVAL;
- }
-
-- if (dc->sb.block_size < c->sb.block_size) {
-+ if (dc->sb.block_size < c->cache->sb.block_size) {
- /* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
-@@ -1664,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
- bch_journal_free(c);
-
- mutex_lock(&bch_register_lock);
-+ bch_bset_sort_state_free(&c->sort);
-+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
-+
- ca = c->cache;
- if (ca) {
- ca->set = NULL;
-@@ -1671,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
- kobject_put(&ca->kobj);
- }
-
-- bch_bset_sort_state_free(&c->sort);
-- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-
- if (c->moving_gc_wq)
- destroy_workqueue(c->moving_gc_wq);
-@@ -1838,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
- struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- {
- int iter_size;
-+ struct cache *ca = container_of(sb, struct cache, sb);
- struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
-
- if (!c)
-@@ -1860,23 +1855,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
- memcpy(c->set_uuid, sb->set_uuid, 16);
-- c->sb.block_size = sb->block_size;
-- c->sb.bucket_size = sb->bucket_size;
-- c->sb.nr_in_set = sb->nr_in_set;
-- c->sb.last_mount = sb->last_mount;
-- c->sb.version = sb->version;
-- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
-- c->sb.feature_compat = sb->feature_compat;
-- c->sb.feature_ro_compat = sb->feature_ro_compat;
-- c->sb.feature_incompat = sb->feature_incompat;
-- }
-
-+ c->cache = ca;
-+ c->cache->set = c;
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
-- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
-+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
- c->devices_max_used = 0;
- atomic_set(&c->attached_dev_nr, 0);
-- c->btree_pages = meta_bucket_pages(&c->sb);
-+ c->btree_pages = meta_bucket_pages(sb);
- if (c->btree_pages > BTREE_MAX_PAGES)
- c->btree_pages = max_t(int, c->btree_pages / 4,
- BTREE_MAX_PAGES);
-@@ -1914,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
- sizeof(struct bbio) +
-- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
-+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
- goto err;
-
- if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
-@@ -1924,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
- goto err;
-
-- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
-+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
- if (!c->uuids)
- goto err;
-
-@@ -2104,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
- goto err;
-
- closure_sync(&cl);
-- c->sb.last_mount = (u32)ktime_get_real_seconds();
-+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
- bcache_write_super(c);
-
- list_for_each_entry_safe(dc, t, &uncached_devices, list)
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 4f4ad6b3d43a..3c74996978da 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
- * This is the size of the cache, minus the amount used for
- * flash-only devices
- */
-- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
-+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
- atomic_long_read(&c->flash_dev_dirty_sectors);
-
- /*
---
-2.26.2
-
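A minimal sketch of the access-path change this patch describes, using simplified stand-in types: a field formerly read through the embedded super block as c->sb.bucket_size is now reached through the set's single cache as c->cache->sb.bucket_size. Only the struct and field names come from the patch; the helper cache_sectors() and the sample values are hypothetical.

/* Illustrative only: super-block fields reached through the single cache
 * once cache_set no longer embeds its own cache_sb. */
#include <stdio.h>
#include <stdint.h>

struct cache_sb  { unsigned int block_size, bucket_size; };  /* in sectors */
struct cache     { struct cache_sb sb; };
struct cache_set { struct cache *cache; uint64_t nbuckets; };

/* was: c->nbuckets * c->sb.bucket_size */
static uint64_t cache_sectors(const struct cache_set *c)
{
	return c->nbuckets * c->cache->sb.bucket_size;
}

int main(void)
{
	struct cache ca = { .sb = { .block_size = 8, .bucket_size = 1024 } };
	struct cache_set c = { .cache = &ca, .nbuckets = 4096 };

	printf("cache sectors: %llu\n",
	       (unsigned long long)cache_sectors(&c));
	return 0;
}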
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
deleted file mode 100644
index 1228ced..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
+++ /dev/null
@@ -1,152 +0,0 @@
-From 9260c7e003b7652c9a8208fa479ff4c5d72a6737 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:07:05 +0800
-Subject: [PATCH v2 01/15] bcache: remove 'int n' from parameter list of
- bch_bucket_alloc_set()
-
-The parameter 'int n' of bch_bucket_alloc_set() is not clearly
-defined. From the code comments, n is the number of buckets to alloc, but
-from the code itself 'n' is the maximum number of caches to iterate over.
-Indeed, in all the locations where bch_bucket_alloc_set() is called, 'n' is always 1.
-
-This patch removes the confusing and unnecessary 'int n' from the parameter
-list of bch_bucket_alloc_set(), and explicitly allocates only 1 bucket
-for its caller.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
- drivers/md/bcache/bcache.h | 4 ++--
- drivers/md/bcache/btree.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- 4 files changed, 19 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 52035a78d836..4493ff57476d 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -49,7 +49,7 @@
- *
- * bch_bucket_alloc() allocates a single bucket from a specific cache.
- *
-- * bch_bucket_alloc_set() allocates one or more buckets from different caches
-+ * bch_bucket_alloc_set() allocates one bucket from different caches
- * out of a cache set.
- *
- * free_some_buckets() drives all the processes described above. It's called
-@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
- }
-
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
-- int i;
-+ struct cache *ca;
-+ long b;
-
- /* No allocation if CACHE_SET_IO_DISABLE bit is set */
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return -1;
-
- lockdep_assert_held(&c->bucket_lock);
-- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
-
- bkey_init(k);
-
-- /* sort by free space/prio of oldest data in caches */
--
-- for (i = 0; i < n; i++) {
-- struct cache *ca = c->cache_by_alloc[i];
-- long b = bch_bucket_alloc(ca, reserve, wait);
-+ ca = c->cache_by_alloc[0];
-+ b = bch_bucket_alloc(ca, reserve, wait);
-+ if (b == -1)
-+ goto err;
-
-- if (b == -1)
-- goto err;
-+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
-+ bucket_to_sector(c, b),
-+ ca->sb.nr_this_dev);
-
-- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-- bucket_to_sector(c, b),
-- ca->sb.nr_this_dev);
--
-- SET_KEY_PTRS(k, i + 1);
-- }
-+ SET_KEY_PTRS(k, 1);
-
- return 0;
- err:
-@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- }
-
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait)
-+ struct bkey *k, bool wait)
- {
- int ret;
-
- mutex_lock(&c->bucket_lock);
-- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
-+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
-@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
-
- spin_unlock(&c->data_bucket_lock);
-
-- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
-+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
- return false;
-
- spin_lock(&c->data_bucket_lock);
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 4fd03d2496d8..5ff6e9573935 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
-
- long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
- int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-- struct bkey *k, int n, bool wait);
-+ struct bkey *k, bool wait);
- bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
- unsigned int sectors, unsigned int write_point,
- unsigned int write_prio, bool wait);
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 3d8bd0692af3..e2a719fed53b 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
-
- mutex_lock(&c->bucket_lock);
- retry:
-- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
-+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
- goto err;
-
- bkey_put(c, &k.key);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 1bbdc410ee3c..7057ec48f3d1 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
- closure_init_stack(&cl);
- lockdep_assert_held(&bch_register_lock);
-
-- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
-+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
---
-2.26.2
-
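A hedged sketch, in plain userspace C, of the simplified allocation shape the patch above describes: with one cache per set, the per-cache loop and the 'int n' parameter disappear and exactly one pointer is stored in the key. The names alloc_bucket() and alloc_set() and all types here are simplified stand-ins, not the real bcache functions.

/* Illustrative only: one bucket from the set's only cache. */
#include <stdio.h>

struct cache     { long next_free; };
struct cache_set { struct cache *cache; };
struct bkey      { long ptr[1]; unsigned int nptrs; };

/* pretend allocation that always succeeds; the real bch_bucket_alloc()
 * can return -1 when no bucket is available */
static long alloc_bucket(struct cache *ca)
{
	return ca->next_free++;
}

/* shape of __bch_bucket_alloc_set() after 'int n' is gone:
 * no loop over n caches, exactly one pointer set in the key */
static int alloc_set(struct cache_set *c, struct bkey *k)
{
	struct cache *ca = c->cache;
	long b = alloc_bucket(ca);

	if (b < 0)
		return -1;

	k->ptr[0] = b;
	k->nptrs = 1;
	return 0;
}

int main(void)
{
	struct cache ca = { .next_free = 100 };
	struct cache_set c = { .cache = &ca };
	struct bkey k;

	if (!alloc_set(&c, &k))
		printf("allocated bucket %ld (%u ptr)\n", k.ptr[0], k.nptrs);
	return 0;
}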
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
deleted file mode 100644
index d0882f9..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From da9ff41f507337ce4797935e8ba9b70da361d59d Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 00:30:59 +0800
-Subject: [PATCH v2 02/15] bcache: explicitly make cache_set only have single
- cache
-
-Currently, although the bcache code has a framework for multiple caches
-in a cache set, the multiple-cache support was never completed and users
-use md raid1 for multiple copies of the cached data.
-
-This patch makes the following changes in struct cache_set, to explicitly
-make a cache_set only have a single cache:
-- Change pointer array "*cache[MAX_CACHES_PER_SET]" to a single pointer
- "*cache".
-- Remove pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
-- Remove "caches_loaded".
-
-Now the code reflects exactly what it does in practice: only one cache is
-used in the cache set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/bcache.h | 8 +++-----
- drivers/md/bcache/super.c | 19 ++++++++-----------
- 3 files changed, 12 insertions(+), 17 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 4493ff57476d..3385f6add6df 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-
- bkey_init(k);
-
-- ca = c->cache_by_alloc[0];
-+ ca = c->cache;
- b = bch_bucket_alloc(ca, reserve, wait);
- if (b == -1)
- goto err;
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 5ff6e9573935..aa112c1adba1 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -519,9 +519,7 @@ struct cache_set {
-
- struct cache_sb sb;
-
-- struct cache *cache[MAX_CACHES_PER_SET];
-- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
-- int caches_loaded;
-+ struct cache *cache;
-
- struct bcache_device **devices;
- unsigned int devices_max_used;
-@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k,
- unsigned int ptr)
- {
-- return c->cache[PTR_DEV(k, ptr)];
-+ return c->cache;
- }
-
- static inline size_t PTR_BUCKET_NR(struct cache_set *c,
-@@ -890,7 +888,7 @@ do { \
- /* Looping macros */
-
- #define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
-+ for (iter = 0; ca = cs->cache, iter < 1; iter++)
-
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 7057ec48f3d1..e9ccfa17beb8 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1675,7 +1675,7 @@ static void cache_set_free(struct closure *cl)
- for_each_cache(ca, c, i)
- if (ca) {
- ca->set = NULL;
-- c->cache[ca->sb.nr_this_dev] = NULL;
-+ c->cache = NULL;
- kobject_put(&ca->kobj);
- }
-
-@@ -2166,7 +2166,7 @@ static const char *register_cache_set(struct cache *ca)
-
- list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-- if (c->cache[ca->sb.nr_this_dev])
-+ if (c->cache)
- return "duplicate cache set member";
-
- if (!can_attach_cache(ca, c))
-@@ -2216,14 +2216,11 @@ static const char *register_cache_set(struct cache *ca)
-
- kobject_get(&ca->kobj);
- ca->set = c;
-- ca->set->cache[ca->sb.nr_this_dev] = ca;
-- c->cache_by_alloc[c->caches_loaded++] = ca;
-+ ca->set->cache = ca;
-
-- if (c->caches_loaded == c->sb.nr_in_set) {
-- err = "failed to run cache set";
-- if (run_cache_set(c) < 0)
-- goto err;
-- }
-+ err = "failed to run cache set";
-+ if (run_cache_set(c) < 0)
-+ goto err;
-
- return NULL;
- err:
-@@ -2240,8 +2237,8 @@ void bch_cache_release(struct kobject *kobj)
- unsigned int i;
-
- if (ca->set) {
-- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
-- ca->set->cache[ca->sb.nr_this_dev] = NULL;
-+ BUG_ON(ca->set->cache != ca);
-+ ca->set->cache = NULL;
- }
-
- free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
---
-2.26.2
-
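A small illustrative before/after comparison of the struct cache_set member change made by the patch above, assuming simplified stand-in types; cache_set_old and cache_set_new are hypothetical names used only to show both layouts side by side.

/* Illustrative only: the multi-cache bookkeeping collapses into one pointer,
 * so lookups no longer index by device number. */
#include <stdio.h>

#define MAX_CACHES_PER_SET 8

struct cache { int nr_this_dev; };

/* before: an array plus allocation-order bookkeeping */
struct cache_set_old {
	struct cache *cache[MAX_CACHES_PER_SET];
	struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
	int caches_loaded;
};

/* after: exactly one cache per set */
struct cache_set_new {
	struct cache *cache;
};

int main(void)
{
	struct cache ca = { .nr_this_dev = 0 };
	struct cache_set_new c = { .cache = &ca };

	/* former c->cache[PTR_DEV(...)] or c->cache_by_alloc[0] lookups
	 * become a plain pointer dereference */
	printf("single cache, nr_this_dev=%d\n", c.cache->nr_this_dev);
	return 0;
}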
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch
deleted file mode 100644
index 195c7a4..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch
+++ /dev/null
@@ -1,896 +0,0 @@
-From 50516df3a606a49a170bb14e26ed595aff4c84d0 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 01:26:09 +0800
-Subject: [PATCH v2 03/15] bcache: remove for_each_cache()
-
-Since each cache_set now explicitly has a single cache, for_each_cache()
-is unnecessary. This patch removes this macro, updates all locations
-where it is used, and makes sure all code logic remains consistent.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 17 ++-
- drivers/md/bcache/bcache.h | 9 +-
- drivers/md/bcache/btree.c | 103 +++++++---------
- drivers/md/bcache/journal.c | 229 ++++++++++++++++-------------------
- drivers/md/bcache/movinggc.c | 58 +++++----
- drivers/md/bcache/super.c | 115 ++++++++----------
- 6 files changed, 237 insertions(+), 294 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 3385f6add6df..1b8310992dd0 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- struct cache *ca;
- struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-- unsigned int i;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
-
- c->min_prio = USHRT_MAX;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca)
-- if (b->prio &&
-- b->prio != BTREE_PRIO &&
-- !atomic_read(&b->pin)) {
-- b->prio--;
-- c->min_prio = min(c->min_prio, b->prio);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca)
-+ if (b->prio &&
-+ b->prio != BTREE_PRIO &&
-+ !atomic_read(&b->pin)) {
-+ b->prio--;
-+ c->min_prio = min(c->min_prio, b->prio);
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index aa112c1adba1..7ffe6b2d179b 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -887,9 +887,6 @@ do { \
-
- /* Looping macros */
-
--#define for_each_cache(ca, cs, iter) \
-- for (iter = 0; ca = cs->cache, iter < 1; iter++)
--
- #define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->sb.first_bucket; \
- b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
-
- static inline void wake_up_allocators(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- wake_up_process(ca->alloc_thread);
-+ wake_up_process(ca->alloc_thread);
- }
-
- static inline void closure_bio_submit(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index e2a719fed53b..0817ad510d9f 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
- static int btree_check_reserve(struct btree *b, struct btree_op *op)
- {
- struct cache_set *c = b->c;
-- struct cache *ca;
-- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
-+ struct cache *ca = c->cache;
-+ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i)
-- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-- if (op)
-- prepare_to_wait(&c->btree_cache_wait, &op->wait,
-- TASK_UNINTERRUPTIBLE);
-- mutex_unlock(&c->bucket_lock);
-- return -EINTR;
-- }
-+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-+ if (op)
-+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
-+ TASK_UNINTERRUPTIBLE);
-+ mutex_unlock(&c->bucket_lock);
-+ return -EINTR;
-+ }
-
- mutex_unlock(&c->bucket_lock);
-
-@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned int i;
-
- if (!c->gc_mark_valid)
- return;
-@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
- c->gc_mark_valid = 0;
- c->gc_done = ZERO_KEY;
-
-- for_each_cache(ca, c, i)
-- for_each_bucket(b, ca) {
-- b->last_gc = b->gen;
-- if (!atomic_read(&b->pin)) {
-- SET_GC_MARK(b, 0);
-- SET_GC_SECTORS_USED(b, 0);
-- }
-+ ca = c->cache;
-+ for_each_bucket(b, ca) {
-+ b->last_gc = b->gen;
-+ if (!atomic_read(&b->pin)) {
-+ SET_GC_MARK(b, 0);
-+ SET_GC_SECTORS_USED(b, 0);
- }
-+ }
-
- mutex_unlock(&c->bucket_lock);
- }
-@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
- {
- struct bucket *b;
- struct cache *ca;
-- unsigned int i;
-+ unsigned int i, j;
-+ uint64_t *k;
-
- mutex_lock(&c->bucket_lock);
-
-@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
- struct bcache_device *d = c->devices[i];
- struct cached_dev *dc;
- struct keybuf_key *w, *n;
-- unsigned int j;
-
- if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
-@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
- rcu_read_unlock();
-
- c->avail_nbuckets = 0;
-- for_each_cache(ca, c, i) {
-- uint64_t *i;
-
-- ca->invalidate_needs_gc = 0;
-+ ca = c->cache;
-+ ca->invalidate_needs_gc = 0;
-
-- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for (i = ca->prio_buckets;
-- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
-+ for (k = ca->prio_buckets;
-+ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
-+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
-
-- for_each_bucket(b, ca) {
-- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-+ for_each_bucket(b, ca) {
-+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
-
-- if (atomic_read(&b->pin))
-- continue;
-+ if (atomic_read(&b->pin))
-+ continue;
-
-- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
-
-- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-- c->avail_nbuckets++;
-- }
-+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-+ c->avail_nbuckets++;
- }
-
- mutex_unlock(&c->bucket_lock);
-@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
-
- static bool gc_should_run(struct cache_set *c)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = c->cache;
-
-- for_each_cache(ca, c, i)
-- if (ca->invalidate_needs_gc)
-- return true;
-+ if (ca->invalidate_needs_gc)
-+ return true;
-
- if (atomic_read(&c->sectors_to_gc) < 0)
- return true;
-@@ -2081,9 +2075,8 @@ int bch_btree_check(struct cache_set *c)
-
- void bch_initial_gc_finish(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-
- bch_btree_gc_finish(c);
-
-@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
- * This is only safe for buckets that have no live data in them, which
- * there should always be some of.
- */
-- for_each_cache(ca, c, i) {
-- for_each_bucket(b, ca) {
-- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-- fifo_full(&ca->free[RESERVE_BTREE]))
-- break;
-+ for_each_bucket(b, ca) {
-+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-+ fifo_full(&ca->free[RESERVE_BTREE]))
-+ break;
-
-- if (bch_can_invalidate_bucket(ca, b) &&
-- !GC_MARK(b)) {
-- __bch_invalidate_one_bucket(ca, b);
-- if (!fifo_push(&ca->free[RESERVE_PRIO],
-- b - ca->buckets))
-- fifo_push(&ca->free[RESERVE_BTREE],
-- b - ca->buckets);
-- }
-+ if (bch_can_invalidate_bucket(ca, b) &&
-+ !GC_MARK(b)) {
-+ __bch_invalidate_one_bucket(ca, b);
-+ if (!fifo_push(&ca->free[RESERVE_PRIO],
-+ b - ca->buckets))
-+ fifo_push(&ca->free[RESERVE_BTREE],
-+ b - ca->buckets);
- }
- }
-
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 77fbfd52edcf..027d0f8c4daf 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
- ret; \
- })
-
-- struct cache *ca;
-- unsigned int iter;
-+ struct cache *ca = c->cache;
- int ret = 0;
-+ struct journal_device *ja = &ca->journal;
-+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-+ unsigned int i, l, r, m;
-+ uint64_t seq;
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-- unsigned int i, l, r, m;
-- uint64_t seq;
--
-- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-
-+ /*
-+ * Read journal buckets ordered by golden ratio hash to quickly
-+ * find a sequence of buckets with valid journal entries
-+ */
-+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
- /*
-- * Read journal buckets ordered by golden ratio hash to quickly
-- * find a sequence of buckets with valid journal entries
-+ * We must try the index l with ZERO first for
-+ * correctness due to the scenario that the journal
-+ * bucket is circular buffer which might have wrapped
- */
-- for (i = 0; i < ca->sb.njournal_buckets; i++) {
-- /*
-- * We must try the index l with ZERO first for
-- * correctness due to the scenario that the journal
-- * bucket is circular buffer which might have wrapped
-- */
-- l = (i * 2654435769U) % ca->sb.njournal_buckets;
-+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
-
-- if (test_bit(l, bitmap))
-- break;
-+ if (test_bit(l, bitmap))
-+ break;
-
-- if (read_bucket(l))
-- goto bsearch;
-- }
-+ if (read_bucket(l))
-+ goto bsearch;
-+ }
-
-- /*
-- * If that fails, check all the buckets we haven't checked
-- * already
-- */
-- pr_debug("falling back to linear search\n");
-+ /*
-+ * If that fails, check all the buckets we haven't checked
-+ * already
-+ */
-+ pr_debug("falling back to linear search\n");
-
-- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-- if (read_bucket(l))
-- goto bsearch;
-+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-+ if (read_bucket(l))
-+ goto bsearch;
-
-- /* no journal entries on this device? */
-- if (l == ca->sb.njournal_buckets)
-- continue;
-+ /* no journal entries on this device? */
-+ if (l == ca->sb.njournal_buckets)
-+ goto out;
- bsearch:
-- BUG_ON(list_empty(list));
-+ BUG_ON(list_empty(list));
-
-- /* Binary search */
-- m = l;
-- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-- pr_debug("starting binary search, l %u r %u\n", l, r);
-+ /* Binary search */
-+ m = l;
-+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-+ pr_debug("starting binary search, l %u r %u\n", l, r);
-
-- while (l + 1 < r) {
-- seq = list_entry(list->prev, struct journal_replay,
-- list)->j.seq;
-+ while (l + 1 < r) {
-+ seq = list_entry(list->prev, struct journal_replay,
-+ list)->j.seq;
-
-- m = (l + r) >> 1;
-- read_bucket(m);
-+ m = (l + r) >> 1;
-+ read_bucket(m);
-
-- if (seq != list_entry(list->prev, struct journal_replay,
-- list)->j.seq)
-- l = m;
-- else
-- r = m;
-- }
-+ if (seq != list_entry(list->prev, struct journal_replay,
-+ list)->j.seq)
-+ l = m;
-+ else
-+ r = m;
-+ }
-
-- /*
-- * Read buckets in reverse order until we stop finding more
-- * journal entries
-- */
-- pr_debug("finishing up: m %u njournal_buckets %u\n",
-- m, ca->sb.njournal_buckets);
-- l = m;
-+ /*
-+ * Read buckets in reverse order until we stop finding more
-+ * journal entries
-+ */
-+ pr_debug("finishing up: m %u njournal_buckets %u\n",
-+ m, ca->sb.njournal_buckets);
-+ l = m;
-
-- while (1) {
-- if (!l--)
-- l = ca->sb.njournal_buckets - 1;
-+ while (1) {
-+ if (!l--)
-+ l = ca->sb.njournal_buckets - 1;
-
-- if (l == m)
-- break;
-+ if (l == m)
-+ break;
-
-- if (test_bit(l, bitmap))
-- continue;
-+ if (test_bit(l, bitmap))
-+ continue;
-
-- if (!read_bucket(l))
-- break;
-- }
-+ if (!read_bucket(l))
-+ break;
-+ }
-
-- seq = 0;
-+ seq = 0;
-
-- for (i = 0; i < ca->sb.njournal_buckets; i++)
-- if (ja->seq[i] > seq) {
-- seq = ja->seq[i];
-- /*
-- * When journal_reclaim() goes to allocate for
-- * the first time, it'll use the bucket after
-- * ja->cur_idx
-- */
-- ja->cur_idx = i;
-- ja->last_idx = ja->discard_idx = (i + 1) %
-- ca->sb.njournal_buckets;
-+ for (i = 0; i < ca->sb.njournal_buckets; i++)
-+ if (ja->seq[i] > seq) {
-+ seq = ja->seq[i];
-+ /*
-+ * When journal_reclaim() goes to allocate for
-+ * the first time, it'll use the bucket after
-+ * ja->cur_idx
-+ */
-+ ja->cur_idx = i;
-+ ja->last_idx = ja->discard_idx = (i + 1) %
-+ ca->sb.njournal_buckets;
-
-- }
-- }
-+ }
-
-+out:
- if (!list_empty(list))
- c->journal.seq = list_entry(list->prev,
- struct journal_replay,
-@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
-
- static bool is_discard_enabled(struct cache_set *s)
- {
-- struct cache *ca;
-- unsigned int i;
-+ struct cache *ca = s->cache;
-
-- for_each_cache(ca, s, i)
-- if (ca->discard)
-- return true;
-+ if (ca->discard)
-+ return true;
-
- return false;
- }
-@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- uint64_t last_seq;
-- unsigned int iter, n = 0;
-+ unsigned int next;
-+ struct journal_device *ja = &ca->journal;
- atomic_t p __maybe_unused;
-
- atomic_long_inc(&c->reclaim);
-@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
-
- /* Update last_idx */
-
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
--
-- while (ja->last_idx != ja->cur_idx &&
-- ja->seq[ja->last_idx] < last_seq)
-- ja->last_idx = (ja->last_idx + 1) %
-- ca->sb.njournal_buckets;
-- }
-+ while (ja->last_idx != ja->cur_idx &&
-+ ja->seq[ja->last_idx] < last_seq)
-+ ja->last_idx = (ja->last_idx + 1) %
-+ ca->sb.njournal_buckets;
-
-- for_each_cache(ca, c, iter)
-- do_journal_discard(ca);
-+ do_journal_discard(ca);
-
- if (c->journal.blocks_free)
- goto out;
-
-- /*
-- * Allocate:
-- * XXX: Sort by free journal space
-- */
--
-- for_each_cache(ca, c, iter) {
-- struct journal_device *ja = &ca->journal;
-- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-+ /* No space available on this device */
-+ if (next == ja->discard_idx)
-+ goto out;
-
-- /* No space available on this device */
-- if (next == ja->discard_idx)
-- continue;
-+ ja->cur_idx = next;
-+ k->ptr[0] = MAKE_PTR(0,
-+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-+ ca->sb.nr_this_dev);
-+ atomic_long_inc(&c->reclaimed_journal_buckets);
-
-- ja->cur_idx = next;
-- k->ptr[n++] = MAKE_PTR(0,
-- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-- ca->sb.nr_this_dev);
-- atomic_long_inc(&c->reclaimed_journal_buckets);
-- }
-+ bkey_init(k);
-+ SET_KEY_PTRS(k, 1);
-+ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
-- if (n) {
-- bkey_init(k);
-- SET_KEY_PTRS(k, n);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-- }
- out:
- if (!journal_full(&c->journal))
- __closure_wake_up(&c->journal.wait);
-@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
- __releases(c->journal.lock)
- {
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->btree_root, &c->root->key);
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
-- for_each_cache(ca, c, i)
-- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
--
-+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(&c->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
-diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
-index 5872d6470470..b9c3d27ec093 100644
---- a/drivers/md/bcache/movinggc.c
-+++ b/drivers/md/bcache/movinggc.c
-@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
-
- void bch_moving_gc(struct cache_set *c)
- {
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct bucket *b;
-- unsigned int i;
-+ unsigned long sectors_to_move, reserve_sectors;
-
- if (!c->copy_gc_enabled)
- return;
-
- mutex_lock(&c->bucket_lock);
-
-- for_each_cache(ca, c, i) {
-- unsigned long sectors_to_move = 0;
-- unsigned long reserve_sectors = ca->sb.bucket_size *
-+ sectors_to_move = 0;
-+ reserve_sectors = ca->sb.bucket_size *
- fifo_used(&ca->free[RESERVE_MOVINGGC]);
-
-- ca->heap.used = 0;
--
-- for_each_bucket(b, ca) {
-- if (GC_MARK(b) == GC_MARK_METADATA ||
-- !GC_SECTORS_USED(b) ||
-- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-- atomic_read(&b->pin))
-- continue;
--
-- if (!heap_full(&ca->heap)) {
-- sectors_to_move += GC_SECTORS_USED(b);
-- heap_add(&ca->heap, b, bucket_cmp);
-- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-- sectors_to_move -= bucket_heap_top(ca);
-- sectors_to_move += GC_SECTORS_USED(b);
--
-- ca->heap.data[0] = b;
-- heap_sift(&ca->heap, 0, bucket_cmp);
-- }
-- }
-+ ca->heap.used = 0;
-+
-+ for_each_bucket(b, ca) {
-+ if (GC_MARK(b) == GC_MARK_METADATA ||
-+ !GC_SECTORS_USED(b) ||
-+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-+ atomic_read(&b->pin))
-+ continue;
-
-- while (sectors_to_move > reserve_sectors) {
-- heap_pop(&ca->heap, b, bucket_cmp);
-- sectors_to_move -= GC_SECTORS_USED(b);
-+ if (!heap_full(&ca->heap)) {
-+ sectors_to_move += GC_SECTORS_USED(b);
-+ heap_add(&ca->heap, b, bucket_cmp);
-+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-+ sectors_to_move -= bucket_heap_top(ca);
-+ sectors_to_move += GC_SECTORS_USED(b);
-+
-+ ca->heap.data[0] = b;
-+ heap_sift(&ca->heap, 0, bucket_cmp);
- }
-+ }
-
-- while (heap_pop(&ca->heap, b, bucket_cmp))
-- SET_GC_MOVE(b, 1);
-+ while (sectors_to_move > reserve_sectors) {
-+ heap_pop(&ca->heap, b, bucket_cmp);
-+ sectors_to_move -= GC_SECTORS_USED(b);
- }
-
-+ while (heap_pop(&ca->heap, b, bucket_cmp))
-+ SET_GC_MOVE(b, 1);
-+
- mutex_unlock(&c->bucket_lock);
-
- c->moving_gc_keys.last_scanned = ZERO_KEY;
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index e9ccfa17beb8..91883d5c4b62 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
- void bcache_write_super(struct cache_set *c)
- {
- struct closure *cl = &c->sb_write;
-- struct cache *ca;
-- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-+ struct cache *ca = c->cache;
-+ struct bio *bio = &ca->sb_bio;
-+ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
-
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
- if (c->sb.version > version)
- version = c->sb.version;
-
-- for_each_cache(ca, c, i) {
-- struct bio *bio = &ca->sb_bio;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
-+ ca->sb.version = version;
-+ ca->sb.seq = c->sb.seq;
-+ ca->sb.last_mount = c->sb.last_mount;
-
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-
-- bio_init(bio, ca->sb_bv, 1);
-- bio_set_dev(bio, ca->bdev);
-- bio->bi_end_io = write_super_endio;
-- bio->bi_private = ca;
-+ bio_init(bio, ca->sb_bv, 1);
-+ bio_set_dev(bio, ca->bdev);
-+ bio->bi_end_io = write_super_endio;
-+ bio->bi_private = ca;
-
-- closure_get(cl);
-- __write_super(&ca->sb, ca->sb_disk, bio);
-- }
-+ closure_get(cl);
-+ __write_super(&ca->sb, ca->sb_disk, bio);
-
- closure_return_with_destructor(cl, bcache_write_super_unlock);
- }
-@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
- lockdep_assert_held(&bch_register_lock);
-
- if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = d->c->cache;
-
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
-
-- for_each_cache(ca, d->c, i)
-- bd_unlink_disk_holder(ca->bdev, d->disk);
-+ bd_unlink_disk_holder(ca->bdev, d->disk);
- }
- }
-
- static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- const char *name)
- {
-- unsigned int i;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- int ret;
-
-- for_each_cache(ca, d->c, i)
-- bd_link_disk_holder(ca->bdev, d->disk);
-+ bd_link_disk_holder(ca->bdev, d->disk);
-
- snprintf(d->name, BCACHEDEVNAME_SIZE,
- "%s%u", name, d->id);
-@@ -1663,7 +1656,6 @@ static void cache_set_free(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, cl);
- struct cache *ca;
-- unsigned int i;
-
- debugfs_remove(c->debug);
-
-@@ -1672,12 +1664,12 @@ static void cache_set_free(struct closure *cl)
- bch_journal_free(c);
-
- mutex_lock(&bch_register_lock);
-- for_each_cache(ca, c, i)
-- if (ca) {
-- ca->set = NULL;
-- c->cache = NULL;
-- kobject_put(&ca->kobj);
-- }
-+ ca = c->cache;
-+ if (ca) {
-+ ca->set = NULL;
-+ c->cache = NULL;
-+ kobject_put(&ca->kobj);
-+ }
-
- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-@@ -1703,9 +1695,8 @@ static void cache_set_free(struct closure *cl)
- static void cache_set_flush(struct closure *cl)
- {
- struct cache_set *c = container_of(cl, struct cache_set, caching);
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct btree *b;
-- unsigned int i;
-
- bch_cache_accounting_destroy(&c->accounting);
-
-@@ -1730,9 +1721,8 @@ static void cache_set_flush(struct closure *cl)
- mutex_unlock(&b->write_lock);
- }
-
-- for_each_cache(ca, c, i)
-- if (ca->alloc_thread)
-- kthread_stop(ca->alloc_thread);
-+ if (ca->alloc_thread)
-+ kthread_stop(ca->alloc_thread);
-
- if (c->journal.cur) {
- cancel_delayed_work_sync(&c->journal.work);
-@@ -1973,16 +1963,14 @@ static int run_cache_set(struct cache_set *c)
- {
- const char *err = "cannot allocate memory";
- struct cached_dev *dc, *t;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- struct closure cl;
-- unsigned int i;
- LIST_HEAD(journal);
- struct journal_replay *l;
-
- closure_init_stack(&cl);
-
-- for_each_cache(ca, c, i)
-- c->nbuckets += ca->sb.nbuckets;
-+ c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
- if (CACHE_SYNC(&c->sb)) {
-@@ -2002,10 +1990,8 @@ static int run_cache_set(struct cache_set *c)
- j = &list_entry(journal.prev, struct journal_replay, list)->j;
-
- err = "IO error reading priorities";
-- for_each_cache(ca, c, i) {
-- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-- goto err;
-- }
-+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-+ goto err;
-
- /*
- * If prio_read() fails it'll call cache_set_error and we'll
-@@ -2049,9 +2035,8 @@ static int run_cache_set(struct cache_set *c)
- bch_journal_next(&c->journal);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- /*
- * First place it's safe to allocate: btree_check() and
-@@ -2070,28 +2055,23 @@ static int run_cache_set(struct cache_set *c)
- if (bch_journal_replay(c, &journal))
- goto err;
- } else {
-- pr_notice("invalidating existing data\n");
--
-- for_each_cache(ca, c, i) {
-- unsigned int j;
-+ unsigned int j;
-
-- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-- 2, SB_JOURNAL_BUCKETS);
-+ pr_notice("invalidating existing data\n");
-+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-+ 2, SB_JOURNAL_BUCKETS);
-
-- for (j = 0; j < ca->sb.keys; j++)
-- ca->sb.d[j] = ca->sb.first_bucket + j;
-- }
-+ for (j = 0; j < ca->sb.keys; j++)
-+ ca->sb.d[j] = ca->sb.first_bucket + j;
-
- bch_initial_gc_finish(c);
-
- err = "error starting allocator thread";
-- for_each_cache(ca, c, i)
-- if (bch_cache_allocator_start(ca))
-- goto err;
-+ if (bch_cache_allocator_start(ca))
-+ goto err;
-
- mutex_lock(&c->bucket_lock);
-- for_each_cache(ca, c, i)
-- bch_prio_write(ca, true);
-+ bch_prio_write(ca, true);
- mutex_unlock(&c->bucket_lock);
-
- err = "cannot allocate new UUID bucket";
-@@ -2467,13 +2447,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
- static bool bch_is_open_cache(struct block_device *bdev)
- {
- struct cache_set *c, *tc;
-- struct cache *ca;
-- unsigned int i;
-
-- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
-- for_each_cache(ca, c, i)
-- if (ca->bdev == bdev)
-- return true;
-+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-+ struct cache *ca = c->cache;
-+
-+ if (ca->bdev == bdev)
-+ return true;
-+ }
-+
- return false;
- }
-
---
-2.26.2
-
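Every hunk in the patch above applies the same mechanical transformation: since a cache_set now holds exactly one cache, each for_each_cache(ca, c, i) loop collapses into a single dereference of c->cache and the loop body loses one level of indentation. The standalone sketch below models that shape with simplified user-space structs (they are stand-ins, not the kernel definitions); the names mirror the gc_should_run() hunk quoted above.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct cache {
        bool invalidate_needs_gc;
};

struct cache_set {
        struct cache *cache;    /* single cache, replacing the old cache[] array */
        int sectors_to_gc;
};

/*
 * Old shape (conceptually):
 *      for_each_cache(ca, c, i)
 *              if (ca->invalidate_needs_gc)
 *                      return true;
 * New shape after the patch: one direct dereference of c->cache.
 */
static bool gc_should_run(struct cache_set *c)
{
        struct cache *ca = c->cache;

        if (ca->invalidate_needs_gc)
                return true;

        if (c->sectors_to_gc < 0)
                return true;

        return false;
}

int main(void)
{
        struct cache ca = { .invalidate_needs_gc = false };
        struct cache_set c = { .cache = &ca, .sectors_to_gc = -1 };

        printf("gc_should_run: %d\n", gc_should_run(&c));      /* prints 1 */
        return 0;
}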
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
deleted file mode 100644
index 61e18a8..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-From 5f709f50fb5302b446ab136dd4673a68051b9299 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 20:12:07 +0800
-Subject: [PATCH v2 04/15] bcache: add set_uuid in struct cache_set
-
-This patch adds a separate set_uuid[16] in struct cache_set, to store
-the uuid of the cache set. This is a preparation for removing the
-embedded struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 1 +
- drivers/md/bcache/debug.c | 2 +-
- drivers/md/bcache/super.c | 24 ++++++++++++------------
- include/trace/events/bcache.h | 4 ++--
- 4 files changed, 16 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 7ffe6b2d179b..94a62acac4fc 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -668,6 +668,7 @@ struct cache_set {
- struct mutex verify_lock;
- #endif
-
-+ uint8_t set_uuid[16];
- unsigned int nr_uuids;
- struct uuid_entry *uuids;
- BKEY_PADDED(uuid_bucket);
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 336f43910383..0ccc1b0baa42 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
- if (!IS_ERR_OR_NULL(bcache_debug)) {
- char name[50];
-
-- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
-+ snprintf(name, 50, "bcache-%pU", c->set_uuid);
- c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
- &cache_set_debug_ops);
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 91883d5c4b62..90a419ad6445 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1189,8 +1189,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- struct cached_dev *exist_dc, *t;
- int ret = 0;
-
-- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
-- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
-+ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
-+ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
- return -ENOENT;
-
- if (dc->disk.c) {
-@@ -1262,7 +1262,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- u->first_reg = u->last_reg = rtime;
- bch_uuid_write(c);
-
-- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
-+ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-
- bch_write_bdev_super(dc, &cl);
-@@ -1324,7 +1324,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
- dc->disk.disk->disk_name,
-- dc->disk.c->sb.set_uuid);
-+ dc->disk.c->set_uuid);
- return 0;
- }
-
-@@ -1632,7 +1632,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
- vaf.va = &args;
-
- pr_err("error on %pU: %pV, disabling caching\n",
-- c->sb.set_uuid, &vaf);
-+ c->set_uuid, &vaf);
-
- va_end(args);
-
-@@ -1685,7 +1685,7 @@ static void cache_set_free(struct closure *cl)
- list_del(&c->list);
- mutex_unlock(&bch_register_lock);
-
-- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
-+ pr_info("Cache set %pU unregistered\n", c->set_uuid);
- wake_up(&unregister_wait);
-
- closure_debug_destroy(&c->cl);
-@@ -1755,7 +1755,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
- {
- if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
- pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
-- d->disk->disk_name, c->sb.set_uuid);
-+ d->disk->disk_name, c->set_uuid);
- bcache_device_stop(d);
- } else if (atomic_read(&dc->has_dirty)) {
- /*
-@@ -1862,7 +1862,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
-- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
-+ memcpy(c->set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
- c->sb.nr_in_set = sb->nr_in_set;
-@@ -2145,7 +2145,7 @@ static const char *register_cache_set(struct cache *ca)
- struct cache_set *c;
-
- list_for_each_entry(c, &bch_cache_sets, list)
-- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
-+ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
- if (c->cache)
- return "duplicate cache set member";
-
-@@ -2163,7 +2163,7 @@ static const char *register_cache_set(struct cache *ca)
- return err;
-
- err = "error creating kobject";
-- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
-+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
- kobject_add(&c->internal, &c->kobj, "internal"))
- goto err;
-
-@@ -2188,7 +2188,7 @@ static const char *register_cache_set(struct cache *ca)
- */
- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
- c->sb.version = ca->sb.version;
-- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
-+ memcpy(c->set_uuid, ca->sb.set_uuid, 16);
- c->sb.flags = ca->sb.flags;
- c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu\n", c->sb.version);
-@@ -2698,7 +2698,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
- list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
-- char *set_uuid = c->sb.uuid;
-+ char *set_uuid = c->set_uuid;
-
- if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
- list_del(&pdev->list);
-diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
-index 0bddea663b3b..e41c611d6d3b 100644
---- a/include/trace/events/bcache.h
-+++ b/include/trace/events/bcache.h
-@@ -164,7 +164,7 @@ TRACE_EVENT(bcache_write,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- __entry->inode = inode;
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio->bi_iter.bi_size >> 9;
-@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
- ),
-
- TP_fast_assign(
-- memcpy(__entry->uuid, c->sb.set_uuid, 16);
-+ memcpy(__entry->uuid, c->set_uuid, 16);
- ),
-
- TP_printk("%pU", __entry->uuid)
---
-2.26.2
-
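For reference, a minimal user-space model of what the patch above changes: struct cache_set gains its own set_uuid[16], copied once from the cache's super block, and every UUID comparison (register_cache_set(), bch_cached_dev_attach(), the trace events) reads c->set_uuid instead of c->sb.set_uuid. The structs and helper below are simplified stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct cache_sb {
        uint8_t set_uuid[16];
};

struct cache {
        struct cache_sb sb;
};

struct cache_set {
        uint8_t set_uuid[16];   /* the new field introduced by the patch */
};

/* Mirrors the memcmp() membership checks in register_cache_set(). */
static int cache_belongs_to_set(const struct cache *ca, const struct cache_set *c)
{
        return memcmp(c->set_uuid, ca->sb.set_uuid, 16) == 0;
}

int main(void)
{
        struct cache ca = { .sb = { .set_uuid = { 0xde, 0xad, 0xbe, 0xef } } };
        struct cache_set c;

        /* As in bch_cache_set_alloc(): copy the UUID once at set creation. */
        memcpy(c.set_uuid, ca.sb.set_uuid, 16);

        printf("member: %d\n", cache_belongs_to_set(&ca, &c)); /* prints 1 */
        return 0;
}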
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
deleted file mode 100644
index 1fb42d9..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,258 +0,0 @@
-From 178fa57c56550568bf0d4140d8dc689cc6c11682 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:25:58 +0800
-Subject: [PATCH v2 05/15] bcache: only use block_bytes() on struct cache
-
-Because struct cache_set and struct cache both have struct cache_sb,
-macro block_bytes() can be used on both of them. When removing
-the embedded struct cache_sb from struct cache_set, this macro won't be
-used on struct cache_set anymore.
-
-This patch unifies all block_bytes() usage only on struct cache; this is
-one of the preparations for removing the embedded struct cache_sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/btree.c | 24 ++++++++++++------------
- drivers/md/bcache/debug.c | 8 ++++----
- drivers/md/bcache/journal.c | 8 ++++----
- drivers/md/bcache/request.c | 2 +-
- drivers/md/bcache/super.c | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 7 files changed, 24 insertions(+), 24 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94a62acac4fc..29bec61cafbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -759,7 +759,7 @@ struct bbio {
-
- #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
--#define block_bytes(c) ((c)->sb.block_size << 9)
-+#define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
- {
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 0817ad510d9f..c91b4d58a5b3 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -104,7 +104,7 @@
-
- static inline struct bset *write_block(struct btree *b)
- {
-- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
-+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
- }
-
- static void bch_btree_init_next(struct btree *b)
-@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad btree header";
-- if (b->written + set_blocks(i, block_bytes(b->c)) >
-+ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
- btree_blocks(b))
- goto err;
-
-@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
-
- bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- err = "corrupted btree";
- for (i = write_block(b);
- bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
-- i = ((void *) i) + block_bytes(b->c))
-+ i = ((void *) i) + block_bytes(b->c->cache))
- if (i->seq == b->keys.set[0].data->seq)
- goto err;
-
-@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
-
- b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = cl;
-- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
- b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
- bch_bio_map(b->bio, i);
-
-@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
-- b->written += set_blocks(i, block_bytes(b->c));
-+ b->written += set_blocks(i, block_bytes(b->c->cache));
- }
-
- void bch_btree_node_write(struct btree *b, struct closure *parent)
-@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
-
- if (nodes < 2 ||
- __set_blocks(b->keys.set[0].data, keys,
-- block_bytes(b->c)) > blocks * (nodes - 1))
-+ block_bytes(b->c->cache)) > blocks * (nodes - 1))
- return 0;
-
- for (i = 0; i < nodes; i++) {
-@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k),
-- block_bytes(b->c)) > blocks)
-+ block_bytes(b->c->cache)) > blocks)
- break;
-
- last = k;
-@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- * though)
- */
- if (__set_blocks(n1, n1->keys + n2->keys,
-- block_bytes(b->c)) >
-+ block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]))
- goto out_unlock_nocoalesce;
-
-@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- last = &r->b->key;
- }
-
-- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
-+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
- btree_blocks(new_nodes[i]));
-
- if (last)
-@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
- goto err;
-
- split = set_blocks(btree_bset_first(n1),
-- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
-+ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
-
- if (split) {
- unsigned int keys = 0;
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 0ccc1b0baa42..b00fd08d696b 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
- for (i = (start); \
- (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
- i->seq == (start)->seq; \
-- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
-- block_bytes(b->c))
-+ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
-+ block_bytes(b->c->cache))
-
- void bch_btree_verify(struct btree *b)
- {
-@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
-
- for_each_written_bset(b, ondisk, i) {
- unsigned int block = ((void *) i - (void *) ondisk) /
-- block_bytes(b->c);
-+ block_bytes(b->c->cache);
-
- pr_err("*** on disk block %u:\n", block);
- bch_dump_bset(&b->keys, i, block);
- }
-
- pr_err("*** block %zu not written\n",
-- ((void *) i - (void *) ondisk) / block_bytes(b->c));
-+ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
-
- for (j = 0; j < inmemory->keys; j++)
- if (inmemory->d[j] != sorted->d[j])
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index 027d0f8c4daf..ccd5de0ab0fe 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
- return ret;
- }
-
-- blocks = set_blocks(j, block_bytes(ca->set));
-+ blocks = set_blocks(j, block_bytes(ca));
-
- /*
- * Nodes in 'list' are in linear increasing order of
-@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct cache *ca = c->cache;
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
-- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
-+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
- c->sb.block_size;
-
- struct bio *bio;
-@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
- return;
- }
-
-- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
-+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
-
- w->data->btree_level = c->root->level;
-
-@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c)) * c->sb.block_size;
-+ block_bytes(c->cache)) * c->sb.block_size;
-
- if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index c7cadaafa947..02408fdbf5bb 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
- * bch_data_insert_keys() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
-- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
- return -ENOMEM;
-
- return __bch_keylist_realloc(l, u64s);
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 90a419ad6445..36a538c2e960 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1528,7 +1528,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
-
- kobject_init(&d->kobj, &bch_flash_dev_ktype);
-
-- if (bcache_device_init(d, block_bytes(c), u->sectors,
-+ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
- NULL, &bcache_flash_ops))
- goto err;
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index ac06c0bc3c0a..b9f524ab5cc8 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c));
-- sysfs_hprint(block_size, block_bytes(c));
-+ sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
-
---
-2.26.2
-
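A small, self-contained sketch of the macro change in the patch above (simplified structs, not the kernel ones): the only thing that changes for callers is the argument type, which is why every call site in the diff switches from c or b->c to c->cache or b->c->cache.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct cache_sb {
        uint16_t block_size;    /* block size in 512-byte sectors */
};

struct cache {
        struct cache_sb sb;
};

/*
 * Before the patch, block_bytes(c) also accepted a cache_set via its
 * embedded sb; afterwards it is defined only in terms of struct cache,
 * so callers must pass c->cache (or b->c->cache) explicitly.
 */
#define block_bytes(ca) ((ca)->sb.block_size << 9)

int main(void)
{
        struct cache ca = { .sb = { .block_size = 8 } };        /* 8 sectors = 4 KiB */

        printf("block bytes: %d\n", block_bytes(&ca));  /* prints 4096 */
        return 0;
}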
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
deleted file mode 100644
index 2288492..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 811f8198f1d5337729bbd855bf0e381e60eeeca3 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:28:23 +0800
-Subject: [PATCH v2 06/15] bcache: remove useless alloc_bucket_pages()
-
-Now that no one uses alloc_bucket_pages() anymore, remove it from super.c.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 3 ---
- 1 file changed, 3 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 36a538c2e960..28257f11d835 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1832,9 +1832,6 @@ void bch_cache_set_unregister(struct cache_set *c)
- bch_cache_set_stop(c);
- }
-
--#define alloc_bucket_pages(gfp, c) \
-- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
--
- #define alloc_meta_bucket_pages(gfp, sb) \
- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch
deleted file mode 100644
index 1957844..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From a34562e8f936f77d726fcd94746a467db5f2bf04 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:15:28 +0800
-Subject: [PATCH v2 07/15] bcache: remove useless bucket_pages()
-
-alloc_bucket_pages() was the only user of bucket_pages(). Now that
-alloc_bucket_pages() has been removed from the bcache code, it is safe
-to remove the now-unused macro bucket_pages() as well.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 29bec61cafbb..48a2585b6bbb 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,6 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
- #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
deleted file mode 100644
index 057b8d1..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 964012dfcb5e4ae91630c5d92b51cfba698dc41d Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 21:20:48 +0800
-Subject: [PATCH v2 08/15] bcache: only use bucket_bytes() on struct cache
-
-Because struct cache_set and struct cache both have struct cache_sb,
-macro bucket_bytes() is currently used on both of them. When removing
-the embedded struct cache_sb from struct cache_set, this macro won't be
-used on struct cache_set anymore.
-
-This patch unifies all bucket_bytes() usage only on struct cache; this is
-one of the preparations for removing the embedded struct cache_sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/bcache.h | 2 +-
- drivers/md/bcache/sysfs.c | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 48a2585b6bbb..94d4baf4c405 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -757,7 +757,7 @@ struct bbio {
- #define btree_default_blocks(c) \
- ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-
--#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
-+#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
- #define block_bytes(ca) ((ca)->sb.block_size << 9)
-
- static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index b9f524ab5cc8..4bfe98faadcc 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -713,7 +713,7 @@ SHOW(__bch_cache_set)
-
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
-- sysfs_hprint(bucket_size, bucket_bytes(c));
-+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
- sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
deleted file mode 100644
index ca8ff92..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From 78c5a3367fe79f81efa030ef2cb2fc171009fc14 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:18:45 +0800
-Subject: [PATCH v2 09/15] bcache: avoid data copy between cache_set->sb and
- cache->sb
-
-The struct cache_sb embedded in struct cache_set is only partially used
-and not a real copy of the cache's in-memory super block. When removing
-the embedded cache_set->sb, it is unnecessary to copy data between these
-two in-memory super blocks (cache_set->sb and cache->sb); it is sufficient
-to just use cache->sb.
-
-This patch removes the data copy between these two in-memory super
-blocks in bch_cache_set_alloc() and bcache_write_super(). In the future,
-except for set_uuid, the cache's super block will be referenced by the
-cache set directly, with no copying any more.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 22 +++-------------------
- 1 file changed, 3 insertions(+), 19 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 28257f11d835..20de004ab2ef 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
- down(&c->sb_write_mutex);
- closure_init(cl, &c->cl);
-
-- c->sb.seq++;
-+ ca->sb.seq++;
-
-- if (c->sb.version > version)
-- version = c->sb.version;
--
-- ca->sb.version = version;
-- ca->sb.seq = c->sb.seq;
-- ca->sb.last_mount = c->sb.last_mount;
--
-- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-+ if (ca->sb.version < version)
-+ ca->sb.version = version;
-
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
-@@ -1860,16 +1854,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- bch_cache_accounting_init(&c->accounting, &c->cl);
-
- memcpy(c->set_uuid, sb->set_uuid, 16);
-- c->sb.block_size = sb->block_size;
-- c->sb.bucket_size = sb->bucket_size;
-- c->sb.nr_in_set = sb->nr_in_set;
-- c->sb.last_mount = sb->last_mount;
-- c->sb.version = sb->version;
-- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
-- c->sb.feature_compat = sb->feature_compat;
-- c->sb.feature_ro_compat = sb->feature_ro_compat;
-- c->sb.feature_incompat = sb->feature_incompat;
-- }
-
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
---
-2.26.2
-
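The net effect of the patch above is that bcache_write_super() touches only the cache's own in-memory super block: it bumps ca->sb.seq and raises ca->sb.version when needed, instead of first updating a partial copy in cache_set and mirroring it back. A hedged user-space sketch of that prologue follows; the structs are simplified stand-ins and the version constant's value is assumed for illustration.

#include <stdint.h>
#include <stdio.h>

/* Value assumed for illustration only. */
#define BCACHE_SB_VERSION_CDEV_WITH_UUID        3

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct cache_sb {
        uint64_t seq;
        uint64_t version;
};

struct cache {
        struct cache_sb sb;
};

struct cache_set {
        struct cache *cache;
};

/*
 * Sketch of the simplified bcache_write_super() prologue after the patch:
 * the sequence number and version are updated on ca->sb directly, with no
 * round trip through a partial copy in cache_set.
 */
static void write_super_prepare(struct cache_set *c)
{
        struct cache *ca = c->cache;
        uint64_t version = BCACHE_SB_VERSION_CDEV_WITH_UUID;

        ca->sb.seq++;
        if (ca->sb.version < version)
                ca->sb.version = version;
}

int main(void)
{
        struct cache ca = { .sb = { .seq = 7, .version = 1 } };
        struct cache_set c = { .cache = &ca };

        write_super_prepare(&c);
        printf("seq=%llu version=%llu\n",
               (unsigned long long)ca.sb.seq,
               (unsigned long long)ca.sb.version);      /* seq=8 version=3 */
        return 0;
}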
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
deleted file mode 100644
index e2f8983..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From 754956b7956b6c08c1d8e3eab0a2bda29e220115 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:28:26 +0800
-Subject: [PATCH v2 10/15] bcache: don't check seq numbers in
- register_cache_set()
-
-In order to update the partial super block of cache set, the seq numbers
-of cache and cache set are checked in register_cache_set(). If cache's
-seq number is larger than cache set's seq number, cache set must update
-its partial super block from the cache's super block. It is unnecessary when
-the embedded struct cache_sb is removed from struct cache_set.
-
-This patch removes the seq number checking from register_cache_set();
-later there will be no such partial super block in struct cache_set, as
-the cache set will directly reference the in-memory super block of
-struct cache. This is a preparation patch for removing the embedded struct
-cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 15 ---------------
- 1 file changed, 15 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 20de004ab2ef..cdc1ebee5044 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2160,21 +2160,6 @@ static const char *register_cache_set(struct cache *ca)
- sysfs_create_link(&c->kobj, &ca->kobj, buf))
- goto err;
-
-- /*
-- * A special case is both ca->sb.seq and c->sb.seq are 0,
-- * such condition happens on a new created cache device whose
-- * super block is never flushed yet. In this case c->sb.version
-- * and other members should be updated too, otherwise we will
-- * have a mistaken super block version in cache set.
-- */
-- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
-- c->sb.version = ca->sb.version;
-- memcpy(c->set_uuid, ca->sb.set_uuid, 16);
-- c->sb.flags = ca->sb.flags;
-- c->sb.seq = ca->sb.seq;
-- pr_debug("set version = %llu\n", c->sb.version);
-- }
--
- kobject_get(&ca->kobj);
- ca->set = c;
- ca->set->cache = ca;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch
deleted file mode 100644
index fdbb825..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From aeb61b8c57e542123d0082054e6a65f10848a6f1 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:36:56 +0800
-Subject: [PATCH v2 11/15] bcache: remove can_attach_cache()
-
-After removing the embedded struct cache_sb from struct cache_set, the cache
-set will directly reference the in-memory super block of struct cache.
-It is unnecessary to compare block_size, bucket_size and nr_in_set from
-the identical in-memory super block in can_attach_cache().
-
-This is a preparation patch for later removing cache_set->sb from
-struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/super.c | 10 ----------
- 1 file changed, 10 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index cdc1ebee5044..80cfb9dfe93e 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2112,13 +2112,6 @@ static int run_cache_set(struct cache_set *c)
- return -EIO;
- }
-
--static bool can_attach_cache(struct cache *ca, struct cache_set *c)
--{
-- return ca->sb.block_size == c->sb.block_size &&
-- ca->sb.bucket_size == c->sb.bucket_size &&
-- ca->sb.nr_in_set == c->sb.nr_in_set;
--}
--
- static const char *register_cache_set(struct cache *ca)
- {
- char buf[12];
-@@ -2130,9 +2123,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!can_attach_cache(ca, c))
-- return "cache sb does not match set";
--
- if (!CACHE_SYNC(&ca->sb))
- SET_CACHE_SYNC(&c->sb, false);
-
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
deleted file mode 100644
index 864c8c4..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From 9cbec8384422a47b76db64bfe880e1224893c193 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 14 Aug 2020 23:53:52 +0800
-Subject: [PATCH v2 12/15] bcache: check and set sync status on cache's
- in-memory super block
-
-Currently the cache's sync status is checked and set on cache set's in-
-memory partial super block. After removing the embedded struct cache_sb
-from the cache set and referencing the cache's in-memory super block from struct
-cache_set, the sync status can be set and checked directly on the cache's super
-block.
-
-This patch checks and sets the cache sync status directly on the cache's
-in-memory super block. This is a preparation for later removing the embedded
-struct cache_sb from struct cache_set.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 2 +-
- drivers/md/bcache/journal.c | 2 +-
- drivers/md/bcache/super.c | 7 ++-----
- drivers/md/bcache/sysfs.c | 6 +++---
- 4 files changed, 7 insertions(+), 10 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 1b8310992dd0..65fdbdeb5134 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -361,7 +361,7 @@ static int bch_allocator_thread(void *arg)
- * new stuff to them:
- */
- allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
-- if (CACHE_SYNC(&ca->set->sb)) {
-+ if (CACHE_SYNC(&ca->sb)) {
- /*
- * This could deadlock if an allocation with a btree
- * node locked ever blocked - having the btree node
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index ccd5de0ab0fe..e2810668ede3 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -915,7 +915,7 @@ atomic_t *bch_journal(struct cache_set *c,
- if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
- return NULL;
-
-- if (!CACHE_SYNC(&c->sb))
-+ if (!CACHE_SYNC(&c->cache->sb))
- return NULL;
-
- w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 80cfb9dfe93e..6b94b396f9e9 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1954,7 +1954,7 @@ static int run_cache_set(struct cache_set *c)
- c->nbuckets = ca->sb.nbuckets;
- set_gc_sectors(c);
-
-- if (CACHE_SYNC(&c->sb)) {
-+ if (CACHE_SYNC(&c->cache->sb)) {
- struct bkey *k;
- struct jset *j;
-
-@@ -2077,7 +2077,7 @@ static int run_cache_set(struct cache_set *c)
- * everything is set up - fortunately journal entries won't be
- * written until the SET_CACHE_SYNC() here:
- */
-- SET_CACHE_SYNC(&c->sb, true);
-+ SET_CACHE_SYNC(&c->cache->sb, true);
-
- bch_journal_next(&c->journal);
- bch_journal_meta(c, &cl);
-@@ -2123,9 +2123,6 @@ static const char *register_cache_set(struct cache *ca)
- if (c->cache)
- return "duplicate cache set member";
-
-- if (!CACHE_SYNC(&ca->sb))
-- SET_CACHE_SYNC(&c->sb, false);
--
- goto found;
- }
-
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index 4bfe98faadcc..554e3afc9b68 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -711,7 +711,7 @@ SHOW(__bch_cache_set)
- {
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
-
-- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
-+ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
- sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c->cache));
- sysfs_hprint(block_size, block_bytes(c->cache));
-@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
- if (attr == &sysfs_synchronous) {
- bool sync = strtoul_or_return(buf);
-
-- if (sync != CACHE_SYNC(&c->sb)) {
-- SET_CACHE_SYNC(&c->sb, sync);
-+ if (sync != CACHE_SYNC(&c->cache->sb)) {
-+ SET_CACHE_SYNC(&c->cache->sb, sync);
- bcache_write_super(c);
- }
- }
---
-2.26.2
-
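To make the shape of the change above concrete, here is a minimal user-space model of the sysfs "synchronous" store path after the patch: the sync flag is read and written only through c->cache->sb. The structs and the hand-rolled bit macros below are simplified stand-ins, not the kernel's BITMASK-generated CACHE_SYNC()/SET_CACHE_SYNC().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct cache_sb {
        unsigned long flags;
};

struct cache {
        struct cache_sb sb;
};

struct cache_set {
        struct cache *cache;
};

/* Stand-ins for the kernel's CACHE_SYNC()/SET_CACHE_SYNC() bitfield macros. */
#define CACHE_SYNC_BIT          0x1UL
#define CACHE_SYNC(sb)          (!!((sb)->flags & CACHE_SYNC_BIT))
#define SET_CACHE_SYNC(sb, v)   ((void)((v) ? ((sb)->flags |= CACHE_SYNC_BIT) \
                                            : ((sb)->flags &= ~CACHE_SYNC_BIT)))

/*
 * Mirrors the sysfs "synchronous" store path after the patch: the sync flag
 * lives only in the cache's in-memory super block, reached via c->cache.
 * Returns true when the flag changed and the super block should be written.
 */
static bool store_synchronous(struct cache_set *c, bool sync)
{
        if (sync != CACHE_SYNC(&c->cache->sb)) {
                SET_CACHE_SYNC(&c->cache->sb, sync);
                return true;    /* caller would then call bcache_write_super(c) */
        }
        return false;
}

int main(void)
{
        struct cache ca = { .sb = { .flags = 0 } };
        struct cache_set c = { .cache = &ca };
        bool changed = store_synchronous(&c, true);

        printf("changed: %d, sync now: %d\n", changed, CACHE_SYNC(&ca.sb));     /* 1, 1 */
        return 0;
}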
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
deleted file mode 100644
index 407ffbe..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
+++ /dev/null
@@ -1,438 +0,0 @@
-From 39296f9bea8a8448b882cbdee9688ddc39e5dd67 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 00:20:00 +0800
-Subject: [PATCH v2 13/15] bcache: remove embedded struct cache_sb from struct
- cache_set
-
-Since the bcache code was merged into the mainline kernel, each cache set
-has only one single cache in it. The multiple-cache framework is there but
-the code is far from complete. Considering that multiple copies of cached
-data can also be stored on e.g. md raid1 devices, it is unnecessary to
-support multiple caches in one cache set.
-
-The previous preparation patches resolve the dependencies for explicitly
-making a cache set have only a single cache. Now we don't have to maintain
-an embedded partial super block in struct cache_set; the in-memory super
-block can be directly referenced from struct cache.
-
-This patch removes the embedded struct cache_sb from struct cache_set,
-and fixes all locations where the super block was referenced from this
-removed super block by referencing the in-memory super block of struct
-cache.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
----
- drivers/md/bcache/alloc.c | 6 +++---
- drivers/md/bcache/bcache.h | 4 +---
- drivers/md/bcache/btree.c | 17 +++++++++--------
- drivers/md/bcache/btree.h | 2 +-
- drivers/md/bcache/extents.c | 6 +++---
- drivers/md/bcache/features.c | 4 ++--
- drivers/md/bcache/io.c | 2 +-
- drivers/md/bcache/journal.c | 11 ++++++-----
- drivers/md/bcache/request.c | 4 ++--
- drivers/md/bcache/super.c | 25 ++++++++++++++-----------
- drivers/md/bcache/writeback.c | 2 +-
- 11 files changed, 43 insertions(+), 40 deletions(-)
-
-diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
-index 65fdbdeb5134..8c371d5eef8e 100644
---- a/drivers/md/bcache/alloc.c
-+++ b/drivers/md/bcache/alloc.c
-@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
- {
- struct cache *ca;
- struct bucket *b;
-- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
- int r;
-
- atomic_sub(sectors, &c->rescale);
-@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
- struct open_bucket, list);
- found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
-- ret->sectors_free = c->sb.bucket_size;
-+ ret->sectors_free = c->cache->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
- }
-
-- if (b->sectors_free < c->sb.block_size)
-+ if (b->sectors_free < c->cache->sb.block_size)
- b->sectors_free = 0;
-
- /*
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 94d4baf4c405..1d57f48307e6 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -517,8 +517,6 @@ struct cache_set {
- atomic_t idle_counter;
- atomic_t at_max_writeback_rate;
-
-- struct cache_sb sb;
--
- struct cache *cache;
-
- struct bcache_device **devices;
-@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
-
- static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
- {
-- return s & (c->sb.bucket_size - 1);
-+ return s & (c->cache->sb.bucket_size - 1);
- }
-
- static inline struct cache *PTR_CACHE(struct cache_set *c,
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index c91b4d58a5b3..d09103cc7da5 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
-
- }
-
-@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
- * See the comment arount cache_set->fill_iter.
- */
- iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
-+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
- iter->used = 0;
-
- #ifdef CONFIG_BCACHE_DEBUG
-@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
- goto err;
-
- err = "bad magic";
-- if (i->magic != bset_magic(&b->c->sb))
-+ if (i->magic != bset_magic(&b->c->cache->sb))
- goto err;
-
- err = "bad checksum";
-@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
-
- if (b->written < btree_blocks(b))
- bch_bset_init_next(&b->keys, write_block(b),
-- bset_magic(&b->c->sb));
-+ bset_magic(&b->c->cache->sb));
- out:
- mempool_free(iter, &b->c->fill_iter);
- return;
-@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
-
- do_btree_node_write(b);
-
-- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
-+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
-
- b->written += set_blocks(i, block_bytes(b->c->cache));
-@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
- if (c->verify_data)
- list_move(&c->verify_data->list, &c->btree_cache);
-
-- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
-+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
- #endif
-
- list_splice(&c->btree_cache_freeable,
-@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
- mutex_init(&c->verify_lock);
-
- c->verify_ondisk = (void *)
-- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
-+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
-+ ilog2(meta_bucket_pages(&c->cache->sb)));
- if (!c->verify_ondisk) {
- /*
- * Don't worry about the mca_rereserve buckets
-@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
- }
-
- b->parent = parent;
-- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
-+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
-
- mutex_unlock(&c->bucket_lock);
-
-diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
-index 257969980c49..50482107134f 100644
---- a/drivers/md/bcache/btree.h
-+++ b/drivers/md/bcache/btree.h
-@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
-
- static inline void set_gc_sectors(struct cache_set *c)
- {
-- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
-+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
- }
-
- void bkey_put(struct cache_set *c, struct bkey *k);
-diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
-index 9162af5bb6ec..f4658a1f37b8 100644
---- a/drivers/md/bcache/extents.c
-+++ b/drivers/md/bcache/extents.c
-@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- return true;
-@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-- if (KEY_SIZE(k) + r > c->sb.bucket_size)
-+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
- return "bad, length too big";
- if (bucket < ca->sb.first_bucket)
- return "bad, short offset";
-@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
- size_t n = PTR_BUCKET_NR(b->c, k, j);
-
- pr_cont(" bucket %zu", n);
-- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
- pr_cont(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
- }
-diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
-index 4442df48d28c..6469223f0b77 100644
---- a/drivers/md/bcache/features.c
-+++ b/drivers/md/bcache/features.c
-@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
- for (f = &feature_list[0]; f->compat != 0; f++) { \
- if (f->compat != BCH_FEATURE_ ## type) \
- continue; \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
- if (first) { \
- out += snprintf(out, buf + size - out, \
- "["); \
-@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
- \
- out += snprintf(out, buf + size - out, "%s", f->string);\
- \
-- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
-+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
- out += snprintf(out, buf + size - out, "]"); \
- \
- first = false; \
-diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
-index a14a445618b4..dad71a6b7889 100644
---- a/drivers/md/bcache/io.c
-+++ b/drivers/md/bcache/io.c
-@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
- struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
- struct bio *bio = &b->bio;
-
-- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
-+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
-
- return bio;
- }
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index e2810668ede3..c5526e5087ef 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
-
- bkey_init(k);
- SET_KEY_PTRS(k, 1);
-- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
-
- out:
- if (!journal_full(&c->journal))
-@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
- struct journal_write *w = c->journal.cur;
- struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
-- c->sb.block_size;
-+ ca->sb.block_size;
-
- struct bio *bio;
- struct bio_list list;
-@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
- bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-- w->data->magic = jset_magic(&c->sb);
-+ w->data->magic = jset_magic(&ca->sb);
- w->data->version = BCACHE_JSET_VERSION;
- w->data->last_seq = last_seq(&c->journal);
- w->data->csum = csum_set(w->data);
-@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- size_t sectors;
- struct closure cl;
- bool wait = false;
-+ struct cache *ca = c->cache;
-
- closure_init_stack(&cl);
-
-@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
- struct journal_write *w = c->journal.cur;
-
- sectors = __set_blocks(w->data, w->data->keys + nkeys,
-- block_bytes(c->cache)) * c->sb.block_size;
-+ block_bytes(ca)) * ca->sb.block_size;
-
- if (sectors <= min_t(size_t,
-- c->journal.blocks_free * c->sb.block_size,
-+ c->journal.blocks_free * ca->sb.block_size,
- PAGE_SECTORS << JSET_BITS))
- return w;
-
-diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index 02408fdbf5bb..37e9cf8dbfc1 100644
---- a/drivers/md/bcache/request.c
-+++ b/drivers/md/bcache/request.c
-@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
- goto skip;
- }
-
-- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
-- bio_sectors(bio) & (c->sb.block_size - 1)) {
-+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
-+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
- pr_debug("skipping unaligned io\n");
- goto skip;
- }
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 6b94b396f9e9..d06ea4a3e500 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -471,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
- {
- BKEY_PADDED(key) k;
- struct closure cl;
-- struct cache *ca;
-+ struct cache *ca = c->cache;
- unsigned int size;
-
- closure_init_stack(&cl);
-@@ -480,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
- return 1;
-
-- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
-+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
- SET_KEY_SIZE(&k.key, size);
- uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
- closure_sync(&cl);
-
- /* Only one bucket used for uuid write */
-- ca = PTR_CACHE(c, &k.key, 0);
- atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
-
- bkey_copy(&c->uuid_bucket, &k.key);
-@@ -1199,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
- return -EINVAL;
- }
-
-- if (dc->sb.block_size < c->sb.block_size) {
-+ if (dc->sb.block_size < c->cache->sb.block_size) {
- /* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
-@@ -1658,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
- bch_journal_free(c);
-
- mutex_lock(&bch_register_lock);
-+ bch_bset_sort_state_free(&c->sort);
-+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
-+
- ca = c->cache;
- if (ca) {
- ca->set = NULL;
-@@ -1665,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
- kobject_put(&ca->kobj);
- }
-
-- bch_bset_sort_state_free(&c->sort);
-- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-
- if (c->moving_gc_wq)
- destroy_workqueue(c->moving_gc_wq);
-@@ -1832,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
- struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- {
- int iter_size;
-+ struct cache *ca = container_of(sb, struct cache, sb);
- struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
-
- if (!c)
-@@ -1855,12 +1856,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- memcpy(c->set_uuid, sb->set_uuid, 16);
-
-+ c->cache = ca;
-+ c->cache->set = c;
- c->bucket_bits = ilog2(sb->bucket_size);
- c->block_bits = ilog2(sb->block_size);
-- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
-+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
- c->devices_max_used = 0;
- atomic_set(&c->attached_dev_nr, 0);
-- c->btree_pages = meta_bucket_pages(&c->sb);
-+ c->btree_pages = meta_bucket_pages(sb);
- if (c->btree_pages > BTREE_MAX_PAGES)
- c->btree_pages = max_t(int, c->btree_pages / 4,
- BTREE_MAX_PAGES);
-@@ -1898,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
-
- if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
- sizeof(struct bbio) +
-- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
-+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
- goto err;
-
- if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
-@@ -1908,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
- goto err;
-
-- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
-+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
- if (!c->uuids)
- goto err;
-
-@@ -2088,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
- goto err;
-
- closure_sync(&cl);
-- c->sb.last_mount = (u32)ktime_get_real_seconds();
-+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
- bcache_write_super(c);
-
- list_for_each_entry_safe(dc, t, &uncached_devices, list)
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 4f4ad6b3d43a..3c74996978da 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
- * This is the size of the cache, minus the amount used for
- * flash-only devices
- */
-- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
-+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
- atomic_long_read(&c->flash_dev_dirty_sectors);
-
- /*
---
-2.26.2
-