aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorColy Li <colyli@suse.de>2018-07-28 23:08:21 +0800
committerColy Li <colyli@suse.de>2018-07-28 23:08:21 +0800
commit7caff66cc71684e8fdc2bdec4e62276ce6f20a56 (patch)
treea33c7e5b6beeaf7acd39051dbcd579c36582c81f
parent3a814ba29512c0f34544b9a9d864e85380dc5353 (diff)
downloadbcache-patches-7caff66cc71684e8fdc2bdec4e62276ce6f20a56.tar.gz
for-review: remove unnecessary patch 0008-bcache-initiate-bcache_debug-to-NULL.patch
for-test: add patch set checkpatches_fixes
-rw-r--r--for-review/0008-bcache-initiate-bcache_debug-to-NULL.patch40
-rw-r--r--for-test/checkpatches_fixes/0000-cover-letter.patch67
-rw-r--r--for-test/checkpatches_fixes/0001-bcache-style-fix-to-replace-unsigned-by-unsigned-int.patch2246
-rw-r--r--for-test/checkpatches_fixes/0002-bcache-style-fix-to-add-a-blank-line-after-declarati.patch570
-rw-r--r--for-test/checkpatches_fixes/0003-bcache-add-identifier-names-to-arguments-of-function.patch635
-rw-r--r--for-test/checkpatches_fixes/0004-bcache-style-fixes-for-lines-over-80-characters.patch325
-rw-r--r--for-test/checkpatches_fixes/0005-bcache-replace-Symbolic-permissions-by-octal-permiss.patch54
-rw-r--r--for-test/checkpatches_fixes/0006-bcache-replace-printk-by-pr_-routines.patch143
-rw-r--r--for-test/checkpatches_fixes/0007-bcache-fix-indent-by-replacing-blank-by-tabs.patch34
-rw-r--r--for-test/checkpatches_fixes/0008-bcache-replace-pF-by-pS-in-seq_printf.patch38
-rw-r--r--for-test/checkpatches_fixes/0009-bcache-fix-typo-succesfully-to-successfully.patch43
-rw-r--r--for-test/checkpatches_fixes/0010-bcache-prefer-help-in-Kconfig.patch48
-rw-r--r--for-test/checkpatches_fixes/0011-bcache-do-not-check-NULL-pointer-before-calling-kmem.patch32
-rw-r--r--for-test/checkpatches_fixes/0012-bcache-fix-code-comments-style.patch72
-rw-r--r--for-test/checkpatches_fixes/0013-bcache-add-static-const-prefix-to-char-array-declara.patch30
-rw-r--r--for-test/checkpatches_fixes/0014-bcache-move-open-brace-at-end-of-function-definition.patch52
-rw-r--r--for-test/checkpatches_fixes/0015-bcache-remove-useless-macros-from-util.c.patch32
-rw-r--r--for-test/checkpatches_fixes/0016-bcache-add-missing-SPDX-header.patch45
-rw-r--r--for-test/checkpatches_fixes/0017-bcache-remove-unnecessary-space-before-ioctl-functio.patch31
-rw-r--r--for-test/checkpatches_fixes/0018-bcache-add-the-missing-code-comments-for-smp_mb.patch53
20 files changed, 4550 insertions, 40 deletions
diff --git a/for-review/0008-bcache-initiate-bcache_debug-to-NULL.patch b/for-review/0008-bcache-initiate-bcache_debug-to-NULL.patch
deleted file mode 100644
index 55eca2b..0000000
--- a/for-review/0008-bcache-initiate-bcache_debug-to-NULL.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 8550cab2c0fa863f27ee28ae075278bca0e659da Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Wed, 16 May 2018 22:10:53 +0800
-Subject: [PATCH 8/9] bcache: initiate bcache_debug to NULL
-
-Global variable bcache_debug is firstly initialized in bch_debug_init(),
-and destroyed in bch_debug_exit(). bch_debug_init() is called in
-bcache_init() with many other functions, if one of the previous calling
-ones failed, bcache_exit() will be called in the failure path.
-
-The problem is, if bcache_init() fails before bch_debug_init() is called,
-then in bcache_exit() when bch_debug_exit() is called to destroy global
-variable bcache_debug, at this moment bcache_debug is undefined, then the
-test of "if (!IS_ERR_OR_NULL(bcache_debug))" might be buggy.
-
-This patch initializes global variable bcache_debug to be NULL, to make
-the failure code path to be predictable.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: stable@vger.kernel.org
----
- drivers/md/bcache/debug.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
-index 57f8f5aeee55..24b0eb65ddec 100644
---- a/drivers/md/bcache/debug.c
-+++ b/drivers/md/bcache/debug.c
-@@ -17,7 +17,7 @@
- #include <linux/random.h>
- #include <linux/seq_file.h>
-
--struct dentry *bcache_debug;
-+struct dentry *bcache_debug = NULL;
-
- #ifdef CONFIG_BCACHE_DEBUG
-
---
-2.17.1
-
diff --git a/for-test/checkpatches_fixes/0000-cover-letter.patch b/for-test/checkpatches_fixes/0000-cover-letter.patch
new file mode 100644
index 0000000..da8a8aa
--- /dev/null
+++ b/for-test/checkpatches_fixes/0000-cover-letter.patch
@@ -0,0 +1,67 @@
+From 3963d925e5b72a43355ee3176528a6c858198945 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 23:05:38 +0800
+Subject: [PATCH 00/18] bcache cleanup and fixes reported by checkpatch.pl
+
+This series is code cleanup and fixes for the issues reported by
+checkpatch.pl.
+
+Any code review or comment is welcome.
+
+Coly Li
+---
+Coly Li (18):
+ bcache: style fix to replace 'unsigned' by 'unsigned int'
+ bcache: style fix to add a blank line after declarations
+ bcache: add identifier names to arguments of function definitions
+ bcache: style fixes for lines over 80 characters
+ bcache: replace Symbolic permissions by octal permission numbers
+ bcache: replace printk() by pr_*() routines
+ bcache: fix indent by replacing blank by tabs
+ bcache: replace '%pF' by '%pS' in seq_printf()
+ bcache: fix typo 'succesfully' to 'successfully'
+ bcache: prefer 'help' in Kconfig
+ bcache: do not check NULL pointer before calling kmem_cache_destroy
+ bcache: fix code comments style
+ bcache: add static const prefix to char * array declarations
+ bcache: move open brace at end of function definitions to next line
+ bcache: remove useless macros from util.c
+ bcache: add missing SPDX header
+ bcache: remove unnecessary space before ioctl function pointer
+ arguments
+ bcache: add the missing code comments for smp_mb()
+
+ drivers/md/bcache/Kconfig | 6 +-
+ drivers/md/bcache/alloc.c | 39 ++++---
+ drivers/md/bcache/bcache.h | 208 +++++++++++++++++-----------------
+ drivers/md/bcache/bset.c | 133 ++++++++++++----------
+ drivers/md/bcache/bset.h | 146 +++++++++++++-----------
+ drivers/md/bcache/btree.c | 66 ++++++-----
+ drivers/md/bcache/btree.h | 86 +++++++-------
+ drivers/md/bcache/closure.c | 6 +-
+ drivers/md/bcache/closure.h | 6 +-
+ drivers/md/bcache/debug.c | 23 ++--
+ drivers/md/bcache/debug.h | 6 +-
+ drivers/md/bcache/extents.c | 37 +++---
+ drivers/md/bcache/extents.h | 6 +-
+ drivers/md/bcache/io.c | 24 ++--
+ drivers/md/bcache/journal.c | 25 ++--
+ drivers/md/bcache/journal.h | 28 ++---
+ drivers/md/bcache/movinggc.c | 14 ++-
+ drivers/md/bcache/request.c | 59 +++++-----
+ drivers/md/bcache/request.h | 18 +--
+ drivers/md/bcache/stats.c | 15 ++-
+ drivers/md/bcache/stats.h | 15 ++-
+ drivers/md/bcache/super.c | 103 ++++++++++-------
+ drivers/md/bcache/sysfs.c | 36 +++---
+ drivers/md/bcache/sysfs.h | 6 +-
+ drivers/md/bcache/util.c | 5 +-
+ drivers/md/bcache/util.h | 24 ++--
+ drivers/md/bcache/writeback.c | 27 +++--
+ drivers/md/bcache/writeback.h | 19 ++--
+ include/uapi/linux/bcache.h | 8 +-
+ 29 files changed, 662 insertions(+), 532 deletions(-)
+
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0001-bcache-style-fix-to-replace-unsigned-by-unsigned-int.patch b/for-test/checkpatches_fixes/0001-bcache-style-fix-to-replace-unsigned-by-unsigned-int.patch
new file mode 100644
index 0000000..a123deb
--- /dev/null
+++ b/for-test/checkpatches_fixes/0001-bcache-style-fix-to-replace-unsigned-by-unsigned-int.patch
@@ -0,0 +1,2246 @@
+From 0f4c5310d2844532bce6a4033c97e3a69d5bdef6 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 26 Jun 2018 22:30:50 +0800
+Subject: [PATCH 01/18] bcache: style fix to replace 'unsigned' by 'unsigned
+ int'
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/alloc.c | 36 ++++++-----
+ drivers/md/bcache/bcache.h | 104 +++++++++++++++----------------
+ drivers/md/bcache/bset.c | 114 ++++++++++++++++++----------------
+ drivers/md/bcache/bset.h | 34 +++++-----
+ drivers/md/bcache/btree.c | 50 +++++++--------
+ drivers/md/bcache/btree.h | 4 +-
+ drivers/md/bcache/closure.h | 2 +-
+ drivers/md/bcache/debug.c | 6 +-
+ drivers/md/bcache/extents.c | 22 +++----
+ drivers/md/bcache/io.c | 18 +++---
+ drivers/md/bcache/journal.c | 20 +++---
+ drivers/md/bcache/journal.h | 8 +--
+ drivers/md/bcache/movinggc.c | 12 ++--
+ drivers/md/bcache/request.c | 42 ++++++-------
+ drivers/md/bcache/request.h | 18 +++---
+ drivers/md/bcache/stats.c | 12 ++--
+ drivers/md/bcache/stats.h | 2 +-
+ drivers/md/bcache/super.c | 34 +++++-----
+ drivers/md/bcache/sysfs.c | 18 +++---
+ drivers/md/bcache/util.h | 9 +--
+ drivers/md/bcache/writeback.c | 19 +++---
+ drivers/md/bcache/writeback.h | 12 ++--
+ include/uapi/linux/bcache.h | 6 +-
+ 23 files changed, 307 insertions(+), 295 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 7fa2631b422c..89f663d22551 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
+- unsigned i;
++ unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
++ unsigned int i;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+
+ #define bucket_prio(b) \
+ ({ \
+- unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
++ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ \
+ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
+ })
+@@ -301,7 +301,7 @@ do { \
+
+ static int bch_allocator_push(struct cache *ca, long bucket)
+ {
+- unsigned i;
++ unsigned int i;
+
+ /* Prios/gens are actually the most important reserve */
+ if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+@@ -385,7 +385,7 @@ static int bch_allocator_thread(void *arg)
+
+ /* Allocation */
+
+-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
++long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
+ {
+ DEFINE_WAIT(w);
+ struct bucket *b;
+@@ -421,7 +421,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+ if (expensive_debug_checks(ca->set)) {
+ size_t iter;
+ long i;
+- unsigned j;
++ unsigned int j;
+
+ for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
+ BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
+@@ -470,14 +470,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
+
+ void bch_bucket_free(struct cache_set *c, struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ __bch_bucket_free(PTR_CACHE(c, k, i),
+ PTR_BUCKET(c, k, i));
+ }
+
+-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
++int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, int n, bool wait)
+ {
+ int i;
+@@ -510,7 +510,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+ return -1;
+ }
+
+-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
++int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, int n, bool wait)
+ {
+ int ret;
+@@ -524,8 +524,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+
+ struct open_bucket {
+ struct list_head list;
+- unsigned last_write_point;
+- unsigned sectors_free;
++ unsigned int last_write_point;
++ unsigned int sectors_free;
+ BKEY_PADDED(key);
+ };
+
+@@ -556,7 +556,7 @@ struct open_bucket {
+ */
+ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+- unsigned write_point,
++ unsigned int write_point,
+ struct bkey *alloc)
+ {
+ struct open_bucket *ret, *ret_task = NULL;
+@@ -595,12 +595,16 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ *
+ * If s->writeback is true, will not fail.
+ */
+-bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+- unsigned write_point, unsigned write_prio, bool wait)
++bool bch_alloc_sectors(struct cache_set *c,
++ struct bkey *k,
++ unsigned int sectors,
++ unsigned int write_point,
++ unsigned int write_prio,
++ bool wait)
+ {
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+- unsigned i;
++ unsigned int i;
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+@@ -613,7 +617,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+- unsigned watermark = write_prio
++ unsigned int watermark = write_prio
+ ? RESERVE_MOVINGGC
+ : RESERVE_NONE;
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index d6bf294f3907..dd134c36ae92 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -252,7 +252,7 @@ struct bcache_device {
+ struct kobject kobj;
+
+ struct cache_set *c;
+- unsigned id;
++ unsigned int id;
+ #define BCACHEDEVNAME_SIZE 12
+ char name[BCACHEDEVNAME_SIZE];
+
+@@ -264,18 +264,18 @@ struct bcache_device {
+ #define BCACHE_DEV_UNLINK_DONE 2
+ #define BCACHE_DEV_WB_RUNNING 3
+ #define BCACHE_DEV_RATE_DW_RUNNING 4
+- unsigned nr_stripes;
+- unsigned stripe_size;
++ unsigned int nr_stripes;
++ unsigned int stripe_size;
+ atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
+
+ struct bio_set bio_split;
+
+- unsigned data_csum:1;
++ unsigned int data_csum:1;
+
+ int (*cache_miss)(struct btree *, struct search *,
+- struct bio *, unsigned);
+- int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
++ struct bio *, unsigned int);
++ int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long);
+ };
+
+ struct io {
+@@ -284,7 +284,7 @@ struct io {
+ struct list_head lru;
+
+ unsigned long jiffies;
+- unsigned sequential;
++ unsigned int sequential;
+ sector_t last;
+ };
+
+@@ -365,18 +365,18 @@ struct cached_dev {
+ struct cache_accounting accounting;
+
+ /* The rest of this all shows up in sysfs */
+- unsigned sequential_cutoff;
+- unsigned readahead;
++ unsigned int sequential_cutoff;
++ unsigned int readahead;
+
+- unsigned io_disable:1;
+- unsigned verify:1;
+- unsigned bypass_torture_test:1;
++ unsigned int io_disable:1;
++ unsigned int verify:1;
++ unsigned int bypass_torture_test:1;
+
+- unsigned partial_stripes_expensive:1;
+- unsigned writeback_metadata:1;
+- unsigned writeback_running:1;
++ unsigned int partial_stripes_expensive:1;
++ unsigned int writeback_metadata:1;
++ unsigned int writeback_running:1;
+ unsigned char writeback_percent;
+- unsigned writeback_delay;
++ unsigned int writeback_delay;
+
+ uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+@@ -384,16 +384,16 @@ struct cached_dev {
+ int64_t writeback_rate_integral_scaled;
+ int32_t writeback_rate_change;
+
+- unsigned writeback_rate_update_seconds;
+- unsigned writeback_rate_i_term_inverse;
+- unsigned writeback_rate_p_term_inverse;
+- unsigned writeback_rate_minimum;
++ unsigned int writeback_rate_update_seconds;
++ unsigned int writeback_rate_i_term_inverse;
++ unsigned int writeback_rate_p_term_inverse;
++ unsigned int writeback_rate_minimum;
+
+ enum stop_on_failure stop_when_cache_set_failed;
+ #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
+ atomic_t io_errors;
+- unsigned error_limit;
+- unsigned offline_seconds;
++ unsigned int error_limit;
++ unsigned int offline_seconds;
+
+ char backing_dev_name[BDEVNAME_SIZE];
+ };
+@@ -454,7 +454,7 @@ struct cache {
+ * until a gc finishes - otherwise we could pointlessly burn a ton of
+ * cpu
+ */
+- unsigned invalidate_needs_gc;
++ unsigned int invalidate_needs_gc;
+
+ bool discard; /* Get rid of? */
+
+@@ -478,7 +478,7 @@ struct gc_stat {
+
+ size_t nkeys;
+ uint64_t data; /* sectors */
+- unsigned in_use; /* percent */
++ unsigned int in_use; /* percent */
+ };
+
+ /*
+@@ -522,7 +522,7 @@ struct cache_set {
+ int caches_loaded;
+
+ struct bcache_device **devices;
+- unsigned devices_max_used;
++ unsigned int devices_max_used;
+ struct list_head cached_devs;
+ uint64_t cached_dev_sectors;
+ struct closure caching;
+@@ -550,7 +550,7 @@ struct cache_set {
+ * Default number of pages for a new btree node - may be less than a
+ * full bucket
+ */
+- unsigned btree_pages;
++ unsigned int btree_pages;
+
+ /*
+ * Lists of struct btrees; lru is the list for structs that have memory
+@@ -573,7 +573,7 @@ struct cache_set {
+ struct list_head btree_cache_freed;
+
+ /* Number of elements in btree_cache + btree_cache_freeable lists */
+- unsigned btree_cache_used;
++ unsigned int btree_cache_used;
+
+ /*
+ * If we need to allocate memory for a new btree node and that
+@@ -647,7 +647,7 @@ struct cache_set {
+ struct mutex verify_lock;
+ #endif
+
+- unsigned nr_uuids;
++ unsigned int nr_uuids;
+ struct uuid_entry *uuids;
+ BKEY_PADDED(uuid_bucket);
+ struct closure uuid_write;
+@@ -668,12 +668,12 @@ struct cache_set {
+ struct journal journal;
+
+ #define CONGESTED_MAX 1024
+- unsigned congested_last_us;
++ unsigned int congested_last_us;
+ atomic_t congested;
+
+ /* The rest of this all shows up in sysfs */
+- unsigned congested_read_threshold_us;
+- unsigned congested_write_threshold_us;
++ unsigned int congested_read_threshold_us;
++ unsigned int congested_write_threshold_us;
+
+ struct time_stats btree_gc_time;
+ struct time_stats btree_split_time;
+@@ -692,16 +692,16 @@ struct cache_set {
+ ON_ERROR_PANIC,
+ } on_error;
+ #define DEFAULT_IO_ERROR_LIMIT 8
+- unsigned error_limit;
+- unsigned error_decay;
++ unsigned int error_limit;
++ unsigned int error_decay;
+
+ unsigned short journal_delay_ms;
+ bool expensive_debug_checks;
+- unsigned verify:1;
+- unsigned key_merging_disabled:1;
+- unsigned gc_always_rewrite:1;
+- unsigned shrinker_disabled:1;
+- unsigned copy_gc_enabled:1;
++ unsigned int verify:1;
++ unsigned int key_merging_disabled:1;
++ unsigned int gc_always_rewrite:1;
++ unsigned int shrinker_disabled:1;
++ unsigned int copy_gc_enabled:1;
+
+ #define BUCKET_HASH_BITS 12
+ struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
+@@ -710,7 +710,7 @@ struct cache_set {
+ };
+
+ struct bbio {
+- unsigned submit_time_us;
++ unsigned int submit_time_us;
+ union {
+ struct bkey key;
+ uint64_t _pad[3];
+@@ -727,10 +727,10 @@ struct bbio {
+
+ #define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
+ #define btree_blocks(b) \
+- ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
++ ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+
+ #define btree_default_blocks(c) \
+- ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
++ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+ #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+ #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+@@ -759,21 +759,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+
+ static inline struct cache *PTR_CACHE(struct cache_set *c,
+ const struct bkey *k,
+- unsigned ptr)
++ unsigned int ptr)
+ {
+ return c->cache[PTR_DEV(k, ptr)];
+ }
+
+ static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+ const struct bkey *k,
+- unsigned ptr)
++ unsigned int ptr)
+ {
+ return sector_to_bucket(c, PTR_OFFSET(k, ptr));
+ }
+
+ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
+ const struct bkey *k,
+- unsigned ptr)
++ unsigned int ptr)
+ {
+ return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
+ }
+@@ -785,13 +785,13 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
+ }
+
+ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+- unsigned i)
++ unsigned int i)
+ {
+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+ }
+
+ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+- unsigned i)
++ unsigned int i)
+ {
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+ }
+@@ -886,7 +886,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
+ static inline void wake_up_allocators(struct cache_set *c)
+ {
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ for_each_cache(ca, c, i)
+ wake_up_process(ca->alloc_thread);
+@@ -942,13 +942,13 @@ void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
+ void __bch_bucket_free(struct cache *, struct bucket *);
+ void bch_bucket_free(struct cache_set *, struct bkey *);
+
+-long bch_bucket_alloc(struct cache *, unsigned, bool);
+-int __bch_bucket_alloc_set(struct cache_set *, unsigned,
++long bch_bucket_alloc(struct cache *, unsigned int, bool);
++int __bch_bucket_alloc_set(struct cache_set *, unsigned int,
+ struct bkey *, int, bool);
+-int bch_bucket_alloc_set(struct cache_set *, unsigned,
++int bch_bucket_alloc_set(struct cache_set *, unsigned int,
+ struct bkey *, int, bool);
+-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+- unsigned, unsigned, bool);
++bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int,
++ unsigned int, unsigned int, bool);
+ bool bch_cached_dev_error(struct cached_dev *dc);
+
+ __printf(2, 3)
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index f3403b45bc28..e3576f279493 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -18,7 +18,7 @@
+
+ #ifdef CONFIG_BCACHE_DEBUG
+
+-void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
++void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
+ {
+ struct bkey *k, *next;
+
+@@ -26,7 +26,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+ next = bkey_next(k);
+
+ printk(KERN_ERR "block %u key %u/%u: ", set,
+- (unsigned) ((u64 *) k - i->d), i->keys);
++ (unsigned int) ((u64 *) k - i->d), i->keys);
+
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+@@ -42,7 +42,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+
+ void bch_dump_bucket(struct btree_keys *b)
+ {
+- unsigned i;
++ unsigned int i;
+
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
+
+ int __bch_count_data(struct btree_keys *b)
+ {
+- unsigned ret = 0;
++ unsigned int ret = 0;
+ struct btree_iter iter;
+ struct bkey *k;
+
+@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+ /* Keylists */
+
+-int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
++int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
+ {
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
+ /* Key/pointer manipulation */
+
+ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+- unsigned i)
++ unsigned int i)
+ {
+ BUG_ON(i > KEY_PTRS(src));
+
+@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+
+ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
+ {
+- unsigned i, len = 0;
++ unsigned int i, len = 0;
+
+ if (bkey_cmp(where, &START_KEY(k)) <= 0)
+ return false;
+@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
+
+ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
+ {
+- unsigned len = 0;
++ unsigned int len = 0;
+
+ if (bkey_cmp(where, k) >= 0)
+ return false;
+@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
+ #define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+ struct bkey_float {
+- unsigned exponent:BKEY_EXPONENT_BITS;
+- unsigned m:BKEY_MID_BITS;
+- unsigned mantissa:BKEY_MANTISSA_BITS;
++ unsigned int exponent:BKEY_EXPONENT_BITS;
++ unsigned int m:BKEY_MID_BITS;
++ unsigned int mantissa:BKEY_MANTISSA_BITS;
+ } __packed;
+
+ /*
+@@ -311,7 +311,7 @@ void bch_btree_keys_free(struct btree_keys *b)
+ }
+ EXPORT_SYMBOL(bch_btree_keys_free);
+
+-int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
++int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, gfp_t gfp)
+ {
+ struct bset_tree *t = b->set;
+
+@@ -345,7 +345,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
+ void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks)
+ {
+- unsigned i;
++ unsigned int i;
+
+ b->ops = ops;
+ b->expensive_debug_checks = expensive_debug_checks;
+@@ -366,7 +366,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
+
+ /* Binary tree stuff for auxiliary search trees */
+
+-static unsigned inorder_next(unsigned j, unsigned size)
++static unsigned int inorder_next(unsigned int j, unsigned int size)
+ {
+ if (j * 2 + 1 < size) {
+ j = j * 2 + 1;
+@@ -379,7 +379,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
+ return j;
+ }
+
+-static unsigned inorder_prev(unsigned j, unsigned size)
++static unsigned int inorder_prev(unsigned int j, unsigned int size)
+ {
+ if (j * 2 < size) {
+ j = j * 2;
+@@ -405,10 +405,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
+ * extra is a function of size:
+ * extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+ */
+-static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
++static unsigned int __to_inorder(unsigned int j,
++ unsigned int size,
++ unsigned int extra)
+ {
+- unsigned b = fls(j);
+- unsigned shift = fls(size - 1) - b;
++ unsigned int b = fls(j);
++ unsigned int shift = fls(size - 1) - b;
+
+ j ^= 1U << (b - 1);
+ j <<= 1;
+@@ -421,14 +423,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+ return j;
+ }
+
+-static unsigned to_inorder(unsigned j, struct bset_tree *t)
++static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
+ {
+ return __to_inorder(j, t->size, t->extra);
+ }
+
+-static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
++static unsigned int __inorder_to_tree(unsigned int j,
++ unsigned int size,
++ unsigned int extra)
+ {
+- unsigned shift;
++ unsigned int shift;
+
+ if (j > extra)
+ j += j - extra;
+@@ -441,7 +445,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+ return j;
+ }
+
+-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
++static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
+ {
+ return __inorder_to_tree(j, t->size, t->extra);
+ }
+@@ -452,11 +456,11 @@ void inorder_test(void)
+ unsigned long done = 0;
+ ktime_t start = ktime_get();
+
+- for (unsigned size = 2;
++ for (unsigned int size = 2;
+ size < 65536000;
+ size++) {
+- unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+- unsigned i = 1, j = rounddown_pow_of_two(size - 1);
++ unsigned int extra = (size - rounddown_pow_of_two(size - 1)) << 1;
++ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
+
+ if (!(size % 4096))
+ printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+@@ -502,30 +506,31 @@ void inorder_test(void)
+ * of the previous key so we can walk backwards to it from t->tree[j]'s key.
+ */
+
+-static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
+- unsigned offset)
++static struct bkey *cacheline_to_bkey(struct bset_tree *t,
++ unsigned int cacheline,
++ unsigned int offset)
+ {
+ return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
+ }
+
+-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
++static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+ {
+ return ((void *) k - (void *) t->data) / BSET_CACHELINE;
+ }
+
+-static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+- unsigned cacheline,
++static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
++ unsigned int cacheline,
+ struct bkey *k)
+ {
+ return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
+ }
+
+-static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
++static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
+ {
+ return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
+ }
+
+-static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
++static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
+ {
+ return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
+ }
+@@ -534,7 +539,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+ * For the write set - the one we're currently inserting keys into - we don't
+ * maintain a full search tree, we just keep a simple lookup table in t->prev.
+ */
+-static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
++static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
+ {
+ return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
+ }
+@@ -546,14 +551,14 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
+ return low;
+ }
+
+-static inline unsigned bfloat_mantissa(const struct bkey *k,
++static inline unsigned int bfloat_mantissa(const struct bkey *k,
+ struct bkey_float *f)
+ {
+ const uint64_t *p = &k->low - (f->exponent >> 6);
+ return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
+ }
+
+-static void make_bfloat(struct bset_tree *t, unsigned j)
++static void make_bfloat(struct bset_tree *t, unsigned int j)
+ {
+ struct bkey_float *f = &t->tree[j];
+ struct bkey *m = tree_to_bkey(t, j);
+@@ -591,7 +596,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
+ static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
+ {
+ if (t != b->set) {
+- unsigned j = roundup(t[-1].size,
++ unsigned int j = roundup(t[-1].size,
+ 64 / sizeof(struct bkey_float));
+
+ t->tree = t[-1].tree + j;
+@@ -637,13 +642,13 @@ void bch_bset_build_written_tree(struct btree_keys *b)
+ {
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey *prev = NULL, *k = t->data->start;
+- unsigned j, cacheline = 1;
++ unsigned int j, cacheline = 1;
+
+ b->last_set_unwritten = 0;
+
+ bset_alloc_tree(b, t);
+
+- t->size = min_t(unsigned,
++ t->size = min_t(unsigned int,
+ bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ b->set->tree + btree_keys_cachelines(b) - t->tree);
+
+@@ -683,7 +688,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
+ void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
+ {
+ struct bset_tree *t;
+- unsigned inorder, j = 1;
++ unsigned int inorder, j = 1;
+
+ for (t = b->set; t <= bset_tree_last(b); t++)
+ if (k < bset_bkey_last(t->data))
+@@ -730,8 +735,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ struct bset_tree *t,
+ struct bkey *k)
+ {
+- unsigned shift = bkey_u64s(k);
+- unsigned j = bkey_to_cacheline(t, k);
++ unsigned int shift = bkey_u64s(k);
++ unsigned int j = bkey_to_cacheline(t, k);
+
+ /* We're getting called from btree_split() or btree_gc, just bail out */
+ if (!t->size)
+@@ -818,10 +823,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ }
+ EXPORT_SYMBOL(bch_bset_insert);
+
+-unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
++unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key)
+ {
+- unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
++ unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+ struct btree_iter iter;
+@@ -873,10 +878,10 @@ struct bset_search_iter {
+ static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
+ const struct bkey *search)
+ {
+- unsigned li = 0, ri = t->size;
++ unsigned int li = 0, ri = t->size;
+
+ while (li + 1 != ri) {
+- unsigned m = (li + ri) >> 1;
++ unsigned int m = (li + ri) >> 1;
+
+ if (bkey_cmp(table_to_bkey(t, m), search) > 0)
+ ri = m;
+@@ -895,10 +900,10 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
+ {
+ struct bkey *l, *r;
+ struct bkey_float *f;
+- unsigned inorder, j, n = 1;
++ unsigned int inorder, j, n = 1;
+
+ do {
+- unsigned p = n << 4;
++ unsigned int p = n << 4;
+ p &= ((int) (p - t->size)) >> 31;
+
+ prefetch(&t->tree[p]);
+@@ -915,7 +920,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
+ * to work - that's done in make_bfloat()
+ */
+ if (likely(f->exponent != 127))
+- n = j * 2 + (((unsigned)
++ n = j * 2 + (((unsigned int)
+ (f->mantissa -
+ bfloat_mantissa(search, f))) >> 31);
+ else
+@@ -1121,7 +1126,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
+ mempool_exit(&state->pool);
+ }
+
+-int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
++int bch_bset_sort_state_init(struct bset_sort_state *state,
++ unsigned int page_order)
+ {
+ spin_lock_init(&state->time.lock);
+
+@@ -1174,7 +1180,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
+ }
+
+ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+- unsigned start, unsigned order, bool fixup,
++ unsigned int start, unsigned int order, bool fixup,
+ struct bset_sort_state *state)
+ {
+ uint64_t start_time;
+@@ -1225,7 +1231,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+ bch_time_stats_update(&state->time, start_time);
+ }
+
+-void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
++void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state)
+ {
+ size_t order = b->page_order, keys = 0;
+@@ -1235,7 +1241,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+ __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+
+ if (start) {
+- unsigned i;
++ unsigned int i;
+
+ for (i = start; i <= b->nsets; i++)
+ keys += b->set[i].data->keys;
+@@ -1275,7 +1281,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+
+ void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
+ {
+- unsigned crit = SORT_CRIT;
++ unsigned int crit = SORT_CRIT;
+ int i;
+
+ /* Don't sort if nothing to do */
+@@ -1304,7 +1310,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);
+
+ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i <= b->nsets; i++) {
+ struct bset_tree *t = &b->set[i];
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index b867f2200495..fdc296103113 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -163,10 +163,10 @@ struct bset_tree {
+ */
+
+ /* size of the binary tree and prev array */
+- unsigned size;
++ unsigned int size;
+
+ /* function of size - precalculated for to_inorder() */
+- unsigned extra;
++ unsigned int extra;
+
+ /* copy of the last key in the set */
+ struct bkey end;
+@@ -211,7 +211,7 @@ struct btree_keys {
+ const struct btree_keys_ops *ops;
+ uint8_t page_order;
+ uint8_t nsets;
+- unsigned last_set_unwritten:1;
++ unsigned int last_set_unwritten:1;
+ bool *expensive_debug_checks;
+
+ /*
+@@ -239,12 +239,12 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+ return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+ }
+
+-static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
++static inline unsigned int bset_byte_offset(struct btree_keys *b, struct bset *i)
+ {
+ return ((size_t) i) - ((size_t) b->set->data);
+ }
+
+-static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
++static inline unsigned int bset_sector_offset(struct btree_keys *b, struct bset *i)
+ {
+ return bset_byte_offset(b, i) >> 9;
+ }
+@@ -273,7 +273,7 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+ }
+
+ static inline struct bset *bset_next_set(struct btree_keys *b,
+- unsigned block_bytes)
++ unsigned int block_bytes)
+ {
+ struct bset *i = bset_tree_last(b)->data;
+
+@@ -281,7 +281,7 @@ static inline struct bset *bset_next_set(struct btree_keys *b,
+ }
+
+ void bch_btree_keys_free(struct btree_keys *);
+-int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
++int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t);
+ void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+ bool *);
+
+@@ -290,7 +290,7 @@ void bch_bset_build_written_tree(struct btree_keys *);
+ void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+ bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+ void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+-unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
++unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *,
+ struct bkey *);
+
+ enum {
+@@ -349,20 +349,20 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ struct bset_sort_state {
+ mempool_t pool;
+
+- unsigned page_order;
+- unsigned crit_factor;
++ unsigned int page_order;
++ unsigned int crit_factor;
+
+ struct time_stats time;
+ };
+
+ void bch_bset_sort_state_free(struct bset_sort_state *);
+-int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
++int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int);
+ void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+ void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+ struct bset_sort_state *);
+ void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+ struct bset_sort_state *);
+-void bch_btree_sort_partial(struct btree_keys *, unsigned,
++void bch_btree_sort_partial(struct btree_keys *, unsigned int,
+ struct bset_sort_state *);
+
+ static inline void bch_btree_sort(struct btree_keys *b,
+@@ -383,7 +383,7 @@ void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+ #define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+-static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
++static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
+ {
+ return bkey_idx(i->start, idx);
+ }
+@@ -402,7 +402,7 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
+ }
+
+ void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+- unsigned);
++ unsigned int);
+ bool __bch_cut_front(const struct bkey *, struct bkey *);
+ bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+@@ -524,7 +524,7 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
+
+ struct bkey *bch_keylist_pop(struct keylist *);
+ void bch_keylist_pop_front(struct keylist *);
+-int __bch_keylist_realloc(struct keylist *, unsigned);
++int __bch_keylist_realloc(struct keylist *, unsigned int);
+
+ /* Debug stuff */
+
+@@ -532,7 +532,7 @@ int __bch_keylist_realloc(struct keylist *, unsigned);
+
+ int __bch_count_data(struct btree_keys *);
+ void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
+-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
++void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
+ void bch_dump_bucket(struct btree_keys *);
+
+ #else
+@@ -541,7 +541,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+ static inline void __printf(2, 3)
+ __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+ static inline void bch_dump_bucket(struct btree_keys *b) {}
+-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
++void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
+
+ #endif
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 547c9eedc2f4..a80fd1106ed0 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -180,7 +180,7 @@ static void bch_btree_init_next(struct btree *b)
+
+ void bkey_put(struct cache_set *c, struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i))
+@@ -476,7 +476,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+
+ void bch_btree_node_write(struct btree *b, struct closure *parent)
+ {
+- unsigned nsets = b->keys.nsets;
++ unsigned int nsets = b->keys.nsets;
+
+ lockdep_assert_held(&b->lock);
+
+@@ -578,7 +578,7 @@ static void mca_bucket_free(struct btree *b)
+ list_move(&b->list, &b->c->btree_cache_freeable);
+ }
+
+-static unsigned btree_order(struct bkey *k)
++static unsigned int btree_order(struct bkey *k)
+ {
+ return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
+ }
+@@ -586,7 +586,7 @@ static unsigned btree_order(struct bkey *k)
+ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
+ {
+ if (!bch_btree_keys_alloc(&b->keys,
+- max_t(unsigned,
++ max_t(unsigned int,
+ ilog2(b->c->btree_pages),
+ btree_order(k)),
+ gfp)) {
+@@ -617,7 +617,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
+ return b;
+ }
+
+-static int mca_reap(struct btree *b, unsigned min_order, bool flush)
++static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+ {
+ struct closure cl;
+
+@@ -783,7 +783,7 @@ void bch_btree_cache_free(struct cache_set *c)
+
+ int bch_btree_cache_alloc(struct cache_set *c)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < mca_reserve(c); i++)
+ if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+@@ -1133,7 +1133,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
+
+ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ mutex_lock(&b->c->bucket_lock);
+
+@@ -1154,7 +1154,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
+ {
+ struct cache_set *c = b->c;
+ struct cache *ca;
+- unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
++ unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
+
+ mutex_lock(&c->bucket_lock);
+
+@@ -1178,7 +1178,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+ struct bkey *k)
+ {
+ uint8_t stale = 0;
+- unsigned i;
++ unsigned int i;
+ struct bucket *g;
+
+ /*
+@@ -1216,7 +1216,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+ SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
+
+ /* guard against overflow */
+- SET_GC_SECTORS_USED(g, min_t(unsigned,
++ SET_GC_SECTORS_USED(g, min_t(unsigned int,
+ GC_SECTORS_USED(g) + KEY_SIZE(k),
+ MAX_GC_SECTORS_USED));
+
+@@ -1230,7 +1230,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+
+ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+@@ -1256,7 +1256,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+ {
+ uint8_t stale = 0;
+- unsigned keys = 0, good_keys = 0;
++ unsigned int keys = 0, good_keys = 0;
+ struct bkey *k;
+ struct btree_iter iter;
+ struct bset_tree *t;
+@@ -1299,7 +1299,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+
+ struct gc_merge_info {
+ struct btree *b;
+- unsigned keys;
++ unsigned int keys;
+ };
+
+ static int bch_btree_insert_node(struct btree *, struct btree_op *,
+@@ -1308,7 +1308,7 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
+ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct gc_stat *gc, struct gc_merge_info *r)
+ {
+- unsigned i, nodes = 0, keys = 0, blocks;
++ unsigned int i, nodes = 0, keys = 0, blocks;
+ struct btree *new_nodes[GC_MERGE_NODES];
+ struct keylist keylist;
+ struct closure cl;
+@@ -1508,11 +1508,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ return -EINTR;
+ }
+
+-static unsigned btree_gc_count_keys(struct btree *b)
++static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+ struct bkey *k;
+ struct btree_iter iter;
+- unsigned ret = 0;
++ unsigned int ret = 0;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ ret += bkey_u64s(k);
+@@ -1642,7 +1642,7 @@ static void btree_gc_start(struct cache_set *c)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned i;
++ unsigned int i;
+
+ if (!c->gc_mark_valid)
+ return;
+@@ -1668,7 +1668,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ {
+ struct bucket *b;
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ mutex_lock(&c->bucket_lock);
+
+@@ -1686,7 +1686,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+- unsigned j;
++ unsigned int j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+@@ -1775,7 +1775,7 @@ static void bch_btree_gc(struct cache_set *c)
+ static bool gc_should_run(struct cache_set *c)
+ {
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc)
+@@ -1860,7 +1860,7 @@ void bch_initial_gc_finish(struct cache_set *c)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned i;
++ unsigned int i;
+
+ bch_btree_gc_finish(c);
+
+@@ -1900,7 +1900,7 @@ void bch_initial_gc_finish(struct cache_set *c)
+ static bool btree_insert_key(struct btree *b, struct bkey *k,
+ struct bkey *replace_key)
+ {
+- unsigned status;
++ unsigned int status;
+
+ BUG_ON(bkey_cmp(k, &b->key) > 0);
+
+@@ -1999,7 +1999,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
+ block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
+
+ if (split) {
+- unsigned keys = 0;
++ unsigned int keys = 0;
+
+ trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
+
+@@ -2255,7 +2255,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+
+ void bch_btree_set_root(struct btree *b)
+ {
+- unsigned i;
++ unsigned int i;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+@@ -2367,7 +2367,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
+
+ struct refill {
+ struct btree_op op;
+- unsigned nr_found;
++ unsigned int nr_found;
+ struct keybuf *buf;
+ struct bkey *end;
+ keybuf_pred_fn *pred;
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index d211e2c25b6b..398d81a45cf6 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
+ return bset_tree_last(&b->keys)->data;
+ }
+
+-static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
++static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
+ {
+ return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
+ }
+@@ -213,7 +213,7 @@ struct btree_op {
+ /* Btree level at which we start taking write locks */
+ short lock;
+
+- unsigned insert_collision:1;
++ unsigned int insert_collision:1;
+ };
+
+ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
+index 71427eb5fdae..07c631f1b7c7 100644
+--- a/drivers/md/bcache/closure.h
++++ b/drivers/md/bcache/closure.h
+@@ -159,7 +159,7 @@ struct closure {
+ #define CLOSURE_MAGIC_DEAD 0xc054dead
+ #define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+- unsigned magic;
++ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index d030ce3025a6..e0fb31bba346 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -69,7 +69,7 @@ void bch_btree_verify(struct btree *b)
+ sorted->start,
+ (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+ struct bset *i;
+- unsigned j;
++ unsigned int j;
+
+ console_lock();
+
+@@ -80,7 +80,7 @@ void bch_btree_verify(struct btree *b)
+ bch_dump_bset(&v->keys, sorted, 0);
+
+ for_each_written_bset(b, ondisk, i) {
+- unsigned block = ((void *) i - (void *) ondisk) /
++ unsigned int block = ((void *) i - (void *) ondisk) /
+ block_bytes(b->c);
+
+ printk(KERN_ERR "*** on disk block %u:\n", block);
+@@ -172,7 +172,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
+
+ while (size) {
+ struct keybuf_key *w;
+- unsigned bytes = min(i->bytes, size);
++ unsigned int bytes = min(i->bytes, size);
+
+ int err = copy_to_user(buf, i->buf, bytes);
+ if (err)
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index 1d096742eb41..e96ba928eeb6 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
+
+ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+
+ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+
+ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+ {
+- unsigned i = 0;
++ unsigned int i = 0;
+ char *out = buf, *end = buf + size;
+
+ #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+@@ -126,7 +126,7 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+ {
+ struct btree *b = container_of(keys, struct btree, keys);
+- unsigned j;
++ unsigned int j;
+ char buf[80];
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+@@ -171,7 +171,7 @@ static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+
+ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+ {
+- unsigned i;
++ unsigned int i;
+ char buf[80];
+ struct bucket *g;
+
+@@ -204,7 +204,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+ static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
+- unsigned i;
++ unsigned int i;
+
+ if (!bkey_cmp(k, &ZERO_KEY) ||
+ !KEY_PTRS(k) ||
+@@ -327,7 +327,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
+ struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+ uint64_t old_offset;
+- unsigned old_size, sectors_found = 0;
++ unsigned int old_size, sectors_found = 0;
+
+ BUG_ON(!KEY_OFFSET(insert));
+ BUG_ON(!KEY_SIZE(insert));
+@@ -363,7 +363,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
+ * k might have been split since we inserted/found the
+ * key we're replacing
+ */
+- unsigned i;
++ unsigned int i;
+ uint64_t offset = KEY_START(k) -
+ KEY_START(replace_key);
+
+@@ -502,7 +502,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+ }
+
+ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+- unsigned ptr)
++ unsigned int ptr)
+ {
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+@@ -534,7 +534,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
+- unsigned i, stale;
++ unsigned int i, stale;
+
+ if (!KEY_PTRS(k) ||
+ bch_extent_invalid(bk, k))
+@@ -577,7 +577,7 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
+- unsigned i;
++ unsigned int i;
+
+ if (key_merging_disabled(b->c))
+ return false;
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index 9612873afee2..c6b41a09f550 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -42,7 +42,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
+ }
+
+ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+- struct bkey *k, unsigned ptr)
++ struct bkey *k, unsigned int ptr)
+ {
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ bch_bkey_copy_single_ptr(&b->key, k, ptr);
+@@ -52,7 +52,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ /* IO errors */
+ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
+ {
+- unsigned errors;
++ unsigned int errors;
+
+ WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+
+@@ -75,12 +75,12 @@ void bch_count_io_errors(struct cache *ca,
+ */
+
+ if (ca->set->error_decay) {
+- unsigned count = atomic_inc_return(&ca->io_count);
++ unsigned int count = atomic_inc_return(&ca->io_count);
+
+ while (count > ca->set->error_decay) {
+- unsigned errors;
+- unsigned old = count;
+- unsigned new = count - ca->set->error_decay;
++ unsigned int errors;
++ unsigned int old = count;
++ unsigned int new = count - ca->set->error_decay;
+
+ /*
+ * First we subtract refresh from count; each time we
+@@ -104,7 +104,7 @@ void bch_count_io_errors(struct cache *ca,
+ }
+
+ if (error) {
+- unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
++ unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+ &ca->io_errors);
+ errors >>= IO_ERROR_SHIFT;
+
+@@ -126,12 +126,12 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+ struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
+
+- unsigned threshold = op_is_write(bio_op(bio))
++ unsigned int threshold = op_is_write(bio_op(bio))
+ ? c->congested_write_threshold_us
+ : c->congested_read_threshold_us;
+
+ if (threshold) {
+- unsigned t = local_clock_us();
++ unsigned int t = local_clock_us();
+
+ int us = t - b->submit_time_us;
+ int congested = atomic_read(&c->congested);
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 18f1b5239620..9c6201109e14 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -32,7 +32,7 @@ static void journal_read_endio(struct bio *bio)
+ }
+
+ static int journal_read_bucket(struct cache *ca, struct list_head *list,
+- unsigned bucket_index)
++ unsigned int bucket_index)
+ {
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->bio;
+@@ -40,7 +40,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
+ struct journal_replay *i;
+ struct jset *j, *data = ca->set->journal.w[0].data;
+ struct closure cl;
+- unsigned len, left, offset = 0;
++ unsigned int len, left, offset = 0;
+ int ret = 0;
+ sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+
+@@ -50,7 +50,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
+
+ while (offset < ca->sb.bucket_size) {
+ reread: left = ca->sb.bucket_size - offset;
+- len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
++ len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
+
+ bio_reset(bio);
+ bio->bi_iter.bi_sector = bucket + offset;
+@@ -154,12 +154,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
+ })
+
+ struct cache *ca;
+- unsigned iter;
++ unsigned int iter;
+
+ for_each_cache(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+- unsigned i, l, r, m;
++ unsigned int i, l, r, m;
+ uint64_t seq;
+
+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+@@ -304,7 +304,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
+ k < bset_bkey_last(&i->j);
+ k = bkey_next(k))
+ if (!__bch_extent_invalid(c, k)) {
+- unsigned j;
++ unsigned int j;
+
+ for (j = 0; j < KEY_PTRS(k); j++)
+ if (ptr_available(c, k, j))
+@@ -492,7 +492,7 @@ static void journal_reclaim(struct cache_set *c)
+ struct bkey *k = &c->journal.key;
+ struct cache *ca;
+ uint64_t last_seq;
+- unsigned iter, n = 0;
++ unsigned int iter, n = 0;
+ atomic_t p __maybe_unused;
+
+ atomic_long_inc(&c->reclaim);
+@@ -526,7 +526,7 @@ static void journal_reclaim(struct cache_set *c)
+
+ for_each_cache(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+- unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
++ unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+
+ /* No space available on this device */
+ if (next == ja->discard_idx)
+@@ -609,7 +609,7 @@ static void journal_write_unlocked(struct closure *cl)
+ struct cache *ca;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+- unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
++ unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
+ c->sb.block_size;
+
+ struct bio *bio;
+@@ -705,7 +705,7 @@ static void journal_try_write(struct cache_set *c)
+ }
+
+ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+- unsigned nkeys)
++ unsigned int nkeys)
+ __acquires(&c->journal.lock)
+ {
+ size_t sectors;
+diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
+index b5788199188f..f0982731ae20 100644
+--- a/drivers/md/bcache/journal.h
++++ b/drivers/md/bcache/journal.h
+@@ -110,7 +110,7 @@ struct journal {
+ struct delayed_work work;
+
+ /* Number of blocks free in the bucket(s) we're currently writing to */
+- unsigned blocks_free;
++ unsigned int blocks_free;
+ uint64_t seq;
+ DECLARE_FIFO(atomic_t, pin);
+
+@@ -131,13 +131,13 @@ struct journal_device {
+ uint64_t seq[SB_JOURNAL_BUCKETS];
+
+ /* Journal bucket we're currently writing to */
+- unsigned cur_idx;
++ unsigned int cur_idx;
+
+ /* Last journal bucket that still contains an open journal entry */
+- unsigned last_idx;
++ unsigned int last_idx;
+
+ /* Next journal bucket to be discarded */
+- unsigned discard_idx;
++ unsigned int discard_idx;
+
+ #define DISCARD_READY 0
+ #define DISCARD_IN_FLIGHT 1
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index a24c3a95b2c0..0790d710f911 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
+ {
+ struct cache_set *c = container_of(buf, struct cache_set,
+ moving_gc_keys);
+- unsigned i;
++ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+@@ -186,7 +186,7 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
+ return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+ }
+
+-static unsigned bucket_heap_top(struct cache *ca)
++static unsigned int bucket_heap_top(struct cache *ca)
+ {
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+@@ -196,7 +196,7 @@ void bch_moving_gc(struct cache_set *c)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned i;
++ unsigned int i;
+
+ if (!c->copy_gc_enabled)
+ return;
+@@ -204,9 +204,9 @@ void bch_moving_gc(struct cache_set *c)
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i) {
+- unsigned sectors_to_move = 0;
+- unsigned reserve_sectors = ca->sb.bucket_size *
+- fifo_used(&ca->free[RESERVE_MOVINGGC]);
++ unsigned int sectors_to_move = 0;
++ unsigned int reserve_sectors = ca->sb.bucket_size *
++ fifo_used(&ca->free[RESERVE_MOVINGGC]);
+
+ ca->heap.used = 0;
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index ae67f5fa8047..7769af040ec0 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -27,7 +27,7 @@ struct kmem_cache *bch_search_cache;
+
+ static void bch_data_insert_start(struct closure *);
+
+-static unsigned cache_mode(struct cached_dev *dc)
++static unsigned int cache_mode(struct cached_dev *dc)
+ {
+ return BDEV_CACHE_MODE(&dc->sb);
+ }
+@@ -98,7 +98,7 @@ static void bch_data_insert_keys(struct closure *cl)
+ closure_return(cl);
+ }
+
+-static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
++static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
+ struct cache_set *c)
+ {
+ size_t oldsize = bch_keylist_nkeys(l);
+@@ -125,7 +125,7 @@ static void bch_data_invalidate(struct closure *cl)
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
+
+ while (bio_sectors(bio)) {
+- unsigned sectors = min(bio_sectors(bio),
++ unsigned int sectors = min(bio_sectors(bio),
+ 1U << (KEY_SIZE_BITS - 1));
+
+ if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
+@@ -211,7 +211,7 @@ static void bch_data_insert_start(struct closure *cl)
+ bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
+
+ do {
+- unsigned i;
++ unsigned int i;
+ struct bkey *k;
+ struct bio_set *split = &op->c->bio_split;
+
+@@ -328,7 +328,7 @@ void bch_data_insert(struct closure *cl)
+
+ /* Congested? */
+
+-unsigned bch_get_congested(struct cache_set *c)
++unsigned int bch_get_congested(struct cache_set *c)
+ {
+ int i;
+ long rand;
+@@ -372,8 +372,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
+ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+ {
+ struct cache_set *c = dc->disk.c;
+- unsigned mode = cache_mode(dc);
+- unsigned sectors, congested = bch_get_congested(c);
++ unsigned int mode = cache_mode(dc);
++ unsigned int sectors, congested = bch_get_congested(c);
+ struct task_struct *task = current;
+ struct io *i;
+
+@@ -469,11 +469,11 @@ struct search {
+ struct bio *cache_miss;
+ struct bcache_device *d;
+
+- unsigned insert_bio_sectors;
+- unsigned recoverable:1;
+- unsigned write:1;
+- unsigned read_dirty_data:1;
+- unsigned cache_missed:1;
++ unsigned int insert_bio_sectors;
++ unsigned int recoverable:1;
++ unsigned int write:1;
++ unsigned int read_dirty_data:1;
++ unsigned int cache_missed:1;
+
+ unsigned long start_time;
+
+@@ -514,15 +514,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+ struct search *s = container_of(op, struct search, op);
+ struct bio *n, *bio = &s->bio.bio;
+ struct bkey *bio_key;
+- unsigned ptr;
++ unsigned int ptr;
+
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
+ return MAP_CONTINUE;
+
+ if (KEY_INODE(k) != s->iop.inode ||
+ KEY_START(k) > bio->bi_iter.bi_sector) {
+- unsigned bio_sectors = bio_sectors(bio);
+- unsigned sectors = KEY_INODE(k) == s->iop.inode
++ unsigned int bio_sectors = bio_sectors(bio);
++ unsigned int sectors = KEY_INODE(k) == s->iop.inode
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_iter.bi_sector)
+ : INT_MAX;
+@@ -854,10 +854,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
+ }
+
+ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+- struct bio *bio, unsigned sectors)
++ struct bio *bio, unsigned int sectors)
+ {
+ int ret = MAP_CONTINUE;
+- unsigned reada = 0;
++ unsigned int reada = 0;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct bio *miss, *cache_bio;
+
+@@ -1170,7 +1170,7 @@ static int cached_dev_congested(void *data, int bits)
+ return 1;
+
+ if (cached_dev_get(dc)) {
+- unsigned i;
++ unsigned int i;
+ struct cache *ca;
+
+ for_each_cache(ca, d->c, i) {
+@@ -1197,9 +1197,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
+ /* Flash backed devices */
+
+ static int flash_dev_cache_miss(struct btree *b, struct search *s,
+- struct bio *bio, unsigned sectors)
++ struct bio *bio, unsigned int sectors)
+ {
+- unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
++ unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
+
+ swap(bio->bi_iter.bi_size, bytes);
+ zero_fill_bio(bio);
+@@ -1283,7 +1283,7 @@ static int flash_dev_congested(void *data, int bits)
+ struct bcache_device *d = data;
+ struct request_queue *q;
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+ int ret = 0;
+
+ for_each_cache(ca, d->c, i) {
+diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
+index dea0886b81c1..8e8c1ce00d9d 100644
+--- a/drivers/md/bcache/request.h
++++ b/drivers/md/bcache/request.h
+@@ -8,7 +8,7 @@ struct data_insert_op {
+ struct bio *bio;
+ struct workqueue_struct *wq;
+
+- unsigned inode;
++ unsigned int inode;
+ uint16_t write_point;
+ uint16_t write_prio;
+ blk_status_t status;
+@@ -17,15 +17,15 @@ struct data_insert_op {
+ uint16_t flags;
+
+ struct {
+- unsigned bypass:1;
+- unsigned writeback:1;
+- unsigned flush_journal:1;
+- unsigned csum:1;
++ unsigned int bypass:1;
++ unsigned int writeback:1;
++ unsigned int flush_journal:1;
++ unsigned int csum:1;
+
+- unsigned replace:1;
+- unsigned replace_collision:1;
++ unsigned int replace:1;
++ unsigned int replace_collision:1;
+
+- unsigned insert_data_done:1;
++ unsigned int insert_data_done:1;
+ };
+ };
+
+@@ -33,7 +33,7 @@ struct data_insert_op {
+ BKEY_PADDED(replace_key);
+ };
+
+-unsigned bch_get_congested(struct cache_set *);
++unsigned int bch_get_congested(struct cache_set *);
+ void bch_data_insert(struct closure *cl);
+
+ void bch_cached_dev_request_init(struct cached_dev *dc);
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index be119326297b..2331a0d5aa28 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -33,11 +33,11 @@
+ * stored left shifted by 16, and scaled back in the sysfs show() function.
+ */
+
+-static const unsigned DAY_RESCALE = 288;
+-static const unsigned HOUR_RESCALE = 12;
+-static const unsigned FIVE_MINUTE_RESCALE = 1;
+-static const unsigned accounting_delay = (HZ * 300) / 22;
+-static const unsigned accounting_weight = 32;
++static const unsigned int DAY_RESCALE = 288;
++static const unsigned int HOUR_RESCALE = 12;
++static const unsigned int FIVE_MINUTE_RESCALE = 1;
++static const unsigned int accounting_delay = (HZ * 300) / 22;
++static const unsigned int accounting_weight = 32;
+
+ /* sysfs reading/writing */
+
+@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t)
+ struct cache_accounting *acc = from_timer(acc, t, timer);
+
+ #define move_stat(name) do { \
+- unsigned t = atomic_xchg(&acc->collector.name, 0); \
++ unsigned int t = atomic_xchg(&acc->collector.name, 0); \
+ t <<= 16; \
+ acc->five_minute.name += t; \
+ acc->hour.name += t; \
+diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
+index 0b70f9de0c03..77234a89dd69 100644
+--- a/drivers/md/bcache/stats.h
++++ b/drivers/md/bcache/stats.h
+@@ -23,7 +23,7 @@ struct cache_stats {
+ unsigned long cache_miss_collisions;
+ unsigned long sectors_bypassed;
+
+- unsigned rescale;
++ unsigned int rescale;
+ };
+
+ struct cache_accounting {
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index fa4058e43202..6a59fc47e0fe 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -61,7 +61,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
+ const char *err;
+ struct cache_sb *s;
+ struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+- unsigned i;
++ unsigned int i;
+
+ if (!bh)
+ return "IO error";
+@@ -202,7 +202,7 @@ static void write_bdev_super_endio(struct bio *bio)
+ static void __write_super(struct cache_sb *sb, struct bio *bio)
+ {
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
+- unsigned i;
++ unsigned int i;
+
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_iter.bi_size = SB_SIZE;
+@@ -282,7 +282,7 @@ void bcache_write_super(struct cache_set *c)
+ {
+ struct closure *cl = &c->sb_write;
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+@@ -334,7 +334,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
+ {
+ struct closure *cl = &c->uuid_write;
+ struct uuid_entry *u;
+- unsigned i;
++ unsigned int i;
+ char buf[80];
+
+ BUG_ON(!parent);
+@@ -587,7 +587,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
+ struct prio_set *p = ca->disk_buckets;
+ struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
+ struct bucket *b;
+- unsigned bucket_nr = 0;
++ unsigned int bucket_nr = 0;
+
+ for (b = ca->buckets;
+ b < ca->buckets + ca->sb.nbuckets;
+@@ -662,7 +662,7 @@ static void bcache_device_unlink(struct bcache_device *d)
+ lockdep_assert_held(&bch_register_lock);
+
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+- unsigned i;
++ unsigned int i;
+ struct cache *ca;
+
+ sysfs_remove_link(&d->c->kobj, d->name);
+@@ -676,7 +676,7 @@ static void bcache_device_unlink(struct bcache_device *d)
+ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ const char *name)
+ {
+- unsigned i;
++ unsigned int i;
+ struct cache *ca;
+
+ for_each_cache(ca, d->c, i)
+@@ -713,7 +713,7 @@ static void bcache_device_detach(struct bcache_device *d)
+ }
+
+ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
+- unsigned id)
++ unsigned int id)
+ {
+ d->id = id;
+ d->c = c;
+@@ -760,7 +760,7 @@ static void bcache_device_free(struct bcache_device *d)
+ closure_debug_destroy(&d->cl);
+ }
+
+-static int bcache_device_init(struct bcache_device *d, unsigned block_size,
++static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
+ sector_t sectors)
+ {
+ struct request_queue *q;
+@@ -776,7 +776,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
+
+ if (!d->nr_stripes || d->nr_stripes > max_stripes) {
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+- (unsigned)d->nr_stripes);
++ (unsigned int)d->nr_stripes);
+ return -ENOMEM;
+ }
+
+@@ -1203,7 +1203,7 @@ static void cached_dev_flush(struct closure *cl)
+ continue_at(cl, cached_dev_free, system_wq);
+ }
+
+-static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
++static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
+ {
+ int ret;
+ struct io *io;
+@@ -1477,7 +1477,7 @@ static void cache_set_free(struct closure *cl)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, cl);
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ if (!IS_ERR_OR_NULL(c->debug))
+ debugfs_remove(c->debug);
+@@ -1520,7 +1520,7 @@ static void cache_set_flush(struct closure *cl)
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct cache *ca;
+ struct btree *b;
+- unsigned i;
++ unsigned int i;
+
+ bch_cache_accounting_destroy(&c->accounting);
+
+@@ -1749,7 +1749,7 @@ static void run_cache_set(struct cache_set *c)
+ struct cached_dev *dc, *t;
+ struct cache *ca;
+ struct closure cl;
+- unsigned i;
++ unsigned int i;
+
+ closure_init_stack(&cl);
+
+@@ -1840,7 +1840,7 @@ static void run_cache_set(struct cache_set *c)
+ pr_notice("invalidating existing data");
+
+ for_each_cache(ca, c, i) {
+- unsigned j;
++ unsigned int j;
+
+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+ 2, SB_JOURNAL_BUCKETS);
+@@ -1985,7 +1985,7 @@ static const char *register_cache_set(struct cache *ca)
+ void bch_cache_release(struct kobject *kobj)
+ {
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+- unsigned i;
++ unsigned int i;
+
+ if (ca->set) {
+ BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
+@@ -2137,7 +2137,7 @@ static bool bch_is_open_backing(struct block_device *bdev) {
+ static bool bch_is_open_cache(struct block_device *bdev) {
+ struct cache_set *c, *tc;
+ struct cache *ca;
+- unsigned i;
++ unsigned int i;
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ for_each_cache(ca, c, i)
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 225b15aa0340..5fabee253448 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -287,7 +287,7 @@ STORE(__cached_dev)
+ if (v < 0)
+ return v;
+
+- if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
++ if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
+ SET_BDEV_CACHE_MODE(&dc->sb, v);
+ bch_write_bdev_super(dc, NULL);
+ }
+@@ -513,9 +513,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
+ op.stats.floats, op.stats.failed);
+ }
+
+-static unsigned bch_root_usage(struct cache_set *c)
++static unsigned int bch_root_usage(struct cache_set *c)
+ {
+- unsigned bytes = 0;
++ unsigned int bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
+@@ -550,9 +550,9 @@ static size_t bch_cache_size(struct cache_set *c)
+ return ret;
+ }
+
+-static unsigned bch_cache_max_chain(struct cache_set *c)
++static unsigned int bch_cache_max_chain(struct cache_set *c)
+ {
+- unsigned ret = 0;
++ unsigned int ret = 0;
+ struct hlist_head *h;
+
+ mutex_lock(&c->bucket_lock);
+@@ -560,7 +560,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+- unsigned i = 0;
++ unsigned int i = 0;
+ struct hlist_node *p;
+
+ hlist_for_each(p, h)
+@@ -573,13 +573,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
+ return ret;
+ }
+
+-static unsigned bch_btree_used(struct cache_set *c)
++static unsigned int bch_btree_used(struct cache_set *c)
+ {
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+ }
+
+-static unsigned bch_average_key_size(struct cache_set *c)
++static unsigned int bch_average_key_size(struct cache_set *c)
+ {
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+@@ -976,7 +976,7 @@ STORE(__bch_cache)
+ if (v < 0)
+ return v;
+
+- if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
++ if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
+ mutex_lock(&ca->set->bucket_lock);
+ SET_CACHE_REPLACEMENT(&ca->sb, v);
+ mutex_unlock(&ca->set->bucket_lock);
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index cced87f8eb27..9486104fe304 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -347,7 +347,7 @@ static inline int bch_strtoul_h(const char *cp, long *res)
+ snprintf(buf, size, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+- __builtin_types_compatible_p(typeof(var), unsigned) \
++ __builtin_types_compatible_p(typeof(var), unsigned int) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+@@ -379,7 +379,7 @@ struct time_stats {
+
+ void bch_time_stats_update(struct time_stats *stats, uint64_t time);
+
+-static inline unsigned local_clock_us(void)
++static inline unsigned int local_clock_us(void)
+ {
+ return local_clock() >> 10;
+ }
+@@ -543,9 +543,10 @@ dup: \
+ container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
+
+ /* Does linear interpolation between powers of two */
+-static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
++static inline unsigned int fract_exp_two(unsigned int x,
++ unsigned int fract_bits)
+ {
+- unsigned fract = x & ~(~0 << fract_bits);
++ unsigned int fract = x & ~(~0 << fract_bits);
+
+ x >>= fract_bits;
+ x = 1 << x;
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index ad45ebe1a74b..91ceb32f7525 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -163,7 +163,8 @@ static void update_writeback_rate(struct work_struct *work)
+ smp_mb();
+ }
+
+-static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
++static unsigned int writeback_delay(struct cached_dev *dc,
++ unsigned int sectors)
+ {
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ !dc->writeback_percent)
+@@ -211,7 +212,7 @@ static void write_dirty_finish(struct closure *cl)
+ /* This is kind of a dumb way of signalling errors. */
+ if (KEY_DIRTY(&w->key)) {
+ int ret;
+- unsigned i;
++ unsigned int i;
+ struct keylist keys;
+
+ bch_keylist_init(&keys);
+@@ -325,7 +326,7 @@ static void read_dirty_submit(struct closure *cl)
+
+ static void read_dirty(struct cached_dev *dc)
+ {
+- unsigned delay = 0;
++ unsigned int delay = 0;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
+@@ -467,11 +468,11 @@ static void read_dirty(struct cached_dev *dc)
+
+ /* Scan for dirty data */
+
+-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
++void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+ uint64_t offset, int nr_sectors)
+ {
+ struct bcache_device *d = c->devices[inode];
+- unsigned stripe_offset, stripe, sectors_dirty;
++ unsigned int stripe_offset, stripe, sectors_dirty;
+
+ if (!d)
+ return;
+@@ -480,7 +481,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+- int s = min_t(unsigned, abs(nr_sectors),
++ int s = min_t(unsigned int, abs(nr_sectors),
+ d->stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+@@ -514,7 +515,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ static void refill_full_stripes(struct cached_dev *dc)
+ {
+ struct keybuf *buf = &dc->writeback_keys;
+- unsigned start_stripe, stripe, next_stripe;
++ unsigned int start_stripe, stripe, next_stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+@@ -654,7 +655,7 @@ static int bch_writeback_thread(void *arg)
+ read_dirty(dc);
+
+ if (searched_full_index) {
+- unsigned delay = dc->writeback_delay * HZ;
++ unsigned int delay = dc->writeback_delay * HZ;
+
+ while (delay &&
+ !kthread_should_stop() &&
+@@ -676,7 +677,7 @@ static int bch_writeback_thread(void *arg)
+
+ struct sectors_dirty_init {
+ struct btree_op op;
+- unsigned inode;
++ unsigned int inode;
+ };
+
+ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 610fb01de629..276696482c80 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -47,7 +47,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+ return ret;
+ }
+
+-static inline unsigned offset_to_stripe(struct bcache_device *d,
++static inline unsigned int offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+ {
+ do_div(offset, d->stripe_size);
+@@ -56,9 +56,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d,
+
+ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
+ uint64_t offset,
+- unsigned nr_sectors)
++ unsigned int nr_sectors)
+ {
+- unsigned stripe = offset_to_stripe(&dc->disk, offset);
++ unsigned int stripe = offset_to_stripe(&dc->disk, offset);
+
+ while (1) {
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
+@@ -73,9 +73,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
+ }
+
+ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+- unsigned cache_mode, bool would_skip)
++ unsigned int cache_mode, bool would_skip)
+ {
+- unsigned in_use = dc->disk.c->gc_stats.in_use;
++ unsigned int in_use = dc->disk.c->gc_stats.in_use;
+
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+@@ -115,7 +115,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+ }
+ }
+
+-void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
++void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int);
+
+ void bch_sectors_dirty_init(struct bcache_device *);
+ void bch_cached_dev_writeback_init(struct cached_dev *);
+diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
+index 821f71a2e48f..d50fa71daaca 100644
+--- a/include/uapi/linux/bcache.h
++++ b/include/uapi/linux/bcache.h
+@@ -30,10 +30,10 @@ struct bkey {
+ BITMASK(name, struct bkey, field, offset, size)
+
+ #define PTR_FIELD(name, offset, size) \
+-static inline __u64 name(const struct bkey *k, unsigned i) \
++static inline __u64 name(const struct bkey *k, unsigned int i) \
+ { return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+-static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
++static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
+ { \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+@@ -120,7 +120,7 @@ static inline struct bkey *bkey_next(const struct bkey *k)
+ return (struct bkey *) (d + bkey_u64s(k));
+ }
+
+-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
++static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+ {
+ __u64 *d = (void *) k;
+ return (struct bkey *) (d + nr_keys);
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0002-bcache-style-fix-to-add-a-blank-line-after-declarati.patch b/for-test/checkpatches_fixes/0002-bcache-style-fix-to-add-a-blank-line-after-declarati.patch
new file mode 100644
index 0000000..bafb26e
--- /dev/null
+++ b/for-test/checkpatches_fixes/0002-bcache-style-fix-to-add-a-blank-line-after-declarati.patch
@@ -0,0 +1,570 @@
+From b87c5de3a384a831dfc83c440da39c1106417663 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 26 Jun 2018 22:54:13 +0800
+Subject: [PATCH 02/18] bcache: style fix to add a blank line after
+ declarations
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/alloc.c | 3 +++
+ drivers/md/bcache/bcache.h | 1 +
+ drivers/md/bcache/bset.c | 5 ++++-
+ drivers/md/bcache/btree.c | 7 +++++++
+ drivers/md/bcache/closure.c | 1 +
+ drivers/md/bcache/debug.c | 4 ++--
+ drivers/md/bcache/extents.c | 5 ++++-
+ drivers/md/bcache/io.c | 4 +++-
+ drivers/md/bcache/journal.c | 2 ++
+ drivers/md/bcache/movinggc.c | 2 ++
+ drivers/md/bcache/request.c | 5 ++++-
+ drivers/md/bcache/stats.c | 3 +++
+ drivers/md/bcache/super.c | 13 ++++++++++++-
+ drivers/md/bcache/sysfs.c | 5 +++++
+ drivers/md/bcache/util.c | 1 +
+ drivers/md/bcache/writeback.c | 1 +
+ include/uapi/linux/bcache.h | 2 ++
+ 17 files changed, 57 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 89f663d22551..7a28232d868b 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
+
+ while (!fifo_full(&ca->free_inc)) {
+ size_t n;
++
+ get_random_bytes(&n, sizeof(n));
+
+ n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
+@@ -514,6 +515,7 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, int n, bool wait)
+ {
+ int ret;
++
+ mutex_lock(&c->bucket_lock);
+ ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+ mutex_unlock(&c->bucket_lock);
+@@ -706,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
+
+ for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
++
+ if (!b)
+ return -ENOMEM;
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index dd134c36ae92..5e96650f8d39 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -781,6 +781,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
+ static inline uint8_t gen_after(uint8_t a, uint8_t b)
+ {
+ uint8_t r = a - b;
++
+ return r > 128U ? 0 : r;
+ }
+
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index e3576f279493..f24ea5a5dbd1 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -555,6 +555,7 @@ static inline unsigned int bfloat_mantissa(const struct bkey *k,
+ struct bkey_float *f)
+ {
+ const uint64_t *p = &k->low - (f->exponent >> 6);
++
+ return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
+ }
+
+@@ -904,6 +905,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
+
+ do {
+ unsigned int p = n << 4;
++
+ p &= ((int) (p - t->size)) >> 31;
+
+ prefetch(&t->tree[p]);
+@@ -1051,6 +1053,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+ struct bset_tree *start)
+ {
+ struct bkey *ret = NULL;
++
+ iter->size = ARRAY_SIZE(iter->data);
+ iter->used = 0;
+
+@@ -1266,8 +1269,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
+ {
+ uint64_t start_time = local_clock();
+-
+ struct btree_iter iter;
++
+ bch_btree_iter_init(b, &iter, NULL);
+
+ btree_mergesort(b, new->set->data, &iter, false, true);
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index a80fd1106ed0..f10dacb4ce90 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -284,6 +284,7 @@ void bch_btree_node_read_done(struct btree *b)
+ static void btree_node_read_endio(struct bio *bio)
+ {
+ struct closure *cl = bio->bi_private;
++
+ closure_put(cl);
+ }
+
+@@ -601,6 +602,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
+ struct bkey *k, gfp_t gfp)
+ {
+ struct btree *b = kzalloc(sizeof(struct btree), gfp);
++
+ if (!b)
+ return NULL;
+
+@@ -743,6 +745,7 @@ void bch_btree_cache_free(struct cache_set *c)
+ {
+ struct btree *b;
+ struct closure cl;
++
+ closure_init_stack(&cl);
+
+ if (c->shrink.list.next)
+@@ -1121,6 +1124,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
+ struct btree_op *op)
+ {
+ struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
++
+ if (!IS_ERR_OR_NULL(n)) {
+ mutex_lock(&n->write_lock);
+ bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+@@ -2443,6 +2447,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+
+ if (!RB_EMPTY_ROOT(&buf->keys)) {
+ struct keybuf_key *w;
++
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+ buf->start = START_KEY(&w->key);
+
+@@ -2474,6 +2479,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ {
+ bool ret = false;
+ struct keybuf_key *p, *w, s;
++
+ s.key = *start;
+
+ if (bkey_cmp(end, &buf->start) <= 0 ||
+@@ -2500,6 +2506,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
+ {
+ struct keybuf_key *w;
++
+ spin_lock(&buf->lock);
+
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 0e14969182c6..0af954884e19 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -162,6 +162,7 @@ static struct dentry *closure_debug;
+ static int debug_seq_show(struct seq_file *f, void *data)
+ {
+ struct closure *cl;
++
+ spin_lock_irq(&closure_list_lock);
+
+ list_for_each_entry(cl, &closure_list, all) {
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index e0fb31bba346..cdcab6fdbd13 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -173,8 +173,8 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
+ while (size) {
+ struct keybuf_key *w;
+ unsigned int bytes = min(i->bytes, size);
+-
+ int err = copy_to_user(buf, i->buf, bytes);
++
+ if (err)
+ return err;
+
+@@ -233,8 +233,8 @@ void bch_debug_init_cache_set(struct cache_set *c)
+ {
+ if (!IS_ERR_OR_NULL(bcache_debug)) {
+ char name[50];
+- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+
++ snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+ c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
+ &cache_set_debug_ops);
+ }
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index e96ba928eeb6..8f5de61e1a90 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -134,8 +134,8 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+- printk(" bucket %zu", n);
+
++ printk(" bucket %zu", n);
+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ printk(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+@@ -166,6 +166,7 @@ bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+ static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
++
+ return __bch_btree_ptr_invalid(b->c, k);
+ }
+
+@@ -334,6 +335,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
+
+ while (1) {
+ struct bkey *k = bch_btree_iter_next(iter);
++
+ if (!k)
+ break;
+
+@@ -498,6 +500,7 @@ bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
+ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
++
+ return __bch_extent_invalid(b->c, k);
+ }
+
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index c6b41a09f550..cfc56add799a 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -17,6 +17,7 @@
+ void bch_bbio_free(struct bio *bio, struct cache_set *c)
+ {
+ struct bbio *b = container_of(bio, struct bbio, bio);
++
+ mempool_free(b, &c->bio_meta);
+ }
+
+@@ -45,6 +46,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ struct bkey *k, unsigned int ptr)
+ {
+ struct bbio *b = container_of(bio, struct bbio, bio);
++
+ bch_bkey_copy_single_ptr(&b->key, k, ptr);
+ __bch_submit_bbio(bio, c);
+ }
+@@ -132,12 +134,12 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+
+ if (threshold) {
+ unsigned int t = local_clock_us();
+-
+ int us = t - b->submit_time_us;
+ int congested = atomic_read(&c->congested);
+
+ if (us > (int) threshold) {
+ int ms = us / 1024;
++
+ c->congested_last_us = t;
+
+ ms = min(ms, CONGESTED_MAX + congested);
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 9c6201109e14..6b992651d357 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -28,6 +28,7 @@
+ static void journal_read_endio(struct bio *bio)
+ {
+ struct closure *cl = bio->bi_private;
++
+ closure_put(cl);
+ }
+
+@@ -614,6 +615,7 @@ static void journal_write_unlocked(struct closure *cl)
+
+ struct bio *bio;
+ struct bio_list list;
++
+ bio_list_init(&list);
+
+ if (!w->need_write) {
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index 0790d710f911..7891fb512736 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
+ static void moving_io_destructor(struct closure *cl)
+ {
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
++
+ kfree(io);
+ }
+
+@@ -189,6 +190,7 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
+ static unsigned int bucket_heap_top(struct cache *ca)
+ {
+ struct bucket *b;
++
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+ }
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 7769af040ec0..403bb58c117f 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
+
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = kmap(bv.bv_page) + bv.bv_offset;
++
+ csum = bch_crc64_update(csum, d, bv.bv_len);
+ kunmap(bv.bv_page);
+ }
+@@ -526,8 +527,8 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_iter.bi_sector)
+ : INT_MAX;
+-
+ int ret = s->d->cache_miss(b, s, bio, sectors);
++
+ if (ret != MAP_CONTINUE)
+ return ret;
+
+@@ -623,6 +624,7 @@ static void request_endio(struct bio *bio)
+
+ if (bio->bi_status) {
+ struct search *s = container_of(cl, struct search, cl);
++
+ s->iop.status = bio->bi_status;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+@@ -1156,6 +1158,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+ {
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
++
+ return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
+ }
+
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index 2331a0d5aa28..894410f3f829 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass)
+ {
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
++
+ mark_cache_stats(&dc->accounting.collector, hit, bypass);
+ mark_cache_stats(&c->accounting.collector, hit, bypass);
+ }
+@@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
+ {
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
++
+ atomic_inc(&dc->accounting.collector.cache_readaheads);
+ atomic_inc(&c->accounting.collector.cache_readaheads);
+ }
+@@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
+ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
+ {
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
++
+ atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+ atomic_inc(&c->accounting.collector.cache_miss_collisions);
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 6a59fc47e0fe..6a6be7c47429 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -415,8 +415,8 @@ static int __uuid_write(struct cache_set *c)
+ {
+ BKEY_PADDED(key) k;
+ struct closure cl;
+- closure_init_stack(&cl);
+
++ closure_init_stack(&cl);
+ lockdep_assert_held(&bch_register_lock);
+
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
+@@ -456,6 +456,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
+ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
+ {
+ static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
++
+ return uuid_find(c, zero_uuid);
+ }
+
+@@ -619,6 +620,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
+ static int open_dev(struct block_device *b, fmode_t mode)
+ {
+ struct bcache_device *d = b->bd_disk->private_data;
++
+ if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
+ return -ENXIO;
+
+@@ -629,6 +631,7 @@ static int open_dev(struct block_device *b, fmode_t mode)
+ static void release_dev(struct gendisk *b, fmode_t mode)
+ {
+ struct bcache_device *d = b->private_data;
++
+ closure_put(&d->cl);
+ }
+
+@@ -911,6 +914,7 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ if (!d->c &&
+ BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+ struct closure cl;
++
+ closure_init_stack(&cl);
+
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
+@@ -968,6 +972,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
+ {
+ struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+ struct closure cl;
++
+ closure_init_stack(&cl);
+
+ BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
+@@ -1095,6 +1100,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+
+ if (bch_is_zero(u->uuid, 16)) {
+ struct closure cl;
++
+ closure_init_stack(&cl);
+
+ memcpy(u->uuid, dc->sb.uuid, 16);
+@@ -1310,6 +1316,7 @@ void bch_flash_dev_release(struct kobject *kobj)
+ static void flash_dev_free(struct closure *cl)
+ {
+ struct bcache_device *d = container_of(cl, struct bcache_device, cl);
++
+ mutex_lock(&bch_register_lock);
+ bcache_device_free(d);
+ mutex_unlock(&bch_register_lock);
+@@ -1469,6 +1476,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+ void bch_cache_set_release(struct kobject *kobj)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
++
+ kfree(c);
+ module_put(THIS_MODULE);
+ }
+@@ -1659,6 +1667,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ {
+ int iter_size;
+ struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
++
+ if (!c)
+ return NULL;
+
+@@ -2199,6 +2208,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ err = "failed to register device";
+ if (SB_IS_BDEV(sb)) {
+ struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
++
+ if (!dc)
+ goto err_close;
+
+@@ -2207,6 +2217,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ mutex_unlock(&bch_register_lock);
+ } else {
+ struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
++
+ if (!ca)
+ goto err_close;
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 5fabee253448..7374b78e442e 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -439,6 +439,7 @@ STORE(__bch_flash_dev)
+
+ if (attr == &sysfs_size) {
+ uint64_t v;
++
+ strtoi_h_or_return(buf, v);
+
+ u->sectors = v >> 9;
+@@ -683,6 +684,7 @@ STORE(__bch_cache_set)
+ if (attr == &sysfs_flash_vol_create) {
+ int r;
+ uint64_t v;
++
+ strtoi_h_or_return(buf, v);
+
+ r = bch_flash_dev_create(c, v);
+@@ -716,6 +718,7 @@ STORE(__bch_cache_set)
+
+ if (attr == &sysfs_prune_cache) {
+ struct shrink_control sc;
++
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->shrink.scan_objects(&c->shrink, &sc);
+@@ -769,12 +772,14 @@ STORE_LOCKED(bch_cache_set)
+ SHOW(bch_cache_set_internal)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
++
+ return bch_cache_set_show(&c->kobj, attr, buf);
+ }
+
+ STORE(bch_cache_set_internal)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
++
+ return bch_cache_set_store(&c->kobj, attr, buf, size);
+ }
+
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index fc479b026d6d..5b1b92d605ca 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -133,6 +133,7 @@ bool bch_is_zero(const char *p, size_t n)
+ int bch_parse_uuid(const char *s, char *uuid)
+ {
+ size_t i, j, x;
++
+ memset(uuid, 0, 16);
+
+ for (i = 0, j = 0;
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 91ceb32f7525..89b56dd9f562 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -198,6 +198,7 @@ static void dirty_init(struct keybuf_key *w)
+ static void dirty_io_destructor(struct closure *cl)
+ {
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
++
+ kfree(io);
+ }
+
+diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
+index d50fa71daaca..f55e804c9694 100644
+--- a/include/uapi/linux/bcache.h
++++ b/include/uapi/linux/bcache.h
+@@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+ static inline struct bkey *bkey_next(const struct bkey *k)
+ {
+ __u64 *d = (void *) k;
++
+ return (struct bkey *) (d + bkey_u64s(k));
+ }
+
+ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+ {
+ __u64 *d = (void *) k;
++
+ return (struct bkey *) (d + nr_keys);
+ }
+ /* Enough for a key with 6 pointers */
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0003-bcache-add-identifier-names-to-arguments-of-function.patch b/for-test/checkpatches_fixes/0003-bcache-add-identifier-names-to-arguments-of-function.patch
new file mode 100644
index 0000000..7d8b4d9
--- /dev/null
+++ b/for-test/checkpatches_fixes/0003-bcache-add-identifier-names-to-arguments-of-function.patch
@@ -0,0 +1,635 @@
+From c243de293d7cea6ee75f29d558e1559e0d169af0 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 15:48:49 +0800
+Subject: [PATCH 03/18] bcache: add identifier names to arguments of function
+ definitions
+
+There are many function definitions do not have identifier argument names,
+scripts/checkpatch.pl complains warnings like this,
+
+ WARNING: function definition argument 'struct bcache_device *' should
+ also have an identifier name
+ #16735: FILE: writeback.h:120:
+ +void bch_sectors_dirty_init(struct bcache_device *);
+
+This patch adds identifier argument names to all bcache function
+definitions to fix such warnings.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/bcache.h | 109 +++++++++++++++--------------
+ drivers/md/bcache/bset.h | 126 +++++++++++++++++++---------------
+ drivers/md/bcache/btree.h | 80 ++++++++++-----------
+ drivers/md/bcache/debug.h | 6 +-
+ drivers/md/bcache/extents.h | 6 +-
+ drivers/md/bcache/journal.h | 20 +++---
+ drivers/md/bcache/request.h | 2 +-
+ drivers/md/bcache/stats.h | 13 ++--
+ drivers/md/bcache/util.h | 12 ++--
+ drivers/md/bcache/writeback.h | 9 +--
+ 10 files changed, 206 insertions(+), 177 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 5e96650f8d39..54d68d5df28b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -273,9 +273,10 @@ struct bcache_device {
+
+ unsigned int data_csum:1;
+
+- int (*cache_miss)(struct btree *, struct search *,
+- struct bio *, unsigned int);
+- int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long);
++ int (*cache_miss)(struct btree *b, struct search *s,
++ struct bio *bio, unsigned int sectors);
++ int (*ioctl) (struct bcache_device *d, fmode_t mode,
++ unsigned int cmd, unsigned long arg);
+ };
+
+ struct io {
+@@ -923,40 +924,43 @@ static inline void wait_for_kthread_stop(void)
+ /* Forward declarations */
+
+ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
+-void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
+-void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
+- blk_status_t, const char *);
+-void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+- const char *);
+-void bch_bbio_free(struct bio *, struct cache_set *);
+-struct bio *bch_bbio_alloc(struct cache_set *);
+-
+-void __bch_submit_bbio(struct bio *, struct cache_set *);
+-void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
+-
+-uint8_t bch_inc_gen(struct cache *, struct bucket *);
+-void bch_rescale_priorities(struct cache_set *, int);
+-
+-bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
+-void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
+-
+-void __bch_bucket_free(struct cache *, struct bucket *);
+-void bch_bucket_free(struct cache_set *, struct bkey *);
+-
+-long bch_bucket_alloc(struct cache *, unsigned int, bool);
+-int __bch_bucket_alloc_set(struct cache_set *, unsigned int,
+- struct bkey *, int, bool);
+-int bch_bucket_alloc_set(struct cache_set *, unsigned int,
+- struct bkey *, int, bool);
+-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int,
+- unsigned int, unsigned int, bool);
++void bch_count_io_errors(struct cache *ca, blk_status_t error,
++ int is_read, const char *m);
++void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
++ blk_status_t error, const char *m);
++void bch_bbio_endio(struct cache_set *c, struct bio *bio,
++ blk_status_t error, const char *m);
++void bch_bbio_free(struct bio *bio, struct cache_set *c);
++struct bio *bch_bbio_alloc(struct cache_set *c);
++
++void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
++void bch_submit_bbio(struct bio *bio, struct cache_set *c,
++ struct bkey *k, unsigned ptr);
++
++uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
++void bch_rescale_priorities(struct cache_set *c, int sectors);
++
++bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
++void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
++
++void __bch_bucket_free(struct cache *ca, struct bucket *b);
++void bch_bucket_free(struct cache_set *c, struct bkey *k);
++
++long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
++int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
++ struct bkey *k, int n, bool wait);
++int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
++ struct bkey *k, int n, bool wait);
++bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
++ unsigned int sectors, unsigned int write_point,
++ unsigned int write_prio, bool wait);
+ bool bch_cached_dev_error(struct cached_dev *dc);
+
+ __printf(2, 3)
+-bool bch_cache_set_error(struct cache_set *, const char *, ...);
++bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
+
+-void bch_prio_write(struct cache *);
+-void bch_write_bdev_super(struct cached_dev *, struct closure *);
++void bch_prio_write(struct cache *ca);
++void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+
+ extern struct workqueue_struct *bcache_wq;
+ extern struct mutex bch_register_lock;
+@@ -968,35 +972,36 @@ extern struct kobj_type bch_cache_set_ktype;
+ extern struct kobj_type bch_cache_set_internal_ktype;
+ extern struct kobj_type bch_cache_ktype;
+
+-void bch_cached_dev_release(struct kobject *);
+-void bch_flash_dev_release(struct kobject *);
+-void bch_cache_set_release(struct kobject *);
+-void bch_cache_release(struct kobject *);
++void bch_cached_dev_release(struct kobject *kobj);
++void bch_flash_dev_release(struct kobject *kobj);
++void bch_cache_set_release(struct kobject *kobj);
++void bch_cache_release(struct kobject *kobj);
+
+-int bch_uuid_write(struct cache_set *);
+-void bcache_write_super(struct cache_set *);
++int bch_uuid_write(struct cache_set *c);
++void bcache_write_super(struct cache_set *c);
+
+ int bch_flash_dev_create(struct cache_set *c, uint64_t size);
+
+-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
+-void bch_cached_dev_detach(struct cached_dev *);
+-void bch_cached_dev_run(struct cached_dev *);
+-void bcache_device_stop(struct bcache_device *);
++int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
++ uint8_t *set_uuid);
++void bch_cached_dev_detach(struct cached_dev *dc);
++void bch_cached_dev_run(struct cached_dev *dc);
++void bcache_device_stop(struct bcache_device *d);
+
+-void bch_cache_set_unregister(struct cache_set *);
+-void bch_cache_set_stop(struct cache_set *);
++void bch_cache_set_unregister(struct cache_set *c);
++void bch_cache_set_stop(struct cache_set *c);
+
+-struct cache_set *bch_cache_set_alloc(struct cache_sb *);
+-void bch_btree_cache_free(struct cache_set *);
+-int bch_btree_cache_alloc(struct cache_set *);
+-void bch_moving_init_cache_set(struct cache_set *);
+-int bch_open_buckets_alloc(struct cache_set *);
+-void bch_open_buckets_free(struct cache_set *);
++struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
++void bch_btree_cache_free(struct cache_set *c);
++int bch_btree_cache_alloc(struct cache_set *c);
++void bch_moving_init_cache_set(struct cache_set *c);
++int bch_open_buckets_alloc(struct cache_set *c);
++void bch_open_buckets_free(struct cache_set *c);
+
+ int bch_cache_allocator_start(struct cache *ca);
+
+ void bch_debug_exit(void);
+-int bch_debug_init(struct kobject *);
++int bch_debug_init(struct kobject *kobj);
+ void bch_request_exit(void);
+ int bch_request_init(void);
+
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index fdc296103113..f5bf333aa40d 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -187,18 +187,25 @@ struct bset_tree {
+ };
+
+ struct btree_keys_ops {
+- bool (*sort_cmp)(struct btree_iter_set,
+- struct btree_iter_set);
+- struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
+- bool (*insert_fixup)(struct btree_keys *, struct bkey *,
+- struct btree_iter *, struct bkey *);
+- bool (*key_invalid)(struct btree_keys *,
+- const struct bkey *);
+- bool (*key_bad)(struct btree_keys *, const struct bkey *);
+- bool (*key_merge)(struct btree_keys *,
+- struct bkey *, struct bkey *);
+- void (*key_to_text)(char *, size_t, const struct bkey *);
+- void (*key_dump)(struct btree_keys *, const struct bkey *);
++ bool (*sort_cmp)(struct btree_iter_set l,
++ struct btree_iter_set r);
++ struct bkey *(*sort_fixup)(struct btree_iter *iter,
++ struct bkey *tmp);
++ bool (*insert_fixup)(struct btree_keys *b,
++ struct bkey *insert,
++ struct btree_iter *iter,
++ struct bkey *replace_key);
++ bool (*key_invalid)(struct btree_keys *bk,
++ const struct bkey *k);
++ bool (*key_bad)(struct btree_keys *bk,
++ const struct bkey *k);
++ bool (*key_merge)(struct btree_keys *bk,
++ struct bkey *l, struct bkey *r);
++ void (*key_to_text)(char *buf,
++ size_t size,
++ const struct bkey *k);
++ void (*key_dump)(struct btree_keys *keys,
++ const struct bkey *k);
+
+ /*
+ * Only used for deciding whether to use START_KEY(k) or just the key
+@@ -280,18 +287,20 @@ static inline struct bset *bset_next_set(struct btree_keys *b,
+ return ((void *) i) + roundup(set_bytes(i), block_bytes);
+ }
+
+-void bch_btree_keys_free(struct btree_keys *);
+-int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t);
+-void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+- bool *);
+-
+-void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+-void bch_bset_build_written_tree(struct btree_keys *);
+-void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+-bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+-void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+-unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *,
+- struct bkey *);
++void bch_btree_keys_free(struct btree_keys *b);
++int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
++ gfp_t gfp);
++void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
++ bool *expensive_debug_checks);
++
++void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
++void bch_bset_build_written_tree(struct btree_keys *b);
++void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
++bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
++void bch_bset_insert(struct btree_keys *b, struct bkey *where,
++ struct bkey *insert);
++unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
++ struct bkey *replace_key);
+
+ enum {
+ BTREE_INSERT_STATUS_NO_INSERT = 0,
+@@ -313,18 +322,21 @@ struct btree_iter {
+ } data[MAX_BSETS];
+ };
+
+-typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
++typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+
+-struct bkey *bch_btree_iter_next(struct btree_iter *);
+-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+- struct btree_keys *, ptr_filter_fn);
++struct bkey *bch_btree_iter_next(struct btree_iter *iter);
++struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
++ struct btree_keys *b,
++ ptr_filter_fn fn);
+
+-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+-struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+- struct bkey *);
++void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
++ struct bkey *end);
++struct bkey *bch_btree_iter_init(struct btree_keys *b,
++ struct btree_iter *iter,
++ struct bkey *search);
+
+-struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+- const struct bkey *);
++struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
++ const struct bkey *search);
+
+ /*
+ * Returns the first key that is strictly greater than search
+@@ -355,15 +367,17 @@ struct bset_sort_state {
+ struct time_stats time;
+ };
+
+-void bch_bset_sort_state_free(struct bset_sort_state *);
+-int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int);
+-void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+-void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+- struct bset_sort_state *);
+-void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+- struct bset_sort_state *);
+-void bch_btree_sort_partial(struct btree_keys *, unsigned int,
+- struct bset_sort_state *);
++void bch_bset_sort_state_free(struct bset_sort_state *state);
++int bch_bset_sort_state_init(struct bset_sort_state *state,
++ unsigned int page_order);
++void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
++void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
++ struct bset_sort_state *state);
++void bch_btree_sort_and_fix_extents(struct btree_keys *b,
++ struct btree_iter *iter,
++ struct bset_sort_state *state);
++void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
++ struct bset_sort_state *state);
+
+ static inline void bch_btree_sort(struct btree_keys *b,
+ struct bset_sort_state *state)
+@@ -377,7 +391,7 @@ struct bset_stats {
+ size_t floats, failed;
+ };
+
+-void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
++void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
+
+ /* Bkey utility code */
+
+@@ -401,10 +415,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
+ : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+ }
+
+-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+- unsigned int);
+-bool __bch_cut_front(const struct bkey *, struct bkey *);
+-bool __bch_cut_back(const struct bkey *, struct bkey *);
++void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
++ unsigned int i);
++bool __bch_cut_front(const struct bkey *where, struct bkey *k);
++bool __bch_cut_back(const struct bkey *where, struct bkey *k);
+
+ static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+ {
+@@ -522,18 +536,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
+ return bch_keylist_nkeys(l) * sizeof(uint64_t);
+ }
+
+-struct bkey *bch_keylist_pop(struct keylist *);
+-void bch_keylist_pop_front(struct keylist *);
+-int __bch_keylist_realloc(struct keylist *, unsigned int);
++struct bkey *bch_keylist_pop(struct keylist *l);
++void bch_keylist_pop_front(struct keylist *l);
++int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
+
+ /* Debug stuff */
+
+ #ifdef CONFIG_BCACHE_DEBUG
+
+-int __bch_count_data(struct btree_keys *);
+-void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
+-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
+-void bch_dump_bucket(struct btree_keys *);
++int __bch_count_data(struct btree_keys *b);
++void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
++ const char *fmt,
++ ...);
++void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
++void bch_dump_bucket(struct btree_keys *b);
+
+ #else
+
+@@ -541,7 +557,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+ static inline void __printf(2, 3)
+ __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+ static inline void bch_dump_bucket(struct btree_keys *b) {}
+-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
++void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
+
+ #endif
+
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index 398d81a45cf6..c0fc06a89e49 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
+ (w ? up_write : up_read)(&b->lock);
+ }
+
+-void bch_btree_node_read_done(struct btree *);
+-void __bch_btree_node_write(struct btree *, struct closure *);
+-void bch_btree_node_write(struct btree *, struct closure *);
+-
+-void bch_btree_set_root(struct btree *);
+-struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
+- int, bool, struct btree *);
+-struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
+- struct bkey *, int, bool, struct btree *);
+-
+-int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+- struct bkey *);
+-int bch_btree_insert(struct cache_set *, struct keylist *,
+- atomic_t *, struct bkey *);
+-
+-int bch_gc_thread_start(struct cache_set *);
+-void bch_initial_gc_finish(struct cache_set *);
+-void bch_moving_gc(struct cache_set *);
+-int bch_btree_check(struct cache_set *);
+-void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
++void bch_btree_node_read_done(struct btree *b);
++void __bch_btree_node_write(struct btree *b, struct closure *parent);
++void bch_btree_node_write(struct btree *b, struct closure *parent);
++
++void bch_btree_set_root(struct btree *b);
++struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
++ int level, bool wait,
++ struct btree *parent);
++struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
++ struct bkey *k, int level, bool write,
++ struct btree *parent);
++
++int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
++ struct bkey *check_key);
++int bch_btree_insert(struct cache_set *c, struct keylist *keys,
++ atomic_t *journal_ref, struct bkey *replace_key);
++
++int bch_gc_thread_start(struct cache_set *c);
++void bch_initial_gc_finish(struct cache_set *c);
++void bch_moving_gc(struct cache_set *c);
++int bch_btree_check(struct cache_set *c);
++void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+
+ static inline void wake_up_gc(struct cache_set *c)
+ {
+@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)
+
+ #define MAP_END_KEY 1
+
+-typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+-int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+- struct bkey *, btree_map_nodes_fn *, int);
++typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
++int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
++ struct bkey *from, btree_map_nodes_fn *fn, int flags);
+
+ static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn)
+@@ -290,21 +292,21 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+ }
+
+-typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+- struct bkey *);
+-int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+- struct bkey *, btree_map_keys_fn *, int);
+-
+-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+-
+-void bch_keybuf_init(struct keybuf *);
+-void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+- struct bkey *, keybuf_pred_fn *);
+-bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
+- struct bkey *);
+-void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
+-struct keybuf_key *bch_keybuf_next(struct keybuf *);
+-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
+- struct bkey *, keybuf_pred_fn *);
++typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
++ struct bkey *k);
++int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
++ struct bkey *from, btree_map_keys_fn *fn, int flags);
++
++typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
++
++void bch_keybuf_init(struct keybuf *buf);
++void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
++ struct bkey *end, keybuf_pred_fn *pred);
++bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
++ struct bkey *end);
++void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
++struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
++struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, struct keybuf *buf,
++ struct bkey *end, keybuf_pred_fn *pred);
+ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
+ #endif
+diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
+index acc48d3fa274..fb3d4dff4b26 100644
+--- a/drivers/md/bcache/debug.h
++++ b/drivers/md/bcache/debug.h
+@@ -8,8 +8,8 @@ struct cache_set;
+
+ #ifdef CONFIG_BCACHE_DEBUG
+
+-void bch_btree_verify(struct btree *);
+-void bch_data_verify(struct cached_dev *, struct bio *);
++void bch_btree_verify(struct btree *b);
++void bch_data_verify(struct cached_dev *dc, struct bio *bio);
+
+ #define expensive_debug_checks(c) ((c)->expensive_debug_checks)
+ #define key_merging_disabled(c) ((c)->key_merging_disabled)
+@@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+ #endif
+
+ #ifdef CONFIG_DEBUG_FS
+-void bch_debug_init_cache_set(struct cache_set *);
++void bch_debug_init_cache_set(struct cache_set *c);
+ #else
+ static inline void bch_debug_init_cache_set(struct cache_set *c) {}
+ #endif
+diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
+index 0cd3575afa1d..4d667e05bb73 100644
+--- a/drivers/md/bcache/extents.h
++++ b/drivers/md/bcache/extents.h
+@@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
+ struct bkey;
+ struct cache_set;
+
+-void bch_extent_to_text(char *, size_t, const struct bkey *);
+-bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+-bool __bch_extent_invalid(struct cache_set *, const struct bkey *);
++void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
++bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
++bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
+
+ #endif /* _BCACHE_EXTENTS_H */
+diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
+index f0982731ae20..66f0facff84b 100644
+--- a/drivers/md/bcache/journal.h
++++ b/drivers/md/bcache/journal.h
+@@ -167,14 +167,16 @@ struct cache_set;
+ struct btree_op;
+ struct keylist;
+
+-atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
+-void bch_journal_next(struct journal *);
+-void bch_journal_mark(struct cache_set *, struct list_head *);
+-void bch_journal_meta(struct cache_set *, struct closure *);
+-int bch_journal_read(struct cache_set *, struct list_head *);
+-int bch_journal_replay(struct cache_set *, struct list_head *);
+-
+-void bch_journal_free(struct cache_set *);
+-int bch_journal_alloc(struct cache_set *);
++atomic_t *bch_journal(struct cache_set *c,
++ struct keylist *keys,
++ struct closure *parent);
++void bch_journal_next(struct journal *j);
++void bch_journal_mark(struct cache_set *c, struct list_head *list);
++void bch_journal_meta(struct cache_set *c, struct closure *cl);
++int bch_journal_read(struct cache_set *c, struct list_head *list);
++int bch_journal_replay(struct cache_set *c, struct list_head *list);
++
++void bch_journal_free(struct cache_set *c);
++int bch_journal_alloc(struct cache_set *c);
+
+ #endif /* _BCACHE_JOURNAL_H */
+diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
+index 8e8c1ce00d9d..aa055cfeb099 100644
+--- a/drivers/md/bcache/request.h
++++ b/drivers/md/bcache/request.h
+@@ -33,7 +33,7 @@ struct data_insert_op {
+ BKEY_PADDED(replace_key);
+ };
+
+-unsigned int bch_get_congested(struct cache_set *);
++unsigned int bch_get_congested(struct cache_set *c);
+ void bch_data_insert(struct closure *cl);
+
+ void bch_cached_dev_request_init(struct cached_dev *dc);
+diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
+index 77234a89dd69..abfaabf7e7fc 100644
+--- a/drivers/md/bcache/stats.h
++++ b/drivers/md/bcache/stats.h
+@@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
+
+ void bch_cache_accounting_destroy(struct cache_accounting *acc);
+
+-void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
+- bool, bool);
+-void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
+-void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
+-void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
++void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
++ bool hit, bool bypass);
++void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
++void bch_mark_cache_miss_collision(struct cache_set *c,
++ struct bcache_device *d);
++void bch_mark_sectors_bypassed(struct cache_set *c,
++ struct cached_dev *dc,
++ int sectors);
+
+ #endif /* _BCACHE_STATS_H_ */
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index 9486104fe304..98e8f97a8484 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -288,10 +288,10 @@ do { \
+ #define ANYSINT_MAX(t) \
+ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+
+-int bch_strtoint_h(const char *, int *);
+-int bch_strtouint_h(const char *, unsigned int *);
+-int bch_strtoll_h(const char *, long long *);
+-int bch_strtoull_h(const char *, unsigned long long *);
++int bch_strtoint_h(const char *cp, int *res);
++int bch_strtouint_h(const char *cp, unsigned int *res);
++int bch_strtoll_h(const char *cp, long long *res);
++int bch_strtoull_h(const char *cp, unsigned long long *res);
+
+ static inline int bch_strtol_h(const char *cp, long *res)
+ {
+@@ -563,7 +563,7 @@ static inline sector_t bdev_sectors(struct block_device *bdev)
+ return bdev->bd_inode->i_size >> 9;
+ }
+
+-uint64_t bch_crc64_update(uint64_t, const void *, size_t);
+-uint64_t bch_crc64(const void *, size_t);
++uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len);
++uint64_t bch_crc64(const void *data, size_t len);
+
+ #endif /* _BCACHE_UTIL_H */
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 276696482c80..cd513a89c412 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -115,10 +115,11 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+ }
+ }
+
+-void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int);
++void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
++ uint64_t offset, int nr_sectors);
+
+-void bch_sectors_dirty_init(struct bcache_device *);
+-void bch_cached_dev_writeback_init(struct cached_dev *);
+-int bch_cached_dev_writeback_start(struct cached_dev *);
++void bch_sectors_dirty_init(struct bcache_device *d);
++void bch_cached_dev_writeback_init(struct cached_dev *dc);
++int bch_cached_dev_writeback_start(struct cached_dev *dc);
+
+ #endif
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0004-bcache-style-fixes-for-lines-over-80-characters.patch b/for-test/checkpatches_fixes/0004-bcache-style-fixes-for-lines-over-80-characters.patch
new file mode 100644
index 0000000..0b7e26b
--- /dev/null
+++ b/for-test/checkpatches_fixes/0004-bcache-style-fixes-for-lines-over-80-characters.patch
@@ -0,0 +1,325 @@
+From 46033696b761f575c915064c4136e7acac40d820 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 17:47:45 +0800
+Subject: [PATCH 04/18] bcache: style fixes for lines over 80 characters
+
+This patch breaks the lines over 80 characters into multiple lines, to
+minimize warnings by checkpatch.pl. There are still some lines that exceed
+80 characters, but they are better kept as single lines and I don't change them.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/bcache.h | 4 ++--
+ drivers/md/bcache/bset.c | 10 +++++++---
+ drivers/md/bcache/bset.h | 6 ++++--
+ drivers/md/bcache/btree.c | 5 ++++-
+ drivers/md/bcache/btree.h | 6 ++++--
+ drivers/md/bcache/debug.c | 3 ++-
+ drivers/md/bcache/extents.c | 4 +++-
+ drivers/md/bcache/journal.c | 3 ++-
+ drivers/md/bcache/request.c | 7 +++++--
+ drivers/md/bcache/super.c | 18 ++++++++++++------
+ drivers/md/bcache/sysfs.c | 11 +++++++----
+ drivers/md/bcache/util.h | 3 ++-
+ drivers/md/bcache/writeback.c | 7 +++++--
+ 13 files changed, 59 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 54d68d5df28b..2c235772ee1e 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -612,8 +612,8 @@ struct cache_set {
+ uint16_t min_prio;
+
+ /*
+- * max(gen - last_gc) for all buckets. When it gets too big we have to gc
+- * to keep gens from wrapping around.
++ * max(gen - last_gc) for all buckets. When it gets too big we have to
++ * gc to keep gens from wrapping around.
+ */
+ uint8_t need_gc;
+ struct gc_stat gc_stats;
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index f24ea5a5dbd1..15a5de96e8eb 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
+ }
+ EXPORT_SYMBOL(bch_btree_keys_free);
+
+-int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, gfp_t gfp)
++int bch_btree_keys_alloc(struct btree_keys *b,
++ unsigned int page_order,
++ gfp_t gfp)
+ {
+ struct bset_tree *t = b->set;
+
+@@ -459,7 +461,8 @@ void inorder_test(void)
+ for (unsigned int size = 2;
+ size < 65536000;
+ size++) {
+- unsigned int extra = (size - rounddown_pow_of_two(size - 1)) << 1;
++ unsigned int extra =
++ (size - rounddown_pow_of_two(size - 1)) << 1;
+ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
+
+ if (!(size % 4096))
+@@ -776,7 +779,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ k != bset_bkey_last(t->data);
+ k = bkey_next(k))
+ if (t->size == bkey_to_cacheline(t, k)) {
+- t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
++ t->prev[t->size] =
++ bkey_to_cacheline_offset(t, t->size, k);
+ t->size++;
+ }
+ }
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index f5bf333aa40d..bac76aabca6d 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -246,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+ return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+ }
+
+-static inline unsigned int bset_byte_offset(struct btree_keys *b, struct bset *i)
++static inline unsigned int bset_byte_offset(struct btree_keys *b,
++ struct bset *i)
+ {
+ return ((size_t) i) - ((size_t) b->set->data);
+ }
+
+-static inline unsigned int bset_sector_offset(struct btree_keys *b, struct bset *i)
++static inline unsigned int bset_sector_offset(struct btree_keys *b,
++ struct bset *i)
+ {
+ return bset_byte_offset(b, i) >> 9;
+ }
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index f10dacb4ce90..100ebce164b1 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -433,7 +433,10 @@ static void do_btree_node_write(struct btree *b)
+
+ continue_at(cl, btree_node_write_done, NULL);
+ } else {
+- /* No problem for multipage bvec since the bio is just allocated */
++ /*
++ * No problem for multipage bvec since the bio is
++ * just allocated
++ */
+ b->bio->bi_vcnt = 0;
+ bch_bio_map(b->bio, i);
+
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index c0fc06a89e49..52e7f4e2af2a 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -306,7 +306,9 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct bkey *end);
+ void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
+ struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
+-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, struct keybuf *buf,
+- struct bkey *end, keybuf_pred_fn *pred);
++struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
++ struct keybuf *buf,
++ struct bkey *end,
++ keybuf_pred_fn *pred);
+ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
+ #endif
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index cdcab6fdbd13..d913509dd9d2 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -67,7 +67,8 @@ void bch_btree_verify(struct btree *b)
+ if (inmemory->keys != sorted->keys ||
+ memcmp(inmemory->start,
+ sorted->start,
+- (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
++ (void *) bset_bkey_last(inmemory) -
++ (void *) inmemory->start)) {
+ struct bset *i;
+ unsigned int j;
+
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index 8f5de61e1a90..cb3b2c613ed6 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -577,7 +577,9 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+ ~((uint64_t)1 << 63);
+ }
+
+-static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
++static bool bch_extent_merge(struct btree_keys *bk,
++ struct bkey *l,
++ struct bkey *r)
+ {
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned int i;
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 6b992651d357..cab5f62ec7c3 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -193,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
+
+ for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+ l < ca->sb.njournal_buckets;
+- l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
++ l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
++ l + 1))
+ if (read_bucket(l))
+ goto bsearch;
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 403bb58c117f..b6b22c4f3ce2 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -136,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
+ bio->bi_iter.bi_size -= sectors << 9;
+
+ bch_keylist_add(&op->insert_keys,
+- &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
++ &KEY(op->inode,
++ bio->bi_iter.bi_sector,
++ sectors));
+ }
+
+ op->insert_data_done = true;
+@@ -813,7 +815,8 @@ static void cached_dev_read_done(struct closure *cl)
+
+ if (s->iop.bio) {
+ bio_reset(s->iop.bio);
+- s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
++ s->iop.bio->bi_iter.bi_sector =
++ s->cache_miss->bi_iter.bi_sector;
+ bio_copy_dev(s->iop.bio, s->cache_miss);
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bch_bio_map(s->iop.bio, NULL);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 6a6be7c47429..c61a7c5669a6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -149,7 +149,8 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
+ goto err;
+
+ err = "Invalid superblock: device too small";
+- if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
++ if (get_capacity(bdev->bd_disk) <
++ sb->bucket_size * sb->nbuckets)
+ goto err;
+
+ err = "Bad UUID";
+@@ -600,7 +601,8 @@ static void prio_read(struct cache *ca, uint64_t bucket)
+
+ prio_io(ca, bucket, REQ_OP_READ, 0);
+
+- if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
++ if (p->csum !=
++ bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+ pr_warn("bad csum reading priorities");
+
+ if (p->magic != pset_magic(&ca->sb))
+@@ -1727,8 +1729,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
+ mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
+ mempool_init_kmalloc_pool(&c->bio_meta, 2,
+- sizeof(struct bbio) + sizeof(struct bio_vec) *
+- bucket_pages(c)) ||
++ sizeof(struct bbio) + sizeof(struct bio_vec) *
++ bucket_pages(c)) ||
+ mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
+ bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
+@@ -1800,7 +1802,9 @@ static void run_cache_set(struct cache_set *c)
+ goto err;
+
+ err = "error reading btree root";
+- c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
++ c->root = bch_btree_node_get(c, NULL, k,
++ j->btree_level,
++ true, NULL);
+ if (IS_ERR_OR_NULL(c->root))
+ goto err;
+
+@@ -2094,7 +2098,9 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ goto err;
+ }
+
+- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
++ if (kobject_add(&ca->kobj,
++ &part_to_dev(bdev->bd_part)->kobj,
++ "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 7374b78e442e..5476ee74f301 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -130,8 +130,10 @@ rw_attribute(btree_shrinker_disabled);
+ rw_attribute(copy_gc_enabled);
+ rw_attribute(size);
+
+-static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+- size_t selected)
++static ssize_t bch_snprint_string_list(char *buf,
++ size_t size,
++ const char * const list[],
++ size_t selected)
+ {
+ char *out = buf;
+ size_t i;
+@@ -321,8 +323,9 @@ STORE(__cached_dev)
+ add_uevent_var(env, "DRIVER=bcache");
+ add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+ add_uevent_var(env, "CACHED_LABEL=%s", buf);
+- kobject_uevent_env(
+- &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
++ kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
++ KOBJ_CHANGE,
++ env->envp);
+ kfree(env);
+ }
+
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index 98e8f97a8484..4afd3743da6b 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -402,7 +402,8 @@ do { \
+ __print_time_stat(stats, name, \
+ average_duration, duration_units); \
+ sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
+- div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
++ div_u64((stats)->max_duration, \
++ NSEC_PER_ ## duration_units)); \
+ \
+ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
+ ? div_s64(local_clock() - (stats)->last, \
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 89b56dd9f562..e8bd02061a11 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -392,7 +392,8 @@ static void read_dirty(struct cached_dev *dc)
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
++ DIV_ROUND_UP(KEY_SIZE(&w->key),
++ PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+@@ -506,7 +507,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+
+ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ {
+- struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
++ struct cached_dev *dc = container_of(buf,
++ struct cached_dev,
++ writeback_keys);
+
+ BUG_ON(KEY_INODE(k) != dc->disk.id);
+
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0005-bcache-replace-Symbolic-permissions-by-octal-permiss.patch b/for-test/checkpatches_fixes/0005-bcache-replace-Symbolic-permissions-by-octal-permiss.patch
new file mode 100644
index 0000000..f934c53
--- /dev/null
+++ b/for-test/checkpatches_fixes/0005-bcache-replace-Symbolic-permissions-by-octal-permiss.patch
@@ -0,0 +1,54 @@
+From 16e58d576f558f80846b3e84bff0cd49580fa05c Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 18:41:00 +0800
+Subject: [PATCH 05/18] bcache: replace Symbolic permissions by octal
+ permission numbers
+
+Symbolic permission names are used in bcache; nowadays octal permission
+numbers are encouraged for better readability. This patch replaces
+all symbolic permissions with octal permission numbers.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/bcache.h | 4 ++--
+ drivers/md/bcache/sysfs.h | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 2c235772ee1e..0a2c65842fd8 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -879,11 +879,11 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
+ #define BUCKET_GC_GEN_MAX 96U
+
+ #define kobj_attribute_write(n, fn) \
+- static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
++ static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
+
+ #define kobj_attribute_rw(n, show, store) \
+ static struct kobj_attribute ksysfs_##n = \
+- __ATTR(n, S_IWUSR|S_IRUSR, show, store)
++ __ATTR(n, 0600, show, store)
+
+ static inline void wake_up_allocators(struct cache_set *c)
+ {
+diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
+index b54fe9602529..3fe82425859c 100644
+--- a/drivers/md/bcache/sysfs.h
++++ b/drivers/md/bcache/sysfs.h
+@@ -44,9 +44,9 @@ STORE(fn) \
+ static struct attribute sysfs_##_name = \
+ { .name = #_name, .mode = _mode }
+
+-#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
+-#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
+-#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
++#define write_attribute(n) __sysfs_attribute(n, 0200)
++#define read_attribute(n) __sysfs_attribute(n, 0444)
++#define rw_attribute(n) __sysfs_attribute(n, 0644)
+
+ #define sysfs_printf(file, fmt, ...) \
+ do { \
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0006-bcache-replace-printk-by-pr_-routines.patch b/for-test/checkpatches_fixes/0006-bcache-replace-printk-by-pr_-routines.patch
new file mode 100644
index 0000000..47ffbf7
--- /dev/null
+++ b/for-test/checkpatches_fixes/0006-bcache-replace-printk-by-pr_-routines.patch
@@ -0,0 +1,143 @@
+From 6c2682ed9c475ff487403bfba1e9bf54a02055bd Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 23:32:05 +0800
+Subject: [PATCH 06/18] bcache: replace printk() by pr_*() routines
+
+There are still many places in bcache use printk to display kernel
+message, which are suggested to be preplaced by pr_*() routines like
+pr_err(), pr_info(), or pr_notice().
+
+This patch replaces all printk() with a proper pr_*() routine for
+bcache code.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/bset.c | 8 ++++----
+ drivers/md/bcache/debug.c | 10 +++++-----
+ drivers/md/bcache/extents.c | 8 ++++----
+ drivers/md/bcache/super.c | 4 ++--
+ 4 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 15a5de96e8eb..ab4bcb3c155f 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -25,18 +25,18 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
+ for (k = i->start; k < bset_bkey_last(i); k = next) {
+ next = bkey_next(k);
+
+- printk(KERN_ERR "block %u key %u/%u: ", set,
++ pr_err("block %u key %u/%u: ", set,
+ (unsigned int) ((u64 *) k - i->d), i->keys);
+
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+ else
+- printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
++ pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+ if (next < bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ?
+ &START_KEY(next) : next) > 0)
+- printk(KERN_ERR "Key skipped backwards\n");
++ pr_err("Key skipped backwards\n");
+ }
+ }
+
+@@ -466,7 +466,7 @@ void inorder_test(void)
+ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
+
+ if (!(size % 4096))
+- printk(KERN_NOTICE "loop %u, %llu per us\n", size,
++ pr_notice("loop %u, %llu per us\n", size,
+ done / ktime_us_delta(ktime_get(), start));
+
+ while (1) {
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index d913509dd9d2..1e4d4c6acc0b 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -74,28 +74,28 @@ void bch_btree_verify(struct btree *b)
+
+ console_lock();
+
+- printk(KERN_ERR "*** in memory:\n");
++ pr_err("*** in memory:\n");
+ bch_dump_bset(&b->keys, inmemory, 0);
+
+- printk(KERN_ERR "*** read back in:\n");
++ pr_err("*** read back in:\n");
+ bch_dump_bset(&v->keys, sorted, 0);
+
+ for_each_written_bset(b, ondisk, i) {
+ unsigned int block = ((void *) i - (void *) ondisk) /
+ block_bytes(b->c);
+
+- printk(KERN_ERR "*** on disk block %u:\n", block);
++ pr_err("*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
+
+- printk(KERN_ERR "*** block %zu not written\n",
++ pr_err("*** block %zu not written\n",
+ ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
+ break;
+
+- printk(KERN_ERR "b->written %u\n", b->written);
++ pr_err("b->written %u\n", b->written);
+
+ console_unlock();
+ panic("verify failed at %u\n", j);
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index cb3b2c613ed6..c809724e6571 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -130,18 +130,18 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+ char buf[80];
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+- printk(" %s", buf);
++ pr_err(" %s", buf);
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+
+- printk(" bucket %zu", n);
++ pr_err(" bucket %zu", n);
+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+- printk(" prio %i",
++ pr_err(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+
+- printk(" %s\n", bch_ptr_status(b->c, k));
++ pr_err(" %s\n", bch_ptr_status(b->c, k));
+ }
+
+ /* Btree ptrs */
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index c61a7c5669a6..4b86f40e1ee9 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1460,13 +1460,13 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+ acquire_console_sem();
+ */
+
+- printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
++ pr_err("bcache: error on %pU: ", c->sb.set_uuid);
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+- printk(", disabling caching\n");
++ pr_err(", disabling caching\n");
+
+ if (c->on_error == ON_ERROR_PANIC)
+ panic("panic forced after error\n");
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0007-bcache-fix-indent-by-replacing-blank-by-tabs.patch b/for-test/checkpatches_fixes/0007-bcache-fix-indent-by-replacing-blank-by-tabs.patch
new file mode 100644
index 0000000..8e0bf33
--- /dev/null
+++ b/for-test/checkpatches_fixes/0007-bcache-fix-indent-by-replacing-blank-by-tabs.patch
@@ -0,0 +1,34 @@
+From 39dc76caa58ad3f58b7fb6145f2ec7fc7dd40d47 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 23:48:36 +0800
+Subject: [PATCH 07/18] bcache: fix indent by replacing blank by tabs
+
+bch_btree_insert_check_key() has unaligned indentation, or indentation
+by space characters. This patch makes the indentation aligned and
+replaces spaces with tabs.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/btree.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 100ebce164b1..9b3866c80390 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -2184,10 +2184,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ rw_lock(true, b, b->level);
+
+ if (b->key.ptr[0] != btree_ptr ||
+- b->seq != seq + 1) {
++ b->seq != seq + 1) {
+ op->lock = b->level;
+ goto out;
+- }
++ }
+ }
+
+ SET_KEY_PTRS(check_key, 1);
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0008-bcache-replace-pF-by-pS-in-seq_printf.patch b/for-test/checkpatches_fixes/0008-bcache-replace-pF-by-pS-in-seq_printf.patch
new file mode 100644
index 0000000..119caae
--- /dev/null
+++ b/for-test/checkpatches_fixes/0008-bcache-replace-pF-by-pS-in-seq_printf.patch
@@ -0,0 +1,38 @@
+From 677e3ea7f841d448b34c6f73512a13cbe5f9a27b Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Mon, 23 Jul 2018 23:57:40 +0800
+Subject: [PATCH 08/18] bcache: replace '%pF' by '%pS' in seq_printf()
+
+'%pF' and '%pf' are deprecated vsprintf pointer extensions, this patch
+replace them by '%pS', which is suggested by checkpatch.pl.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/closure.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 0af954884e19..2aab25122f3c 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -168,7 +168,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
+ list_for_each_entry(cl, &closure_list, all) {
+ int r = atomic_read(&cl->remaining);
+
+- seq_printf(f, "%p: %pF -> %pf p %p r %i ",
++ seq_printf(f, "%p: %pS -> %pS p %p r %i ",
+ cl, (void *) cl->ip, cl->fn, cl->parent,
+ r & CLOSURE_REMAINING_MASK);
+
+@@ -178,7 +178,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
+ r & CLOSURE_RUNNING ? "R" : "");
+
+ if (r & CLOSURE_WAITING)
+- seq_printf(f, " W %pF\n",
++ seq_printf(f, " W %pS\n",
+ (void *) cl->waiting_on);
+
+ seq_printf(f, "\n");
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0009-bcache-fix-typo-succesfully-to-successfully.patch b/for-test/checkpatches_fixes/0009-bcache-fix-typo-succesfully-to-successfully.patch
new file mode 100644
index 0000000..09f4ac0
--- /dev/null
+++ b/for-test/checkpatches_fixes/0009-bcache-fix-typo-succesfully-to-successfully.patch
@@ -0,0 +1,43 @@
+From f9be5591944e408c41728751439f16e5ae41b237 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 Jul 2018 00:05:37 +0800
+Subject: [PATCH 09/18] bcache: fix typo 'succesfully' to 'successfully'
+
+This patch fixes typo 'succesfully' to correct 'successfully', which is
+suggested by checkpatch.pl.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/io.c | 2 +-
+ drivers/md/bcache/request.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index cfc56add799a..c25097968319 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -86,7 +86,7 @@ void bch_count_io_errors(struct cache *ca,
+
+ /*
+ * First we subtract refresh from count; each time we
+- * succesfully do so, we rescale the errors once:
++ * successfully do so, we rescale the errors once:
+ */
+
+ count = atomic_cmpxchg(&ca->io_count, old, new);
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index b6b22c4f3ce2..86313f1ad89c 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -154,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl)
+
+ /*
+ * Our data write just errored, which means we've got a bunch of keys to
+- * insert that point to data that wasn't succesfully written.
++ * insert that point to data that wasn't successfully written.
+ *
+ * We don't have to insert those keys but we still have to invalidate
+ * that region of the cache - so, if we just strip off all the pointers
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0010-bcache-prefer-help-in-Kconfig.patch b/for-test/checkpatches_fixes/0010-bcache-prefer-help-in-Kconfig.patch
new file mode 100644
index 0000000..31ad29e
--- /dev/null
+++ b/for-test/checkpatches_fixes/0010-bcache-prefer-help-in-Kconfig.patch
@@ -0,0 +1,48 @@
+From 3ff465e44c9b2e59b14e95aa3004751bda27bd87 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 27 Jul 2018 13:15:11 +0800
+Subject: [PATCH 10/18] bcache: prefer 'help' in Kconfig
+
+Current bcache Kconfig uses '---help---' as the header of help information;
+nowadays 'help' is preferred. This patch fixes this style by replacing
+'---help---' with 'help' in the bcache Kconfig file.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/Kconfig | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
+index 17bf109c58e9..817b9fba50db 100644
+--- a/drivers/md/bcache/Kconfig
++++ b/drivers/md/bcache/Kconfig
+@@ -1,7 +1,7 @@
+
+ config BCACHE
+ tristate "Block device as cache"
+- ---help---
++ help
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+
+@@ -10,7 +10,7 @@ config BCACHE
+ config BCACHE_DEBUG
+ bool "Bcache debugging"
+ depends on BCACHE
+- ---help---
++ help
+ Don't select this option unless you're a developer
+
+ Enables extra debugging tools, allows expensive runtime checks to be
+@@ -20,7 +20,7 @@ config BCACHE_CLOSURES_DEBUG
+ bool "Debug closures"
+ depends on BCACHE
+ select DEBUG_FS
+- ---help---
++ help
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0011-bcache-do-not-check-NULL-pointer-before-calling-kmem.patch b/for-test/checkpatches_fixes/0011-bcache-do-not-check-NULL-pointer-before-calling-kmem.patch
new file mode 100644
index 0000000..ba308de
--- /dev/null
+++ b/for-test/checkpatches_fixes/0011-bcache-do-not-check-NULL-pointer-before-calling-kmem.patch
@@ -0,0 +1,32 @@
+From 7efbeb16f87dbae9e74cbd866bdc1787d88010b3 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 27 Jul 2018 13:18:25 +0800
+Subject: [PATCH 11/18] bcache: do not check NULL pointer before calling
+ kmem_cache_destroy
+
+kmem_cache_destroy() is safe with a NULL pointer as input, so the NULL
+pointer checking is unnecessary. This patch just removes the NULL pointer
+checking to make the code simpler.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/request.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 86313f1ad89c..788f5a3a4386 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -1312,8 +1312,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
+
+ void bch_request_exit(void)
+ {
+- if (bch_search_cache)
+- kmem_cache_destroy(bch_search_cache);
++ kmem_cache_destroy(bch_search_cache);
+ }
+
+ int __init bch_request_init(void)
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0012-bcache-fix-code-comments-style.patch b/for-test/checkpatches_fixes/0012-bcache-fix-code-comments-style.patch
new file mode 100644
index 0000000..1790d37
--- /dev/null
+++ b/for-test/checkpatches_fixes/0012-bcache-fix-code-comments-style.patch
@@ -0,0 +1,72 @@
+From d2953308e388bea078fea88abb8fc64e42cf47da Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 27 Jul 2018 15:47:19 +0800
+Subject: [PATCH 12/18] bcache: fix code comments style
+
+This patch fixes 3 style issues warned by checkpatch.pl,
+- Comment lines are not aligned
+- Comments use "/*" on subsequent lines
+- Comment lines use a trailing "*/"
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/super.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 4b86f40e1ee9..3bb0e8299e4c 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -465,8 +465,8 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
+ * Bucket priorities/gens:
+ *
+ * For each bucket, we store on disk its
+- * 8 bit gen
+- * 16 bit priority
++ * 8 bit gen
++ * 16 bit priority
+ *
+ * See alloc.c for an explanation of the gen. The priority is used to implement
+ * lru (and in the future other) cache replacement policies; for most purposes
+@@ -926,8 +926,10 @@ void bch_cached_dev_run(struct cached_dev *dc)
+
+ add_disk(d->disk);
+ bd_link_disk_holder(dc->bdev, dc->disk.disk);
+- /* won't show up in the uevent file, use udevadm monitor -e instead
+- * only class / kset properties are persistent */
++ /*
++ * won't show up in the uevent file, use udevadm monitor -e instead
++ * only class / kset properties are persistent
++ */
+ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
+ kfree(env[1]);
+ kfree(env[2]);
+@@ -1096,8 +1098,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ }
+ }
+
+- /* Deadlocks since we're called via sysfs...
+- sysfs_remove_file(&dc->kobj, &sysfs_attach);
++ /*
++ * Deadlocks since we're called via sysfs...
++ * sysfs_remove_file(&dc->kobj, &sysfs_attach);
+ */
+
+ if (bch_is_zero(u->uuid, 16)) {
+@@ -1456,9 +1459,10 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+ if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ pr_info("CACHE_SET_IO_DISABLE already set");
+
+- /* XXX: we can be called from atomic context
+- acquire_console_sem();
+- */
++ /*
++ * XXX: we can be called from atomic context
++ * acquire_console_sem();
++ */
+
+ pr_err("bcache: error on %pU: ", c->sb.set_uuid);
+
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0013-bcache-add-static-const-prefix-to-char-array-declara.patch b/for-test/checkpatches_fixes/0013-bcache-add-static-const-prefix-to-char-array-declara.patch
new file mode 100644
index 0000000..b418692
--- /dev/null
+++ b/for-test/checkpatches_fixes/0013-bcache-add-static-const-prefix-to-char-array-declara.patch
@@ -0,0 +1,30 @@
+From f21d653b3c8deba8b1aa6ffa1b4382adab52b6ec Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 14:46:50 +0800
+Subject: [PATCH 13/18] bcache: add static const prefix to char * array
+ declarations
+
+This patch declares char * array with const prefix in sysfs.c,
+which is suggested by checkpatch.pl.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/sysfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 5476ee74f301..074978df5a1a 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -150,7 +150,7 @@ SHOW(__bch_cached_dev)
+ {
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+- const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
++ char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+
+ #define var(stat) (dc->stat)
+
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0014-bcache-move-open-brace-at-end-of-function-definition.patch b/for-test/checkpatches_fixes/0014-bcache-move-open-brace-at-end-of-function-definition.patch
new file mode 100644
index 0000000..b5750cb
--- /dev/null
+++ b/for-test/checkpatches_fixes/0014-bcache-move-open-brace-at-end-of-function-definition.patch
@@ -0,0 +1,52 @@
+From ede892360eebc8275ad23a98ecb2c360cc23927a Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 14:56:09 +0800
+Subject: [PATCH 14/18] bcache: move open brace at end of function definitions
+ to next line
+
+It is not the preferred style to place the open brace '{' at the end
+of a function definition; checkpatch.pl reports an error for such
+coding style. This patch moves these braces to the start of the next line.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/super.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 3bb0e8299e4c..7722662f18d1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2139,7 +2139,8 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
+ kobj_attribute_write(register, register_bcache);
+ kobj_attribute_write(register_quiet, register_bcache);
+
+-static bool bch_is_open_backing(struct block_device *bdev) {
++static bool bch_is_open_backing(struct block_device *bdev)
++{
+ struct cache_set *c, *tc;
+ struct cached_dev *dc, *t;
+
+@@ -2153,7 +2154,8 @@ static bool bch_is_open_backing(struct block_device *bdev) {
+ return false;
+ }
+
+-static bool bch_is_open_cache(struct block_device *bdev) {
++static bool bch_is_open_cache(struct block_device *bdev)
++{
+ struct cache_set *c, *tc;
+ struct cache *ca;
+ unsigned int i;
+@@ -2165,7 +2167,8 @@ static bool bch_is_open_cache(struct block_device *bdev) {
+ return false;
+ }
+
+-static bool bch_is_open(struct block_device *bdev) {
++static bool bch_is_open(struct block_device *bdev)
++{
+ return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+ }
+
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0015-bcache-remove-useless-macros-from-util.c.patch b/for-test/checkpatches_fixes/0015-bcache-remove-useless-macros-from-util.c.patch
new file mode 100644
index 0000000..bf643a9
--- /dev/null
+++ b/for-test/checkpatches_fixes/0015-bcache-remove-useless-macros-from-util.c.patch
@@ -0,0 +1,32 @@
+From 44f0657bc07d47dfa4700a9bba3c085ed0859404 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 18:30:24 +0800
+Subject: [PATCH 15/18] bcache: remove useless macros from util.c
+
+Macros simple_strtoint() and simple_strtouint() are not referenced in
+bcache code, and they are defined by obsoleted routine simple_strtoul().
+
+This patch removes these two useless macros from bcache.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/util.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index 5b1b92d605ca..329ba90245ee 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -16,9 +16,6 @@
+
+ #include "util.h"
+
+-#define simple_strtoint(c, end, base) simple_strtol(c, end, base)
+-#define simple_strtouint(c, end, base) simple_strtoul(c, end, base)
+-
+ #define STRTO_H(name, type) \
+ int bch_ ## name ## _h(const char *cp, type *res) \
+ { \
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0016-bcache-add-missing-SPDX-header.patch b/for-test/checkpatches_fixes/0016-bcache-add-missing-SPDX-header.patch
new file mode 100644
index 0000000..b937adb
--- /dev/null
+++ b/for-test/checkpatches_fixes/0016-bcache-add-missing-SPDX-header.patch
@@ -0,0 +1,45 @@
+From 35c8fa66562f5108f94ff79ec78b3f36196d89d4 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 18:35:26 +0800
+Subject: [PATCH 16/18] bcache: add missing SPDX header
+
+The SPDX header is missing from closure.c, super.c and util.c; this
+patch adds SPDX header for GPL-2.0 into these files.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/closure.c | 1 +
+ drivers/md/bcache/super.c | 1 +
+ drivers/md/bcache/util.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 2aab25122f3c..3be7f5234f73 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Asynchronous refcounty things
+ *
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 7722662f18d1..688a6cc69752 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * bcache setup/teardown code, and some metadata io - read a superblock and
+ * figure out what to do with it.
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index 329ba90245ee..3246eaa28f5a 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * random utiility code, for bcache but in theory not specific to bcache
+ *
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0017-bcache-remove-unnecessary-space-before-ioctl-functio.patch b/for-test/checkpatches_fixes/0017-bcache-remove-unnecessary-space-before-ioctl-functio.patch
new file mode 100644
index 0000000..14ba879
--- /dev/null
+++ b/for-test/checkpatches_fixes/0017-bcache-remove-unnecessary-space-before-ioctl-functio.patch
@@ -0,0 +1,31 @@
+From aa955aad8b2ff5733167b6be221a38f2b88be359 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 22:52:57 +0800
+Subject: [PATCH 17/18] bcache: remove unnecessary space before ioctl function
+ pointer arguments
+
+This is warned by checkpatch.pl, this patch removes the extra space.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/bcache.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 0a2c65842fd8..8ff20b75480b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -275,8 +275,8 @@ struct bcache_device {
+
+ int (*cache_miss)(struct btree *b, struct search *s,
+ struct bio *bio, unsigned int sectors);
+- int (*ioctl) (struct bcache_device *d, fmode_t mode,
+- unsigned int cmd, unsigned long arg);
++ int (*ioctl)(struct bcache_device *d, fmode_t mode,
++ unsigned int cmd, unsigned long arg);
+ };
+
+ struct io {
+--
+2.18.0
+
diff --git a/for-test/checkpatches_fixes/0018-bcache-add-the-missing-code-comments-for-smp_mb.patch b/for-test/checkpatches_fixes/0018-bcache-add-the-missing-code-comments-for-smp_mb.patch
new file mode 100644
index 0000000..f33d44f
--- /dev/null
+++ b/for-test/checkpatches_fixes/0018-bcache-add-the-missing-code-comments-for-smp_mb.patch
@@ -0,0 +1,53 @@
+From 3963d925e5b72a43355ee3176528a6c858198945 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 28 Jul 2018 22:58:34 +0800
+Subject: [PATCH 18/18] bcache: add the missing code comments for smp_mb()
+
+Checkpatch.pl warns there are 2 locations of smp_mb() without code
+comment. This patch adds the missing code comments for these smp_mb()
+calls.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/closure.h | 4 +++-
+ drivers/md/bcache/super.c | 2 +-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
+index 07c631f1b7c7..b3617c797dff 100644
+--- a/drivers/md/bcache/closure.h
++++ b/drivers/md/bcache/closure.h
+@@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl)
+ }
+
+ /**
+- * closure_wake_up - wake up all closures on a wait list.
++ * closure_wake_up - wake up all closures on a wait list,
++ * with memory barrier
+ */
+ static inline void closure_wake_up(struct closure_waitlist *list)
+ {
++ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 688a6cc69752..33d8c2adacd5 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1128,11 +1128,11 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ list_move(&dc->list, &c->cached_devs);
+ calc_cached_dev_sectors(c);
+
+- smp_wmb();
+ /*
+ * dc->c must be set before dc->count != 0 - paired with the mb in
+ * cached_dev_get()
+ */
++ smp_wmb();
+ refcount_set(&dc->count, 1);
+
+ /* Block writeback thread, but spawn it */
+--
+2.18.0
+