Diffstat (limited to 'drivers/md')
28 files changed, 672 insertions, 535 deletions
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index af247298409a..f6e0a8b3a61e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -2,7 +2,7 @@
 config BCACHE
 	tristate "Block device as cache"
 	select CRC64
-	---help---
+	help
 	Allows a block device to be used as cache for other devices; uses
 	a btree for indexing and the layout is optimized for SSDs.
 
@@ -11,7 +11,7 @@ config BCACHE
 config BCACHE_DEBUG
 	bool "Bcache debugging"
 	depends on BCACHE
-	---help---
+	help
 	Don't select this option unless you're a developer
 
 	Enables extra debugging tools, allows expensive runtime checks to be
@@ -21,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
 	bool "Debug closures"
 	depends on BCACHE
 	select DEBUG_FS
-	---help---
+	help
 	Keeps all active closures in a linked list and provides a debugfs
 	interface to list them, which makes it possible to see asynchronous
 	operations that get stuck.
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7fa2631b422c..7a28232d868b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
-	unsigned i;
+	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+	unsigned int i;
 	int r;
 
 	atomic_sub(sectors, &c->rescale);
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 
 #define bucket_prio(b)							\
 ({									\
-	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
+	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
 									\
 	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
 })
@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
 
 	while (!fifo_full(&ca->free_inc)) {
 		size_t n;
+
 		get_random_bytes(&n, sizeof(n));
 
 		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
@@ -301,7 +302,7 @@ do {									\
 
 static int bch_allocator_push(struct cache *ca, long bucket)
 {
-	unsigned i;
+	unsigned int i;
 
 	/* Prios/gens are actually the most important reserve */
 	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +386,7 @@ out:
 
 /* Allocation */
 
-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
 {
 	DEFINE_WAIT(w);
 	struct bucket *b;
@@ -421,7 +422,7 @@ out:
 	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
-		unsigned j;
+		unsigned int j;
 
 		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
 			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		__bch_bucket_free(PTR_CACHE(c, k, i),
 				  PTR_BUCKET(c, k, i));
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 			   struct bkey *k, int n, bool wait)
 {
 	int i;
@@ -510,10 +511,11 @@ err:
 	return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 			 struct bkey *k, int n, bool wait)
 {
 	int ret;
+
 	mutex_lock(&c->bucket_lock);
 	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
 	mutex_unlock(&c->bucket_lock);
@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 
 struct open_bucket {
 	struct list_head	list;
-	unsigned		last_write_point;
-	unsigned		sectors_free;
+	unsigned int		last_write_point;
+	unsigned int		sectors_free;
 	BKEY_PADDED(key);
 };
 
@@ -556,7 +558,7 @@ struct open_bucket {
  */
 static struct open_bucket *pick_data_bucket(struct cache_set *c,
 					    const struct bkey *search,
-					    unsigned write_point,
+					    unsigned int write_point,
 					    struct bkey *alloc)
 {
 	struct open_bucket *ret, *ret_task = NULL;
@@ -595,12 +597,16 @@ found:
  *
  * If s->writeback is true, will not fail.
  */
-bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-		       unsigned write_point, unsigned write_prio, bool wait)
+bool bch_alloc_sectors(struct cache_set *c,
+		       struct bkey *k,
+		       unsigned int sectors,
+		       unsigned int write_point,
+		       unsigned int write_prio,
+		       bool wait)
 {
 	struct open_bucket *b;
 	BKEY_PADDED(key) alloc;
-	unsigned i;
+	unsigned int i;
 
 	/*
 	 * We might have to allocate a new bucket, which we can't do with a
@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
 	spin_lock(&c->data_bucket_lock);
 
 	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
-		unsigned watermark = write_prio
+		unsigned int watermark = write_prio
 			? RESERVE_MOVINGGC
 			: RESERVE_NONE;
 
@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
 
 	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
 		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+
 		if (!b)
 			return -ENOMEM;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 05f82ff6f016..83504dd8100a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -252,7 +252,7 @@ struct bcache_device {
 	struct kobject		kobj;
 
 	struct cache_set	*c;
-	unsigned		id;
+	unsigned int		id;
 #define BCACHEDEVNAME_SIZE	12
 	char			name[BCACHEDEVNAME_SIZE];
 
@@ -264,18 +264,19 @@ struct bcache_device {
 #define BCACHE_DEV_UNLINK_DONE		2
 #define BCACHE_DEV_WB_RUNNING		3
 #define BCACHE_DEV_RATE_DW_RUNNING	4
-	unsigned		nr_stripes;
-	unsigned		stripe_size;
+	unsigned int		nr_stripes;
+	unsigned int		stripe_size;
 	atomic_t		*stripe_sectors_dirty;
 	unsigned long		*full_dirty_stripes;
 
 	struct bio_set		bio_split;
 
-	unsigned		data_csum:1;
+	unsigned int		data_csum:1;
 
-	int (*cache_miss)(struct btree *, struct search *,
-			  struct bio *, unsigned);
-	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+	int (*cache_miss)(struct btree *b, struct search *s,
+			  struct bio *bio, unsigned int sectors);
+	int (*ioctl)(struct bcache_device *d, fmode_t mode,
+		     unsigned int cmd, unsigned long arg);
 };
 
 struct io {
@@ -284,7 +285,7 @@ struct io {
 	struct list_head	lru;
 
 	unsigned long		jiffies;
-	unsigned		sequential;
+	unsigned int		sequential;
 	sector_t		last;
 };
 
@@ -358,18 +359,18 @@ struct cached_dev {
 	struct cache_accounting accounting;
 
 	/* The rest of this all shows up in sysfs */
-	unsigned		sequential_cutoff;
-	unsigned		readahead;
+	unsigned int		sequential_cutoff;
+	unsigned int		readahead;
 
-	unsigned		io_disable:1;
-	unsigned		verify:1;
-	unsigned		bypass_torture_test:1;
+	unsigned int		io_disable:1;
+	unsigned int		verify:1;
+	unsigned int		bypass_torture_test:1;
 
-	unsigned		partial_stripes_expensive:1;
-	unsigned		writeback_metadata:1;
-	unsigned		writeback_running:1;
+	unsigned int		partial_stripes_expensive:1;
+	unsigned int		writeback_metadata:1;
+	unsigned int		writeback_running:1;
 	unsigned char		writeback_percent;
-	unsigned		writeback_delay;
+	unsigned int		writeback_delay;
 
 	uint64_t		writeback_rate_target;
 	int64_t			writeback_rate_proportional;
@@ -377,16 +378,16 @@ struct cached_dev {
 	int64_t			writeback_rate_integral_scaled;
 	int32_t			writeback_rate_change;
 
-	unsigned		writeback_rate_update_seconds;
-	unsigned		writeback_rate_i_term_inverse;
-	unsigned		writeback_rate_p_term_inverse;
-	unsigned		writeback_rate_minimum;
+	unsigned int		writeback_rate_update_seconds;
+	unsigned int		writeback_rate_i_term_inverse;
+	unsigned int		writeback_rate_p_term_inverse;
+	unsigned int		writeback_rate_minimum;
 
 	enum stop_on_failure	stop_when_cache_set_failed;
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 	atomic_t		io_errors;
-	unsigned		error_limit;
-	unsigned		offline_seconds;
+	unsigned int		error_limit;
+	unsigned int		offline_seconds;
 
 	char			backing_dev_name[BDEVNAME_SIZE];
 };
@@ -447,7 +448,7 @@ struct cache {
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
 	 */
-	unsigned		invalidate_needs_gc;
+	unsigned int		invalidate_needs_gc;
 
 	bool			discard; /* Get rid of? */
 
@@ -472,7 +473,7 @@ struct gc_stat {
 	size_t			nkeys;
 
 	uint64_t		data;	/* sectors */
-	unsigned		in_use; /* percent */
+	unsigned int		in_use; /* percent */
 };
 
 /*
@@ -518,7 +519,7 @@ struct cache_set {
 	int			caches_loaded;
 
 	struct bcache_device	**devices;
-	unsigned		devices_max_used;
+	unsigned int		devices_max_used;
 	atomic_t		attached_dev_nr;
 	struct list_head	cached_devs;
 	uint64_t		cached_dev_sectors;
@@ -548,7 +549,7 @@ struct cache_set {
 	 * Default number of pages for a new btree node - may be less than a
 	 * full bucket
 	 */
-	unsigned		btree_pages;
+	unsigned int		btree_pages;
 
 	/*
 	 * Lists of struct btrees; lru is the list for structs that have memory
@@ -571,7 +572,7 @@ struct cache_set {
 	struct list_head	btree_cache_freed;
 
 	/* Number of elements in btree_cache + btree_cache_freeable lists */
-	unsigned		btree_cache_used;
+	unsigned int		btree_cache_used;
 
 	/*
 	 * If we need to allocate memory for a new btree node and that
@@ -613,8 +614,8 @@ struct cache_set {
 	uint16_t		min_prio;
 
 	/*
-	 * max(gen - last_gc) for all buckets. When it gets too big we have to gc
-	 * to keep gens from wrapping around.
+	 * max(gen - last_gc) for all buckets. When it gets too big we have to
+	 * gc to keep gens from wrapping around.
	 */
 	uint8_t			need_gc;
 	struct gc_stat		gc_stats;
@@ -649,7 +650,7 @@ struct cache_set {
 	struct mutex		verify_lock;
 #endif
 
-	unsigned		nr_uuids;
+	unsigned int		nr_uuids;
 	struct uuid_entry	*uuids;
 	BKEY_PADDED(uuid_bucket);
 	struct closure		uuid_write;
@@ -670,12 +671,12 @@ struct cache_set {
 	struct journal		journal;
 
 #define CONGESTED_MAX		1024
-	unsigned		congested_last_us;
+	unsigned int		congested_last_us;
 	atomic_t		congested;
 
 	/* The rest of this all shows up in sysfs */
-	unsigned		congested_read_threshold_us;
-	unsigned		congested_write_threshold_us;
+	unsigned int		congested_read_threshold_us;
+	unsigned int		congested_write_threshold_us;
 
 	struct time_stats	btree_gc_time;
 	struct time_stats	btree_split_time;
@@ -694,16 +695,16 @@ struct cache_set {
 		ON_ERROR_PANIC,
 	}			on_error;
 #define DEFAULT_IO_ERROR_LIMIT 8
-	unsigned		error_limit;
-	unsigned		error_decay;
+	unsigned int		error_limit;
+	unsigned int		error_decay;
 
 	unsigned short		journal_delay_ms;
 	bool			expensive_debug_checks;
-	unsigned		verify:1;
-	unsigned		key_merging_disabled:1;
-	unsigned		gc_always_rewrite:1;
-	unsigned		shrinker_disabled:1;
-	unsigned		copy_gc_enabled:1;
+	unsigned int		verify:1;
+	unsigned int		key_merging_disabled:1;
+	unsigned int		gc_always_rewrite:1;
+	unsigned int		shrinker_disabled:1;
+	unsigned int		copy_gc_enabled:1;
 
 #define BUCKET_HASH_BITS	12
 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
@@ -712,7 +713,7 @@ struct cache_set {
 };
 
 struct bbio {
-	unsigned		submit_time_us;
+	unsigned int		submit_time_us;
 	union {
 		struct bkey	key;
 		uint64_t	_pad[3];
@@ -729,10 +730,10 @@ struct bbio {
 
 #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
 #define btree_blocks(b)						\
-	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 
 #define btree_default_blocks(c)					\
-	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 
 #define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
 #define bucket_bytes(c)		((c)->sb.bucket_size << 9)
@@ -761,21 +762,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 
 static inline struct cache *PTR_CACHE(struct cache_set *c,
 				      const struct bkey *k,
-				      unsigned ptr)
+				      unsigned int ptr)
 {
 	return c->cache[PTR_DEV(k, ptr)];
 }
 
 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 				   const struct bkey *k,
-				   unsigned ptr)
+				   unsigned int ptr)
 {
 	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 }
 
 static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 					const struct bkey *k,
-					unsigned ptr)
+					unsigned int ptr)
 {
 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
@@ -783,17 +784,18 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 static inline uint8_t gen_after(uint8_t a, uint8_t b)
 {
 	uint8_t r = a - b;
+
 	return r > 128U ? 0 : r;
 }
 
 static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-				unsigned i)
+				unsigned int i)
 {
 	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 }
 
 static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-				 unsigned i)
+				 unsigned int i)
 {
 	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
 }
@@ -879,16 +881,16 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
 #define BUCKET_GC_GEN_MAX	96U
 
 #define kobj_attribute_write(n, fn)					\
-	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
+	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
 
 #define kobj_attribute_rw(n, show, store)				\
 	static struct kobj_attribute ksysfs_##n =			\
-		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
+		__ATTR(n, 0600, show, store)
 
 static inline void wake_up_allocators(struct cache_set *c)
 {
 	struct cache *ca;
-	unsigned i;
+	unsigned int i;
 
 	for_each_cache(ca, c, i)
 		wake_up_process(ca->alloc_thread);
@@ -924,40 +926,43 @@ static inline void wait_for_kthread_stop(void)
 /* Forward declarations */
 
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
-void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
-void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-			      blk_status_t, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
-		    const char *);
-void bch_bbio_free(struct bio *, struct cache_set *);
-struct bio *bch_bbio_alloc(struct cache_set *);
-
-void __bch_submit_bbio(struct bio *, struct cache_set *);
-void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
-
-uint8_t bch_inc_gen(struct cache *, struct bucket *);
-void bch_rescale_priorities(struct cache_set *, int);
-
-bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
-void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
-
-void __bch_bucket_free(struct cache *, struct bucket *);
-void bch_bucket_free(struct cache_set *, struct bkey *);
-
-long bch_bucket_alloc(struct cache *, unsigned, bool);
-int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-			   struct bkey *, int, bool);
-int bch_bucket_alloc_set(struct cache_set *, unsigned,
-			 struct bkey *, int, bool);
-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
-		       unsigned, unsigned, bool);
+void bch_count_io_errors(struct cache *ca, blk_status_t error,
+			 int is_read, const char *m);
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+			      blk_status_t error, const char *m);
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+		    blk_status_t error, const char *m);
+void bch_bbio_free(struct bio *bio, struct cache_set *c);
+struct bio *bch_bbio_alloc(struct cache_set *c);
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+		     struct bkey *k, unsigned int ptr);
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
+void bch_rescale_priorities(struct cache_set *c, int sectors);
+
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
+
+void __bch_bucket_free(struct cache *ca, struct bucket *b);
+void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+			   struct bkey *k, int n, bool wait);
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+			 struct bkey *k, int n, bool wait);
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+		       unsigned int sectors, unsigned int write_point,
+		       unsigned int write_prio, bool wait);
 
 bool bch_cached_dev_error(struct cached_dev *dc);
 __printf(2, 3)
-bool bch_cache_set_error(struct cache_set *, const char *, ...);
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
 
-void bch_prio_write(struct cache *);
-void bch_write_bdev_super(struct cached_dev *, struct closure *);
+void bch_prio_write(struct cache *ca);
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
 extern struct mutex bch_register_lock;
@@ -969,30 +974,31 @@ extern struct kobj_type bch_cache_set_ktype;
 extern struct kobj_type bch_cache_set_internal_ktype;
 extern struct kobj_type bch_cache_ktype;
 
-void bch_cached_dev_release(struct kobject *);
-void bch_flash_dev_release(struct kobject *);
-void bch_cache_set_release(struct kobject *);
-void bch_cache_release(struct kobject *);
+void bch_cached_dev_release(struct kobject *kobj);
+void bch_flash_dev_release(struct kobject *kobj);
+void bch_cache_set_release(struct kobject *kobj);
+void bch_cache_release(struct kobject *kobj);
 
-int bch_uuid_write(struct cache_set *);
-void bcache_write_super(struct cache_set *);
+int bch_uuid_write(struct cache_set *c);
+void bcache_write_super(struct cache_set *c);
 
 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
-void bch_cached_dev_detach(struct cached_dev *);
-void bch_cached_dev_run(struct cached_dev *);
-void bcache_device_stop(struct bcache_device *);
-
-void bch_cache_set_unregister(struct cache_set *);
-void bch_cache_set_stop(struct cache_set *);
-
-struct cache_set *bch_cache_set_alloc(struct cache_sb *);
-void bch_btree_cache_free(struct cache_set *);
-int bch_btree_cache_alloc(struct cache_set *);
-void bch_moving_init_cache_set(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
-void bch_open_buckets_free(struct cache_set *);
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+			  uint8_t *set_uuid);
+void bch_cached_dev_detach(struct cached_dev *dc);
+void bch_cached_dev_run(struct cached_dev *dc);
+void bcache_device_stop(struct bcache_device *d);
+
+void bch_cache_set_unregister(struct cache_set *c);
+void bch_cache_set_stop(struct cache_set *c);
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
+void bch_btree_cache_free(struct cache_set *c);
+int bch_btree_cache_alloc(struct cache_set *c);
+void bch_moving_init_cache_set(struct cache_set *c);
+int bch_open_buckets_alloc(struct cache_set *c);
+void bch_open_buckets_free(struct cache_set *c);
 
 int bch_cache_allocator_start(struct cache *ca);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 596c93b44e9b..8f07fa6e1739 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -18,31 +18,31 @@
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
 {
 	struct bkey *k, *next;
 
 	for (k = i->start; k < bset_bkey_last(i); k = next) {
 		next = bkey_next(k);
 
-		printk(KERN_ERR "block %u key %u/%u: ", set,
-		       (unsigned) ((u64 *) k - i->d), i->keys);
+		pr_err("block %u key %u/%u: ", set,
+		       (unsigned int) ((u64 *) k - i->d), i->keys);
 
 		if (b->ops->key_dump)
 			b->ops->key_dump(b, k);
 		else
-			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+			pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
 
 		if (next < bset_bkey_last(i) &&
 		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
-			printk(KERN_ERR "Key skipped backwards\n");
+			pr_err("Key skipped backwards\n");
 	}
 }
 
 void bch_dump_bucket(struct btree_keys *b)
 {
-	unsigned i;
+	unsigned int i;
 
 	console_lock();
 	for (i = 0; i <= b->nsets; i++)
@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
 
 int __bch_count_data(struct btree_keys *b)
 {
-	unsigned ret = 0;
+	unsigned int ret = 0;
 	struct btree_iter iter;
 	struct bkey *k;
@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
 /* Keylists */
 
-int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
 {
 	size_t oldsize = bch_keylist_nkeys(l);
 	size_t newsize = oldsize + u64s;
@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
 /* Key/pointer manipulation */
 
 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
-			      unsigned i)
+			      unsigned int i)
 {
 	BUG_ON(i > KEY_PTRS(src));
@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
 
 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
 {
-	unsigned i, len = 0;
+	unsigned int i, len = 0;
 
 	if (bkey_cmp(where, &START_KEY(k)) <= 0)
 		return false;
@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
 
 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
 {
-	unsigned len = 0;
+	unsigned int len = 0;
 
 	if (bkey_cmp(where, k) >= 0)
 		return false;
@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
 #define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
 
 struct bkey_float {
-	unsigned	exponent:BKEY_EXPONENT_BITS;
-	unsigned	m:BKEY_MID_BITS;
-	unsigned	mantissa:BKEY_MANTISSA_BITS;
+	unsigned int	exponent:BKEY_EXPONENT_BITS;
+	unsigned int	m:BKEY_MID_BITS;
+	unsigned int	mantissa:BKEY_MANTISSA_BITS;
 } __packed;
 
 /*
@@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
 }
 EXPORT_SYMBOL(bch_btree_keys_free);
 
-int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree_keys *b,
+			 unsigned int page_order,
+			 gfp_t gfp)
 {
 	struct bset_tree *t = b->set;
@@ -345,7 +347,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
 			 bool *expensive_debug_checks)
 {
-	unsigned i;
+	unsigned int i;
 
 	b->ops = ops;
 	b->expensive_debug_checks = expensive_debug_checks;
@@ -370,7 +372,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
  * return array index next to j when does in-order traverse
  * of a binary tree which is stored in a linear array
  */
-static unsigned inorder_next(unsigned j, unsigned size)
+static unsigned int inorder_next(unsigned int j, unsigned int size)
 {
 	if (j * 2 + 1 < size) {
 		j = j * 2 + 1;
@@ -387,7 +389,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
  * return array index previous to j when does in-order traverse
 * of a binary tree which is stored in a linear array
 */
-static unsigned inorder_prev(unsigned j, unsigned size)
+static unsigned int inorder_prev(unsigned int j, unsigned int size)
 {
 	if (j * 2 < size) {
 		j = j * 2;
@@ -400,7 +402,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
 	return j;
 }
 
-/* I have no idea why this code works... and I'm the one who wrote it
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
  *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
@@ -413,10 +416,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
-static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+static unsigned int __to_inorder(unsigned int j,
+				 unsigned int size,
+				 unsigned int extra)
 {
-	unsigned b = fls(j);
-	unsigned shift = fls(size - 1) - b;
+	unsigned int b = fls(j);
+	unsigned int shift = fls(size - 1) - b;
 
 	j ^= 1U << (b - 1);
 	j <<= 1;
@@ -433,14 +438,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
 * Return the cacheline index in bset_tree->data, where j is index
 * from a linear array which stores the auxiliar binary tree
 */
-static unsigned to_inorder(unsigned j, struct bset_tree *t)
+static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
 {
 	return __to_inorder(j, t->size, t->extra);
 }
 
-static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+static unsigned int __inorder_to_tree(unsigned int j,
+				      unsigned int size,
+				      unsigned int extra)
 {
-	unsigned shift;
+	unsigned int shift;
 
 	if (j > extra)
 		j += j - extra;
@@ -457,7 +464,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
 * Return an index from a linear array which stores the auxiliar binary
 * tree, j is the cacheline index of t->data.
 */
-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
 {
 	return __inorder_to_tree(j, t->size, t->extra);
 }
@@ -468,14 +475,15 @@ void inorder_test(void)
 	unsigned long done = 0;
 	ktime_t start = ktime_get();
 
-	for (unsigned size = 2;
+	for (unsigned int size = 2;
 	     size < 65536000;
 	     size++) {
-		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
-		unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+		unsigned int extra =
+			(size - rounddown_pow_of_two(size - 1)) << 1;
+		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
 
 		if (!(size % 4096))
-			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+			pr_notice("loop %u, %llu per us\n", size,
 				done / ktime_us_delta(ktime_get(), start));
 
 		while (1) {
@@ -518,30 +526,31 @@
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
 
-static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
-				      unsigned offset)
+static struct bkey *cacheline_to_bkey(struct bset_tree *t,
+				      unsigned int cacheline,
+				      unsigned int offset)
 {
 	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
 }
 
-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
 {
 	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
 }
 
-static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
-					 unsigned cacheline,
+static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
+					 unsigned int cacheline,
 					 struct bkey *k)
 {
 	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
 }
 
-static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
 {
 	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
 }
 
-static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
 {
 	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
 }
 
@@ -550,7 +559,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
-static struct bkey *table_to_bkey(struct bset_tr