author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 11:51:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 11:51:05 -0700
commit     ebb37277796269da36a8bc5d72ed1e8e1fb7d34b (patch)
tree       0ded627a62a5cec70b18d12825dd858855c135d3 /drivers/md
parent     4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (diff)
parent     f50efd2fdbd9b35b11f5778ed85beb764184bda9 (diff)
Merge branch 'for-3.10/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "It might look big in volume, but when categorized, not a lot of
  drivers are touched.  The pull request contains:

   - mtip32xx fixes from Micron.

   - A slew of drbd updates, this time in a nicer series.

   - bcache, a flash/ssd caching framework from Kent.

   - Fixes for cciss"

* 'for-3.10/drivers' of git://git.kernel.dk/linux-block: (66 commits)
  bcache: Use bd_link_disk_holder()
  bcache: Allocator cleanup/fixes
  cciss: bug fix to prevent cciss from loading in kdump crash kernel
  cciss: add cciss_allow_hpsa module parameter
  drivers/block/mg_disk.c: add CONFIG_PM_SLEEP to suspend/resume functions
  mtip32xx: Workaround for unaligned writes
  bcache: Make sure blocksize isn't smaller than device blocksize
  bcache: Fix merge_bvec_fn usage for when it modifies the bvm
  bcache: Correctly check against BIO_MAX_PAGES
  bcache: Hack around stuff that clones up to bi_max_vecs
  bcache: Set ra_pages based on backing device's ra_pages
  bcache: Take data offset from the bdev superblock.
  mtip32xx: mtip32xx: Disable TRIM support
  mtip32xx: fix a smatch warning
  bcache: Disable broken btree fuzz tester
  bcache: Fix a format string overflow
  bcache: Fix a minor memory leak on device teardown
  bcache: Documentation updates
  bcache: Use WARN_ONCE() instead of __WARN()
  bcache: Add missing #include <linux/prefetch.h>
  ...
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig              |    2
-rw-r--r--  drivers/md/Makefile             |    1
-rw-r--r--  drivers/md/bcache/Kconfig       |   42
-rw-r--r--  drivers/md/bcache/Makefile      |    7
-rw-r--r--  drivers/md/bcache/alloc.c       |  599
-rw-r--r--  drivers/md/bcache/bcache.h      | 1259
-rw-r--r--  drivers/md/bcache/bset.c        | 1192
-rw-r--r--  drivers/md/bcache/bset.h        |  379
-rw-r--r--  drivers/md/bcache/btree.c       | 2503
-rw-r--r--  drivers/md/bcache/btree.h       |  405
-rw-r--r--  drivers/md/bcache/closure.c     |  345
-rw-r--r--  drivers/md/bcache/closure.h     |  672
-rw-r--r--  drivers/md/bcache/debug.c       |  565
-rw-r--r--  drivers/md/bcache/debug.h       |   54
-rw-r--r--  drivers/md/bcache/io.c          |  397
-rw-r--r--  drivers/md/bcache/journal.c     |  787
-rw-r--r--  drivers/md/bcache/journal.h     |  215
-rw-r--r--  drivers/md/bcache/movinggc.c    |  254
-rw-r--r--  drivers/md/bcache/request.c     | 1411
-rw-r--r--  drivers/md/bcache/request.h     |   62
-rw-r--r--  drivers/md/bcache/stats.c       |  246
-rw-r--r--  drivers/md/bcache/stats.h       |   58
-rw-r--r--  drivers/md/bcache/super.c       | 1987
-rw-r--r--  drivers/md/bcache/sysfs.c       |  817
-rw-r--r--  drivers/md/bcache/sysfs.h       |  110
-rw-r--r--  drivers/md/bcache/trace.c       |   26
-rw-r--r--  drivers/md/bcache/util.c        |  377
-rw-r--r--  drivers/md/bcache/util.h        |  589
-rw-r--r--  drivers/md/bcache/writeback.c   |  414
29 files changed, 15775 insertions, 0 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4d8d90b4fe78..3bfc8f1da9fe 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -174,6 +174,8 @@ config MD_FAULTY
In unsure, say N.
+source "drivers/md/bcache/Kconfig"
+
config BLK_DEV_DM
tristate "Device mapper support"
---help---
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7ceeaefc0e95..1439fd4ad9b1 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
+obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
new file mode 100644
index 000000000000..05c220d05e23
--- /dev/null
+++ b/drivers/md/bcache/Kconfig
@@ -0,0 +1,42 @@
+
+config BCACHE
+ tristate "Block device as cache"
+ select CLOSURES
+ ---help---
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+
+ See Documentation/bcache.txt for details.
+
+config BCACHE_DEBUG
+ bool "Bcache debugging"
+ depends on BCACHE
+ ---help---
+ Don't select this option unless you're a developer
+
+ Enables extra debugging tools (primarily a fuzz tester)
+
+config BCACHE_EDEBUG
+ bool "Extended runtime checks"
+ depends on BCACHE
+ ---help---
+ Don't select this option unless you're a developer
+
+ Enables extra runtime checks which significantly affect performance
+
+config BCACHE_CLOSURES_DEBUG
+ bool "Debug closures"
+ depends on BCACHE
+ select DEBUG_FS
+ ---help---
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
+# cgroup code needs to be updated:
+#
+#config CGROUP_BCACHE
+# bool "Cgroup controls for bcache"
+# depends on BCACHE && BLK_CGROUP
+# ---help---
+# TODO
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
new file mode 100644
index 000000000000..0e9c82523be6
--- /dev/null
+++ b/drivers/md/bcache/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_BCACHE) += bcache.o
+
+bcache-y := alloc.o btree.o bset.o io.o journal.o writeback.o\
+ movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+
+CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
new file mode 100644
index 000000000000..048f2947e08b
--- /dev/null
+++ b/drivers/md/bcache/alloc.c
@@ -0,0 +1,599 @@
+/*
+ * Primary bucket allocation code
+ *
+ * Copyright 2012 Google, Inc.
+ *
+ * Allocation in bcache is done in terms of buckets:
+ *
+ * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
+ * btree pointers - they must match for the pointer to be considered valid.
+ *
+ * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
+ * bucket simply by incrementing its gen.
+ *
+ * The gens (along with the priorities; it's really the gens that are important but
+ * the code is named as if it's the priorities) are written in an arbitrary list
+ * of buckets on disk, with a pointer to them in the journal header.
+ *
+ * When we invalidate a bucket, we have to write its new gen to disk and wait
+ * for that write to complete before we use it - otherwise after a crash we
+ * could have pointers that appeared to be good but pointed to data that had
+ * been overwritten.
+ *
+ * Since the gens and priorities are all stored contiguously on disk, we can
+ * batch this up: We fill up the free_inc list with freshly invalidated buckets,
+ * call prio_write(), and when prio_write() finishes we pull buckets off the
+ * free_inc list and optionally discard them.
+ *
+ * free_inc isn't the only freelist - if it was, we'd often have to sleep while
+ * priorities and gens were being written before we could allocate. c->free is a
+ * smaller freelist, and buckets on that list are always ready to be used.
+ *
+ * If we've got discards enabled, that happens when a bucket moves from the
+ * free_inc list to the free list.
+ *
+ * There is another freelist, because sometimes we have buckets that we know
+ * have nothing pointing into them - these we can reuse without waiting for
+ * priorities to be rewritten. These come from freed btree nodes and buckets
+ * that garbage collection discovered no longer had valid keys pointing into
+ * them (because they were overwritten). That's the unused list - buckets on the
+ * unused list move to the free list, optionally being discarded in the process.
+ *
+ * It's also important to ensure that gens don't wrap around - with respect to
+ * either the oldest gen in the btree or the gen on disk. This is quite
+ * difficult to do in practice, but we explicitly guard against it anyways - if
+ * a bucket is in danger of wrapping around we simply skip invalidating it that
+ * time around, and we garbage collect or rewrite the priorities sooner than we
+ * would have otherwise.
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+ * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+ * from bch_bucket_alloc() and a few other places that need to make sure free
+ * buckets are ready.
+ *
+ * invalidate_buckets_(lru|fifo)() find buckets that are available to be
+ * invalidated, and then invalidate them and stick them on the free_inc list -
+ * in either lru or fifo order.
+ */
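The gen-match rule described in the comment above is the heart of the allocator: a pointer stays usable only while its embedded generation equals the bucket's current generation, so bumping one 8-bit counter invalidates every outstanding pointer at once. A minimal sketch of that rule, not part of the patch; the struct and function names below are illustrative, not the real bcache types:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for struct bucket and an extent pointer. */
struct sketch_bucket { uint8_t gen; };
struct sketch_ptr    { uint8_t gen; /* device id / offset omitted */ };

/* A pointer is stale (and must be ignored) once the gens no longer match. */
static bool sketch_ptr_stale(const struct sketch_bucket *b,
                             const struct sketch_ptr *p)
{
        return p->gen != b->gen;
}

/* Reusing a bucket: bump the gen (wraps mod 256), persist it, and every
 * pointer minted against the old gen is now invalid. */
static void sketch_invalidate_bucket(struct sketch_bucket *b)
{
        b->gen++;
}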
+
+#include "bcache.h"
+#include "btree.h"
+
+#include <linux/random.h>
+
+#define MAX_IN_FLIGHT_DISCARDS 8U
+
+/* Bucket heap / gen */
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
+{
+ uint8_t ret = ++b->gen;
+
+ ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
+ WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
+
+ if (CACHE_SYNC(&ca->set->sb)) {
+ ca->need_save_prio = max(ca->need_save_prio,
+ bucket_disk_gen(b));
+ WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
+ }
+
+ return ret;
+}
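bch_inc_gen() guards against gen wraparound by tracking how far the in-memory gen has run ahead of the last-GC'd and on-disk gens. Since gens are 8 bits wide and wrap, those distances are themselves computed modulo 256. A sketch of the idea under the assumption that a bucket keeps last_gc and disk_gen fields alongside gen (the real helpers are the bucket_gc_gen()/bucket_disk_gen() macros in bcache.h):

#include <stdint.h>

struct sketch_gens {
        uint8_t gen;        /* current in-memory generation            */
        uint8_t last_gc;    /* gen as of the last garbage collection   */
        uint8_t disk_gen;   /* gen most recently written to disk       */
};

/* Unsigned 8-bit subtraction gives the forward distance even across a
 * wrap: e.g. gen == 2, last_gc == 250 -> distance 8. */
static uint8_t sketch_gc_gen(const struct sketch_gens *g)
{
        return (uint8_t)(g->gen - g->last_gc);
}

static uint8_t sketch_disk_gen(const struct sketch_gens *g)
{
        return (uint8_t)(g->gen - g->disk_gen);
}

The allocator refuses to bump a gen whose distance is already near the allowed maximum; that is the "gens don't wrap around" guard from the comment at the top of this file.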
+
+void bch_rescale_priorities(struct cache_set *c, int sectors)
+{
+ struct cache *ca;
+ struct bucket *b;
+ unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
+ unsigned i;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+
+ do {
+ r = atomic_read(&c->rescale);
+
+ if (r >= 0)
+ return;
+ } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);
+
+ mutex_lock(&c->bucket_lock);
+
+ c->min_prio = USHRT_MAX;
+
+ for_each_cache(ca, c, i)
+ for_each_bucket(b, ca)
+ if (b->prio &&
+ b->prio != BTREE_PRIO &&
+ !atomic_read(&b->pin)) {
+ b->prio--;
+ c->min_prio = min(c->min_prio, b->prio);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
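The atomic_cmpxchg() loop in bch_rescale_priorities() is a claim-the-work pattern: every I/O path decrements c->rescale, but only the one thread that atomically replenishes the counter goes on to take bucket_lock and decay the priorities. A generic sketch of the same pattern in C11 atomics, illustrative only and not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true for exactly one caller each time the counter drops below
 * zero; that caller replenishes the budget and goes on to do the work. */
static bool claim_rescale(atomic_int *budget, int replenish)
{
        int r;

        do {
                r = atomic_load(budget);
                if (r >= 0)
                        return false;           /* budget not exhausted yet */
        } while (!atomic_compare_exchange_weak(budget, &r, r + replenish));

        return true;                            /* we won the race */
}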
+
+/* Discard/TRIM */
+
+struct discard {
+ struct list_head list;
+ struct work_struct work;
+ struct cache *ca;
+ long bucket;
+
+ struct bio bio;
+ struct bio_vec bv;
+};
+
+static void discard_finish(struct work_struct *w)
+{
+ struct discard *d = container_of(w, struct discard, work);
+ struct cache *ca = d->ca;
+ char buf[BDEVNAME_SIZE];
+
+ if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
+ pr_notice("discard error on %s, disabling",
+ bdevname(ca->bdev, buf));
+ d->ca->discard = 0;
+ }
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ fifo_push(&ca->free, d->bucket);
+ list_add(&d->list, &ca->discards);
+ atomic_dec(&ca->discards_in_flight);
+
+ mutex_unlock(&ca->set->bucket_lock);
+
+ closure_wake_up(&ca->set->bucket_wait);
+ wake_up(&ca->set->alloc_wait);
+
+ closure_put(&ca->set->cl);
+}
+
+static void discard_endio(struct bio *bio, int error)
+{
+ struct discard *d = container_of(bio, struct discard, bio);
+ schedule_work(&d->work);
+}
+
+static void do_discard(struct cache *ca, long bucket)
+{
+ struct discard *d = list_first_entry(&ca->discards,
+ struct discard, list);
+
+ list_del(&d->list);
+ d->bucket = bucket;
+
+ atomic_inc(&ca->discards_in_flight);
+ closure_get(&ca->set->cl);
+
+ bio_init(&d->bio);
+
+ d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket);
+ d->bio.bi_bdev = ca->bdev;
+ d->bio.bi_rw = REQ_WRITE|REQ_DISCARD;
+ d->bio.bi_max_vecs = 1;
+ d->bio.bi_io_vec = d->bio.bi_inline_vecs;
+ d->bio.bi_size = bucket_bytes(ca);
+ d->bio.bi_end_io = discard_endio;
+ bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ submit_bio(0, &d->bio);
+}
+
+/* Allocation */
+
+static inline bool can_inc_bucket_gen(struct bucket *b)
+{
+ return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
+ bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+}
+
+bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+{
+ BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
+
+ if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
+ CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
+ return false;
+
+ b->prio = 0;
+
+ if (can_inc_bucket_gen(b) &&
+ fifo_push(&ca->unused, b - ca->buckets)) {
+ atomic_inc(&b->pin);
+ return true;
+ }
+
+ return false;
+}
+
+static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
+{
+ return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+ !atomic_read(&b->pin) &&
+ can_inc_bucket_gen(b);
+}
+
+static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ bch_inc_gen(ca, b);
+ b->prio = INITIAL_PRIO;
+ atomic_inc(&b->pin);
+ fifo_push(&ca->free_inc, b - ca->buckets);
+}
+
+#define bucket_prio(b) \
+ (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+
+#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
+
+static void invalidate_buckets_lru(struct cache *ca)
+{
+ struct bucket *b;
+ ssize_t i;
+
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ /*
+ * If we fill up the unused list and then return before
+ * adding anything to the free_inc list, we'll skip writing
+ * prios/gens and just go back to allocating from the unused
+ * list:
+ */
+ if (fifo_full(&ca->unused))
+ return;
+
+ if (!can_invalidate_bucket(ca, b))
+ continue;
+
+ if (!GC_SECTORS_USED(b) &&
+ bch_bucket_add_unused(ca, b))
+ continue;
+
+ if (!heap_full(&ca->heap))
+ heap_add(&ca->heap, b, bucket_max_cmp);
+ else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_max_cmp);
+ }
+ }
+
+ for (i = ca->heap.used / 2 - 1; i >= 0; --i)
+ heap_sift(&ca->heap, i, bucket_min_cmp);
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+ /*
+ * We don't want to be calling invalidate_buckets()
+ * multiple times when it can't do anything
+ */
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+
+ invalidate_one_bucket(ca, b);
+ }
+}
+
+static void invalidate_buckets_fifo(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (ca->fifo_last_bucket < ca->sb.first_bucket ||
+ ca->fifo_last_bucket >= ca->sb.nbuckets)
+ ca->fifo_last_bucket = ca->sb.first_bucket;
+
+ b = ca->buckets + ca->fifo_last_bucket++;
+
+ if (can_invalidate_bucket(ca, b))
+ invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets) {
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets_random(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ size_t n;
+ get_random_bytes(&n, sizeof(n));
+
+ n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
+ n += ca->sb.first_bucket;
+
+ b = ca->buckets + n;
+
+ if (can_invalidate_bucket(ca, b))
+ invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets / 2) {
+ ca->invalidate_needs_gc = 1;
+ bch_queue_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets(struct cache *ca)
+{
+ if (ca->invalidate_needs_gc)
+ return;
+
+ switch (CACHE_REPLACEMENT(&ca->sb)) {
+ case CACHE_REPLACEMENT_LRU:
+ invalidate_buckets_lru(ca);
+ break;
+ case CACHE_REPLACEMENT_FIFO:
+ invalidate_buckets_fifo(ca);
+ break;
+ case CACHE_REPLACEMENT_RANDOM:
+ invalidate_buckets_random(ca);
+ break;
+ }
+
+ pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
+ fifo_used(&ca->free), ca->free.size,
+ fifo_used(&ca->free_inc), ca->free_inc.size,
+ fifo_used(&ca->unused), ca->unused.size);
+}
+
+#define allocator_wait(ca, cond) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ while (1) { \
+ prepare_to_wait(&ca->set->alloc_wait, \
+ &__wait, TASK_INTERRUPTIBLE); \
+ if (cond) \
+ break; \
+ \
+ mutex_unlock(&(ca)->set->bucket_lock); \
+ if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
+ finish_wait(&ca->set->alloc_wait, &__wait); \
+ closure_return(cl); \
+ } \
+ \
+ schedule(); \
+ mutex_lock(&(ca)->set->bucket_lock); \
+ } \
+ \
+ finish_wait(&ca->set->alloc_wait, &__wait); \
+} while (0)
+
+void bch_allocator_thread(struct closure *cl)
+{
+ struct cache *ca = container_of(cl, struct cache, alloc);
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ while (1) {
+ /*
+ * First, we pull buckets off of the unused and free_inc lists,
+ * possibly issue discards to them, then we add the bucket to
+ * the free list:
+ */
+ while (1) {
+ long bucket;
+
+ if ((!atomic_read(&ca->set->prio_blocked) ||
+ !CACHE_SYNC(&ca->set->sb)) &&
+ !fifo_empty(&ca->unused))
+ fifo_pop(&ca->unused, bucket);
+ else if (!fifo_empty(&ca->free_inc))
+ fifo_pop(&ca->free_inc, bucket);
+ else
+ break;
+
+ allocator_wait(ca, (int) fifo_free(&ca->free) >
+ atomic_read(&ca->discards_in_flight));
+
+ if (ca->discard) {
+ allocator_wait(ca, !list_empty(&ca->discards));
+ do_discard(ca, bucket);
+ } else {
+ fifo_push(&ca->free, bucket);
+ closure_wake_up(&ca->set->bucket_wait);
+ }
+ }
+
+ /*
+ * We've run out of free buckets, we need to find some buckets
+ * we can invalidate. First, invalidate them in memory and add
+ * them to the free_inc list:
+ */
+
+ allocator_wait(ca, ca->set->gc_mark_valid &&
+ (ca->need_save_prio > 64 ||
+ !ca->invalidate_needs_gc));
+ invalidate_buckets(ca);
+
+ /*
+ * Now, we write their new gens to disk so we can start writing
+ * new stuff to them:
+ */
+ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+ if (CACHE_SYNC(&ca->set->sb) &&
+ (!fifo_empty(&ca->free_inc) ||
+ ca->need_save_prio > 64))
+ bch_prio_write(ca);
+ }
+}
+
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+{
+ long r = -1;
+again:
+ wake_up(&ca->set->alloc_wait);
+
+ if (fifo_used(&ca->free) > ca->watermark[watermark] &&
+ fifo_pop(&ca->free, r)) {
+ struct bucket *b = ca->buckets + r;
+#ifdef CONFIG_BCACHE_EDEBUG
+ size_t iter;
+ long i;
+
+ for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
+ BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
+
+ fifo_for_each(i, &ca->free, iter)
+ BUG_ON(i == r);
+ fifo_for_each(i, &ca->free_inc, iter)
+ BUG_ON(i == r);
+ fifo_for_each(i, &ca->unused, iter)
+ BUG_ON(i == r);
+#endif
+ BUG_ON(atomic_read(&b->pin) != 1);
+
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (watermark <= WATERMARK_METADATA) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ b->prio = INITIAL_PRIO;
+ }
+
+ return r;
+ }
+
+ pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
+ atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
+ fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+
+ if (cl) {
+ closure_wait(&ca->set->bucket_wait, cl);
+
+ if (closure_blocking(cl)) {
+ mutex_unlock(&ca->set->bucket_lock);
+ closure_sync(cl);
+ mutex_lock(&ca->set->bucket_lock);
+ goto again;
+ }
+ }
+
+ return -1;
+}
+
+void bch_bucket_free(struct cache_set *c, struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ struct bucket *b = PTR_BUCKET(c, k, i);
+
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_SECTORS_USED(b, 0);
+ bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
+ }
+}
+
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+ struct bkey *k, int n, struct closure *cl)
+{
+ int i;
+
+ lockdep_assert_held(&c->bucket_lock);
+ BUG_ON(!n || n > c->caches_loaded || n > 8);
+
+ bkey_init(k);
+
+ /* sort by free space/prio of oldest data in caches */
+
+ for (i = 0; i < n; i++) {
+ struct cache *ca = c->cache_by_alloc[i];
+ long b = bch_bucket_alloc(ca, watermark, cl);
+
+ if (b == -1)
+ goto err;
+
+ k->ptr[i] = PTR(ca->buckets[b].gen,
+ bucket_to_sector(c, b),
+ ca->sb.nr_this_dev);
+
+ SET_KEY_PTRS(k, i + 1);
+ }
+
+ return 0;
+err:
+ bch_bucket_free(c, k);
+ __bkey_put(c, k);
+ return -1;
+}
+
+int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+ struct bkey *k, int n, struct closure *cl)
+{
+ int ret;
+ mutex_lock(&c->bucket_lock);
+ ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+/* Init */
+
+void bch_cache_allocator_exit(struct cache *ca)
+{
+ struct discard *d;
+
+ while (!list_empty(&ca->discards)) {
+ d = list_first_entry(&ca->discards, struct discard, list);
+ cancel_work_sync(&d->work);
+ list_del(&d->list);
+ kfree(d);
+ }
+}
+
+int bch_cache_allocator_init(struct cache *ca)
+{
+ unsigned i;
+
+ /*
+ * Reserve:
+ * Prio/gen writes first
+ * Then 8 for btree allocations
+ * Then half for the moving garbage collector
+ */
+
+ ca->watermark[WATERMARK_PRIO] = 0;
+
+ ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
+
+ ca->watermark[WATERMARK_MOVINGGC] = 8 +
+ ca->watermark[WATERMARK_METADATA];
+
+ ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
+ ca->watermark[WATERMARK_MOVINGGC];
+
+ for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
+ struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->ca = ca;
+ INIT_WORK(&d->work, discard_finish);
+ list_add(&d->list, &ca->discards);
+ }
+
+ return 0;
+}
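A worked example of the reservation ladder set up in bch_cache_allocator_init(), using assumed sizes: free.size == 512 buckets and prio_buckets(ca) == 2 are made-up numbers, not taken from the patch. bch_bucket_alloc() only hands out a bucket while fifo_used(&ca->free) exceeds the caller's watermark, so ordinary allocations always leave headroom for prio/gen writes, btree nodes and the moving garbage collector.

/* Sketch: watermark values for the hypothetical sizes above. */
static void sketch_watermarks(void)
{
        unsigned prio     = 0;                   /*   0: prio/gen writes       */
        unsigned metadata = 2;                   /*   2: prio_buckets(ca)      */
        unsigned movinggc = 8 + metadata;        /*  10: + 8 btree allocations */
        unsigned none     = 512 / 2 + movinggc;  /* 266: + half of free.size   */

        (void)prio; (void)metadata; (void)movinggc; (void)none;
}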
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
new file mode 100644
index 000000000000..340146d7c17f
--- /dev/null
+++ b/drivers/md/bcache/bcache.h
@@ -0,0 +1,1259 @@
+#ifndef _BCACHE_H
+#define _BCACHE_H
+
+/*
+ * SOME HIGH LEVEL CODE DOCUMENTATION:
+ *
+ * Bcache mostly works with cache sets, cache devices, and backing devices.
+ *
+ * Support for multiple cache devices hasn't quite been finished off yet, but
+ * it's about 95% plumbed through. A cache set and its cache devices are sort of
+ * like an md raid array and its component devices. Most of the code doesn't care
+ * about individual cache devices, the main abstraction is the cache set.
+ *
+ * Multiple cache devices are intended to give us the ability to mirror dirty
+ * cached data and metadata, without mirroring clean cached data.
+ *
+ * Backing devices are different, in that they have a lifetime independent of a
+ * cache set. When you register a newly formatted backing device it'll come up
+ * in passthrough mode, and then you can attach and detach a backing device from
+ * a cache set at runtime - while it's mounted and in use. Detaching implicitly
+ * invalidates any cached data for that backing device.
+ *
+ * A cache set can have multiple (many) backing devices attached to it.
+ *
+ * There's also flash only volumes - this is the reason for the distinction
+ * between struct cached_dev and struct bcache_device. A flash only volume
+ * works much like a bcache device that has a backing device, except the
+ * "cached" data is always dirty. The end result is that we get thin
+ * provisioning with very little additional code.
+ *
+ * Flash only volumes work but they're not production ready because the moving
+ * garbage collector needs more work. More on that later.
+ *
+ * BUCKETS/ALLOCATION:
+ *
+ * Bcache is primarily designed for caching, which means that in normal
+ * operation all of our available space will be allocated. Thus, we need an
+ * efficient way of deleting things from the cache so we can write new things to
+ * it.
+ *
+ * To do this, we first divide the cache device up into buckets. A bucket is the
+ * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
+ * works efficiently.
+ *
+ * Each bucket has a 16 bit priority, and an 8 bit generation associated with
+ * it. The gens and priorities for all the buckets are stored contiguously and
+ * packed on disk (in a linked list of buckets - aside from the superblock, all
+ * of bcache's metadata is stored in buckets).
+ *
+ * The priority is used to implement an LRU. We reset a bucket's priority when
+ * we allocate it or on a cache hit, and every so often we decrement the priority
+ * of each bucket. It could be used to implement something more sophisticated,
+ * if anyone ever gets around to it.
+ *
+ * The generation is used for invalidating buckets. Each pointer also has an 8
+ * bit generation embedded in it; for a pointer to be considered valid, its gen
+ * must match the gen of the bucket it points into. Thus, to reuse a bucket all
+ * we have to do is increment its gen (and write its new gen to disk; we batch
+ * this up).
+ *
+ * Bcache is entirely COW - we never write twice to a bucket, even buckets that
+ * contain metadata (including btree nodes).
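A minimal sketch of the LRU scheme just described: priorities are reset when a bucket is used and decayed as I/O flows, so the coldest buckets end up cheapest to invalidate. Not part of the patch; the names and the initial-priority value below are assumptions for illustration (the real constants and the decay loop live in bcache.h and alloc.c's bch_rescale_priorities()):

#include <stdint.h>

#define SKETCH_INITIAL_PRIO 32768U   /* assumed value, for illustration only */

struct sketch_lru_bucket { uint16_t prio; };

/* Reset to a high priority whenever the bucket is allocated or its data
 * gets a cache hit ... */
static void sketch_bucket_used(struct sketch_lru_bucket *b)
{
        b->prio = SKETCH_INITIAL_PRIO;
}

/* ... and decay every bucket a little as I/O flows, so buckets that have
 * not been touched for a long time end up with the lowest priorities and
 * become the preferred victims for invalidation. */
static void sketch_rescale_one(struct sketch_lru_bucket *b)
{
        if (b->prio)
                b->prio--;
}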
+ *
+ * THE BTREE:
+ *
+ * Bcache is in large part designed around the btree.
+ *
+ * At a high level, the btree is just an index of key -> ptr tuples.
+ *
+ * Keys represent extents, and thus have a size field. Keys also have a variable
+ * number of pointers attached to them (potentially zero, which is handy for
+ * invalidating the cache).
+ *
+ * The key itself is an inode:offset pair. The inode number corresponds to a
+ * backing device or a flash only volume. The offset is the ending offset of the
+ * extent within the inode - not the starting offset; this makes lookups
+ * slightly more convenient.
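A sketch of the key layout just described, with the end-offset convention made explicit. This is an unpacked stand-in, not part of the patch; the real struct bkey stores these fields as packed bitfields:

#include <stdint.h>

struct sketch_key {
        uint64_t inode;    /* backing device or flash-only volume        */
        uint64_t offset;   /* ENDING sector of the extent, not the start */
        unsigned size;     /* extent length in sectors                   */
};

static inline uint64_t sketch_key_start(const struct sketch_key *k)
{
        return k->offset - k->size;
}

/* Because keys sort by (inode, offset) and offset is the end, "which
 * extent covers sector s?" is answered by finding the first key with
 * offset > s and then checking that its start is <= s - a plain
 * successor lookup in the btree. */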
+ *
+ * Pointers contain the cache device id, the offset on that device, and an 8 bit
+ * generation number. More on the gen later.
+ *
+ * Index lookups are not fully abstracted - cache lookups in particular are
+ * still somewhat mixed in with the btree code, but things are headed in that
+ * direction.
+ *
+ * Updates are fairly well abstracted, though. There are two different ways of
+ * updating the btree; insert and replace.
+ *
+ * BTREE_INSERT will just take a list of keys and insert them into the btree -
+ * overwriting (possibly only partially) any extents they overlap with. This is
+ * used to update the index after a write.
+ *
+ * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
+ * overwriting a key that matches another given key. This is used for inserting
+ * data into the cache after a cache miss, and for background writeback, and for
+ * the moving garbage collector.
+ *
+ * There is no "delete" operation; deleting things from the index is
+ * accomplished either by invalidating pointers (by incrementing a bucket's
+ * gen) or by inserting a key with 0 pointers - which will overwrite anything
+ * previously present at that location in the index.
+ *
+ * This means that there are always stale/invalid keys in the btree. They're
+ * filtered out by the code that iterates through a btree node, and removed when
+ * a btree node is rewritten.
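The cmpxchg() analogy for BTREE_REPLACE can be shown on a toy one-slot index. Purely illustrative and not the real btree API - the real operation works on extents inside btree nodes - but the success/failure contract is the point:

#include <stdbool.h>

struct toy_key   { unsigned long long inode, offset; unsigned size; };
struct toy_index { struct toy_key slot; };

static bool toy_key_eq(const struct toy_key *a, const struct toy_key *b)
{
        return a->inode == b->inode && a->offset == b->offset &&
               a->size == b->size;
}

/* BTREE_INSERT-like: unconditionally index @k, clobbering whatever was
 * there (used to update the index after a write). */
static void toy_insert(struct toy_index *idx, const struct toy_key *k)
{
        idx->slot = *k;
}

/* BTREE_REPLACE-like: behave like cmpxchg() - only index @k where the
 * index still holds @old, so cache-miss fills, writeback and moving GC
 * never clobber what a racing foreground write indexed in the meantime. */
static bool toy_replace(struct toy_index *idx, const struct toy_key *old,
                        const struct toy_key *k)
{
        if (!toy_key_eq(&idx->slot, old))
                return false;
        idx->slot = *k;
        return true;
}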
+ *
+ * BTREE NODES:
+ *
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
+ * free smaller than a bucket - so, that's how big our btree nodes are.
+ *
+ * (If buckets are really big we'll only use part of the bucket for a btree node
+ * - no less than 1/4th - but a bucket still contains no more than a single
+ * btree node. I'd actually like to change this, but for now we rely on the
+ * bucket's gen for deleting btree nodes when we rewrite/split a node.)
+ *
+ * Anyways, btree nodes are big - big enough to be inefficient with a textbook
+ * btree implementation.
+ *
+ * The way this is solved is that btree nodes are internally log structured; we
+ * can append new keys to an existing btree node without rewriting it. This
+ * means each set of keys we write is sorted, but the node is not.
+ *
+ * We maintain this log structure in memory - keeping 1Mb of keys sorted would
+ * be expensive, and we have to distinguish between the keys we have written and
+ * the keys we haven't. So to do a lookup in a btree node, we have to search
+ * each sorted set. But we do merge written sets together lazily, so the cost of
+ * these extra searches is quite low (normally most of the keys in a btree node
+ * will be in one big set, and then there'll be one or two sets that are much
+ * smaller).
+ *
+ * This log structure makes bcache's btree more of a hybrid between a
+ * conventional btree and a compacting data structure, with some of the
+ * advantages of both.
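A sketch of why lookups stay cheap even though a node is only sorted per set: one binary search per sorted set instead of a scan of the whole node. Illustrative only and not part of the patch; the real code iterates struct bset objects with a merging iterator and also has to cope with overlapping extents:

#include <stdbool.h>
#include <stdlib.h>

struct toy_set { const unsigned long long *keys; size_t nr; };

static int cmp_u64(const void *a, const void *b)
{
        unsigned long long x = *(const unsigned long long *)a;
        unsigned long long y = *(const unsigned long long *)b;

        return (x > y) - (x < y);
}

/* A node is a handful of independently sorted sets; consult each one. */
static bool toy_node_contains(const struct toy_set *sets, unsigned nr_sets,
                              unsigned long long key)
{
        unsigned i;

        for (i = 0; i < nr_sets; i++)
                if (bsearch(&key, sets[i].keys, sets[i].nr,
                            sizeof(key), cmp_u64))
                        return true;

        return false;
}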
+ *
+ * GARBAGE COLLECTION:
+ *
+ * We can't just invalidate any bucket - it might contain dirty data or
+ * metadata. If it once contained dirty data, other writes might overwrite it
+ * later, leaving no valid pointers into that bucket in the index.
+ *
+ * Thus, the primary purpose of garbage collection is to find buckets to reuse.
+ * It also counts how much valid data each bucket currently contains, so that
+ * allocation can reuse buckets sooner when they've been mostly overwritten.
+ *
+ * It also does some things that are really internal to the btree
+ * implementation. If a btree node contains pointers that are stale by more than
+ * some threshold, it rewrites the btree node to avoid the bucket's generation
+ * wrapping around. It also merges adjacent bt