/*
* bcache journalling code, for btree insertions
*
* Copyright 2012 Google, Inc.
*/
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include <trace/events/bcache.h>
/*
* Journal replay/recovery:
*
* This code is all driven from run_cache_set(); we first read the journal
* entries, do some other stuff, then we mark all the keys in the journal
* entries (same as garbage collection would), then we replay them - reinserting
* them into the cache in precisely the same order as they appear in the
* journal.
*
* We only journal keys that go in leaf nodes, which simplifies things quite a
* bit.
*/
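
/*
 * Roughly, the recovery sequence driven from run_cache_set() is:
 *
 *	bch_journal_read(c, &journal);		read entries from every cache
 *	bch_journal_mark(c, &journal);		mark the journalled keys
 *	bch_journal_replay(c, &journal);	reinsert them in journal order
 */

/*
 * Completion handler for journal reads: drop the closure ref taken at
 * submit time, letting the closure_sync() in journal_read_bucket() return.
 */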
static void journal_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
}
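
/*
 * Read all the journal entries in a single journal bucket, validating each
 * set and splicing the good ones into @list, which is kept sorted by
 * sequence number.  Returns 1 if any entries were added, 0 if the bucket
 * held none, and -ENOMEM if allocating a journal_replay failed.
 */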
static int journal_read_bucket(struct cache *ca, struct list_head *list,
unsigned bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
struct closure cl;
unsigned len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
closure_init_stack(&cl);
pr_debug("reading %u", bucket_index);
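
	/*
	 * Read the bucket a chunk at a time, each chunk at most the size of
	 * the preallocated journal write buffer (PAGE_SECTORS << JSET_BITS
	 * sectors) that @data points into.
	 */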
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(bio, &cl);
closure_sync(&cl);
/* This function could be simpler now since we no longer write
* journal entries that overlap bucket boundaries; this means
* the start of a bucket will always have a valid journal entry
* if it has any journal entries at all.
*/
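
		/*
		 * Step through each jset in the chunk just read, validating
		 * it before splicing it into the replay list.
		 */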
j = data;
while (len) {
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
if (j->magic != jset_magic(&ca->sb)) {
pr_debug("%u: bad magic", bucket_index);
return ret;
}
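
			/*
			 * j->keys came straight off disk, so bound the size
			 * it implies by both the space left in the bucket and
			 * the size of our read buffer before trusting it.
			 */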
if (bytes > left << 9 ||
bytes > PAGE_SIZE << JSET_BITS) {
pr_info("%u: too big, %zu bytes, offset %u",
bucket_index, bytes, offset);
return ret;
}
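
			/*
			 * The set extends past the end of this chunk: go back
			 * and reread from the current offset so the whole set
			 * lands in the buffer.
			 */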
if (bytes > len << 9)
goto reread;
if (j->csum != csum_set(j)) {
pr_info("%u: bad csum, %zu bytes, offset %u",
bucket_index, bytes, offset);
return ret;
}
blocks = set_blocks(j, block_bytes(ca->set));
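
			/*
			 * j->last_seq is the oldest journal entry that still
			 * has keys the btree needs; anything at the front of
			 * the sorted list older than that has already been
			 * flushed and can be dropped.
			 */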
while (!list_empty(list)) {
i = list_first_entry(list,
struct journal_replay, list);
if (i->j.seq >= j->last_seq)
break;
list_del(&i->list);
kfree(i);
}
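
			/*
			 * Walk the list newest to oldest to find the insertion
			 * point, skipping sets we already have and sets made
			 * obsolete by a newer entry's last_seq.
			 */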
list_for_each_entry_reverse(i, list, list) {
if (j->seq == i->j.seq)
goto next_set;
if (j->seq < i->j.last_seq)
goto next_set;
if (j->seq > i->j.seq) {
where = &i->list;
goto add;
}
}
where = list;
add:
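			/*
			 * struct journal_replay embeds the jset as its last
			 * member, so allocate the header plus the full set and
			 * copy the whole thing in.
			 */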
i = kmalloc(offsetof(struct journal_replay, j) +
bytes, GFP_KERNEL);
if (!i)
return -ENOMEM;
memcpy(&i->j, j, bytes);
list_add(&i->list, where);
ret = 1;
ja->seq[bucket_index] = j->seq;
next_set:
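			/*
			 * Advance past this set: offset and len are in
			 * sectors, j is advanced in bytes.
			 */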
offset += blocks * ca->sb.block_size;
len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca->set);
}
}
return ret;
}
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
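/*
 * Helper: read one journal bucket, note in @bitmap that we've looked at it,
 * and propagate any negative error straight out of bch_journal_read().
 * Evaluates to nonzero if the bucket held valid entries.
 */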
#define read_bucket(b) \
({ \
int ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
ret; \
})
struct cache *ca;
unsigned iter;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
unsigned i, l, r, m;
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
pr_debug("%u journal buckets", ca->sb.njournal_buckets);
		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries:
		 * 2654435769 is 2^32 divided by the golden ratio, so
		 * successive values of i probe the buckets in a maximally
		 * spread-out order.
		 */
for (i = 0; i < ca->sb.njournal_buckets; i++) {
l = (i * 2654435769U) % ca->sb.njournal_buckets;
			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}