// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
*
* Based upon the circular ringbuffer.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* pblk-rb.c - pblk's write buffer
*/
#include <linux/circ_buf.h>
#include "pblk.h"
static DECLARE_RWSEM(pblk_rb_lock);
/*
 * pblk_rb_data_free - release every backing page set attached to the ring
 * buffer. Only the data pages are freed here; the entry metadata array is
 * the caller's (pblk_rb_free) responsibility.
 */
static void pblk_rb_data_free(struct pblk_rb *rb)
{
	struct pblk_rb_pages *page_set, *tmp;

	/* Serialize buffer setup/teardown across all pblk instances. */
	down_write(&pblk_rb_lock);
	list_for_each_entry_safe(page_set, tmp, &rb->pages, list) {
		free_pages((unsigned long)page_address(page_set->pages),
			   page_set->order);
		list_del(&page_set->list);
		kfree(page_set);
	}
	up_write(&pblk_rb_lock);
}
/*
 * pblk_rb_free - tear down a write buffer initialized by pblk_rb_init():
 * first release the backing data pages, then the vmalloc'ed entry array.
 */
void pblk_rb_free(struct pblk_rb *rb)
{
	pblk_rb_data_free(rb);
	vfree(rb->entries);
}
/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,
					   unsigned int threshold)
{
	/* Room needed so the flush threshold can always be honoured while a
	 * maximum-sized I/O is in flight.
	 */
	unsigned int headroom = threshold + NVM_MAX_VLBA;
	unsigned int thr_sz = 1 << get_count_order(headroom);
	unsigned int max_sz = max(thr_sz, nr_entries);
	unsigned int buf_sz;

	/* Alloc a write buffer that can (i) fit at least two split bios
	 * (considering max I/O size NVM_MAX_VLBA), and (ii) guarantee that
	 * the threshold will be respected.
	 */
	buf_sz = 1 << max((int)get_count_order(max_sz),
			  (int)get_count_order(NVM_MAX_VLBA << 1));
	if (headroom >= buf_sz)
		buf_sz <<= 1;

	return buf_sz;
}
/*
* Initialize ring buffer. The data and metadata buffers must be previously
* allocated and their size must be a power of two
* (Documentation/core-api/circular-buffers.rst)
*/
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
unsigned int seg_size)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entries;
unsigned int init_entry = 0;
unsigned int max_order = MAX_ORDER - 1;
unsigned int power_size, power_seg_sz;
unsigned int alloc_order, order, iter;
unsigned int nr_entries;
nr_entries = pblk_rb_calculate_size(size, threshold);
entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
if (!entries)
return -ENOMEM;
power_size = get_count_order(nr_entries);
power_seg_sz = get_count_order(seg_size);
down_write(&pblk_rb_lock);
rb->entries = entries;
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
rb->back_thres = threshold;
rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
INIT_LIST_HEAD(&rb->pages);
alloc_order = power_size;
if (alloc_order >= max_order) {
order = max_order;
iter = (1 << (alloc_order - max_order));
} else {
order = alloc_order;
iter = 1;
}
do {
struct pblk_rb_entry *entry;
struct pblk_rb_pages *page_set;
void *kaddr;
unsigned long set_size;
int i;
page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
if (!page_set) {
up_write(&pblk_rb_lock);
vfree(entries);
return -ENOMEM;
}
page_set->order = order;
page_set->pages = alloc_pages(GFP_KERNEL, order);
if (!page_set->pages) {
kfree(page_set);
pblk_rb_data_free(rb);
up_write(&pblk_rb_lock);
vfree(entries);
return -ENOMEM;
}
kaddr = page_address(page_set->pages);
entry = &rb->entries[init_entry];
entry->data = kaddr;
entry->cacheline = pblk_cacheline_to_addr(init_entry++);
entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
set_size = (1 << order);
for (i = 1; i < set_size; i++) {
entry = &rb->entries[init_entry];
entry->cacheline = pblk_cacheline_to_addr(init_entry++);
entry->data = kaddr + (i * rb->seg_size);
entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
bio_list_init(&entry->w_ctx.bios);
}
list_add_tail(&page_set->list, &rb->pages);
iter--;
} while (iter > 0);
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
* Initialize rate-lim