From c1a15d08f497150a91ba4e61bab54b8f5c8b49b9 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:18:55 +0200 Subject: xen-blkback: print stats about persistent grants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Roger Pau Monné Cc: xen-devel@lists.xen.org Cc: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index dd5b2fed97e9..f7526dbb204d 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -382,10 +382,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) static void print_stats(struct xen_blkif *blkif) { pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" - " | ds %4llu\n", + " | ds %4llu | pg: %4u/%4u\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, - blkif->st_f_req, blkif->st_ds_req); + blkif->st_f_req, blkif->st_ds_req, + blkif->persistent_gnt_c, + max_mapped_grant_pages(blkif->blk_protocol)); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_rd_req = 0; blkif->st_wr_req = 0; -- cgit v1.2.3 From c6cc142dac52e62e1e8a2aff5de1300202b96c66 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:18:56 +0200 Subject: xen-blkback: use balloon pages for all mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using balloon pages for all granted pages allows us to simplify the logic in blkback, especially in the xen_blkbk_map function, since now we can decide if we want to map a grant persistently or not after we have actually mapped it. This could not be done before because persistent grants used ballooned pages, whereas non-persistent grants used pages from the kernel. This patch also introduces several changes, the first one is that the list of free pages is no longer global, now each blkback instance has it's own list of free pages that can be used to map grants. Also, a run time parameter (max_buffer_pages) has been added in order to tune the maximum number of free pages each blkback instance will keep in it's buffer. Signed-off-by: Roger Pau Monné Cc: xen-devel@lists.xen.org Cc: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 286 +++++++++++++++++++++--------------- drivers/block/xen-blkback/common.h | 5 + drivers/block/xen-blkback/xenbus.c | 3 + 3 files changed, 173 insertions(+), 121 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index f7526dbb204d..8245c6bb9539 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -63,6 +63,21 @@ static int xen_blkif_reqs = 64; module_param_named(reqs, xen_blkif_reqs, int, 0); MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate"); +/* + * Maximum number of unused free pages to keep in the internal buffer. + * Setting this to a value too low will reduce memory used in each backend, + * but can have a performance penalty. + * + * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can + * be set to a lower value that might degrade performance on some intensive + * IO workloads. 
+ */ + +static int xen_blkif_max_buffer_pages = 704; +module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); +MODULE_PARM_DESC(max_buffer_pages, +"Maximum number of free pages to keep in each block backend buffer"); + /* Run-time switchable: /sys/module/blkback/parameters/ */ static unsigned int log_stats; module_param(log_stats, int, 0644); @@ -82,10 +97,14 @@ struct pending_req { int status; struct list_head free_list; DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLKBACK_INVALID_HANDLE (~0) +/* Number of free pages to remove on each call to free_xenballooned_pages */ +#define NUM_BATCH_FREE_PAGES 10 + struct xen_blkbk { struct pending_req *pending_reqs; /* List of all 'pending_req' available */ @@ -93,8 +112,6 @@ struct xen_blkbk { /* And its spinlock. */ spinlock_t pending_free_lock; wait_queue_head_t pending_free_wq; - /* The list of all pages that are available. */ - struct page **pending_pages; /* And the grant handles that are available. */ grant_handle_t *pending_grant_handles; }; @@ -143,14 +160,66 @@ static inline int vaddr_pagenr(struct pending_req *req, int seg) BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; } -#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] +static inline int get_free_page(struct xen_blkif *blkif, struct page **page) +{ + unsigned long flags; + + spin_lock_irqsave(&blkif->free_pages_lock, flags); + if (list_empty(&blkif->free_pages)) { + BUG_ON(blkif->free_pages_num != 0); + spin_unlock_irqrestore(&blkif->free_pages_lock, flags); + return alloc_xenballooned_pages(1, page, false); + } + BUG_ON(blkif->free_pages_num == 0); + page[0] = list_first_entry(&blkif->free_pages, struct page, lru); + list_del(&page[0]->lru); + blkif->free_pages_num--; + spin_unlock_irqrestore(&blkif->free_pages_lock, flags); -static inline unsigned long vaddr(struct pending_req *req, int seg) + return 0; +} + +static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, + int num) { - unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg)); - return (unsigned long)pfn_to_kaddr(pfn); + unsigned long flags; + int i; + + spin_lock_irqsave(&blkif->free_pages_lock, flags); + for (i = 0; i < num; i++) + list_add(&page[i]->lru, &blkif->free_pages); + blkif->free_pages_num += num; + spin_unlock_irqrestore(&blkif->free_pages_lock, flags); +} + +static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) +{ + /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ + struct page *page[NUM_BATCH_FREE_PAGES]; + unsigned int num_pages = 0; + unsigned long flags; + + spin_lock_irqsave(&blkif->free_pages_lock, flags); + while (blkif->free_pages_num > num) { + BUG_ON(list_empty(&blkif->free_pages)); + page[num_pages] = list_first_entry(&blkif->free_pages, + struct page, lru); + list_del(&page[num_pages]->lru); + blkif->free_pages_num--; + if (++num_pages == NUM_BATCH_FREE_PAGES) { + spin_unlock_irqrestore(&blkif->free_pages_lock, flags); + free_xenballooned_pages(num_pages, page); + spin_lock_irqsave(&blkif->free_pages_lock, flags); + num_pages = 0; + } + } + spin_unlock_irqrestore(&blkif->free_pages_lock, flags); + if (num_pages != 0) + free_xenballooned_pages(num_pages, page); } +#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) + #define pending_handle(_req, _seg) \ (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) @@ -170,7 +239,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, (n) = (&(pos)->node != 
NULL) ? rb_next(&(pos)->node) : NULL) -static void add_persistent_gnt(struct rb_root *root, +static int add_persistent_gnt(struct rb_root *root, struct persistent_gnt *persistent_gnt) { struct rb_node **new = &(root->rb_node), *parent = NULL; @@ -186,14 +255,15 @@ static void add_persistent_gnt(struct rb_root *root, else if (persistent_gnt->gnt > this->gnt) new = &((*new)->rb_right); else { - pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n"); - BUG(); + pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n"); + return -EINVAL; } } /* Add new node and rebalance tree. */ rb_link_node(&(persistent_gnt->node), parent, new); rb_insert_color(&(persistent_gnt->node), root); + return 0; } static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, @@ -215,7 +285,8 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, return NULL; } -static void free_persistent_gnts(struct rb_root *root, unsigned int num) +static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, + unsigned int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; @@ -240,7 +311,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); - free_xenballooned_pages(segs_to_unmap, pages); + put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } @@ -422,13 +493,19 @@ int xen_blkif_schedule(void *arg) if (do_block_io_op(blkif)) blkif->waiting_reqs = 1; + /* Shrink if we have more than xen_blkif_max_buffer_pages */ + shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); + if (log_stats && time_after(jiffies, blkif->st_print)) print_stats(blkif); } + /* Since we are shutting down remove all pages from the buffer */ + shrink_free_pagepool(blkif, 0 /* All */); + /* Free all persistent grant pages */ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) - free_persistent_gnts(&blkif->persistent_gnts, + free_persistent_gnts(blkif, &blkif->persistent_gnts, blkif->persistent_gnt_c); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); @@ -457,23 +534,25 @@ static void xen_blkbk_unmap(struct pending_req *req) struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int i, invcount = 0; grant_handle_t handle; + struct xen_blkif *blkif = req->blkif; int ret; for (i = 0; i < req->nr_pages; i++) { if (!test_bit(i, req->unmap_seg)) continue; handle = pending_handle(req, i); + pages[invcount] = req->pages[i]; if (handle == BLKBACK_INVALID_HANDLE) continue; - gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), + gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[invcount]), GNTMAP_host_map, handle); pending_handle(req, i) = BLKBACK_INVALID_HANDLE; - pages[invcount] = virt_to_page(vaddr(req, i)); invcount++; } ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); BUG_ON(ret); + put_free_pages(blkif, pages, invcount); } static int xen_blkbk_map(struct blkif_request *req, @@ -487,8 +566,7 @@ static int xen_blkbk_map(struct blkif_request *req, struct persistent_gnt *persistent_gnt = NULL; struct xen_blkif *blkif = pending_req->blkif; phys_addr_t addr = 0; - int i, j; - bool new_map; + int i, seg_idx, new_map_idx; int nseg = req->u.rw.nr_segments; int segs_to_map = 0; int ret = 0; @@ -517,68 +595,16 @@ static int xen_blkbk_map(struct blkif_request *req, * We are using persistent grants and * the grant is already mapped */ - new_map = false; - } else if (use_persistent_gnts && - 
blkif->persistent_gnt_c < - max_mapped_grant_pages(blkif->blk_protocol)) { - /* - * We are using persistent grants, the grant is - * not mapped but we have room for it - */ - new_map = true; - persistent_gnt = kmalloc( - sizeof(struct persistent_gnt), - GFP_KERNEL); - if (!persistent_gnt) - return -ENOMEM; - if (alloc_xenballooned_pages(1, &persistent_gnt->page, - false)) { - kfree(persistent_gnt); - return -ENOMEM; - } - persistent_gnt->gnt = req->u.rw.seg[i].gref; - persistent_gnt->handle = BLKBACK_INVALID_HANDLE; - - pages_to_gnt[segs_to_map] = - persistent_gnt->page; - addr = (unsigned long) pfn_to_kaddr( - page_to_pfn(persistent_gnt->page)); - - add_persistent_gnt(&blkif->persistent_gnts, - persistent_gnt); - blkif->persistent_gnt_c++; - pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", - persistent_gnt->gnt, blkif->persistent_gnt_c, - max_mapped_grant_pages(blkif->blk_protocol)); - } else { - /* - * We are either using persistent grants and - * hit the maximum limit of grants mapped, - * or we are not using persistent grants. - */ - if (use_persistent_gnts && - !blkif->vbd.overflow_max_grants) { - blkif->vbd.overflow_max_grants = 1; - pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", - blkif->domid, blkif->vbd.handle); - } - new_map = true; - pages[i] = blkbk->pending_page(pending_req, i); - addr = vaddr(pending_req, i); - pages_to_gnt[segs_to_map] = - blkbk->pending_page(pending_req, i); - } - - if (persistent_gnt) { pages[i] = persistent_gnt->page; persistent_gnts[i] = persistent_gnt; } else { + if (get_free_page(blkif, &pages[i])) + goto out_of_memory; + addr = vaddr(pages[i]); + pages_to_gnt[segs_to_map] = pages[i]; persistent_gnts[i] = NULL; - } - - if (new_map) { flags = GNTMAP_host_map; - if (!persistent_gnt && + if (!use_persistent_gnts && (pending_req->operation != BLKIF_OP_READ)) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, @@ -598,48 +624,81 @@ static int xen_blkbk_map(struct blkif_request *req, * the page from the other domain. */ bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); - for (i = 0, j = 0; i < nseg; i++) { - if (!persistent_gnts[i] || - persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) { + for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) { + if (!persistent_gnts[seg_idx]) { /* This is a newly mapped grant */ - BUG_ON(j >= segs_to_map); - if (unlikely(map[j].status != 0)) { + BUG_ON(new_map_idx >= segs_to_map); + if (unlikely(map[new_map_idx].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); - map[j].handle = BLKBACK_INVALID_HANDLE; + pending_handle(pending_req, seg_idx) = BLKBACK_INVALID_HANDLE; ret |= 1; - if (persistent_gnts[i]) { - rb_erase(&persistent_gnts[i]->node, - &blkif->persistent_gnts); - blkif->persistent_gnt_c--; - kfree(persistent_gnts[i]); - persistent_gnts[i] = NULL; - } + new_map_idx++; + /* + * No need to set unmap_seg bit, since + * we can not unmap this grant because + * the handle is invalid. 
+ */ + continue; } + pending_handle(pending_req, seg_idx) = map[new_map_idx].handle; + } else { + /* This grant is persistent and already mapped */ + goto next; } - if (persistent_gnts[i]) { - if (persistent_gnts[i]->handle == - BLKBACK_INVALID_HANDLE) { + if (use_persistent_gnts && + blkif->persistent_gnt_c < + max_mapped_grant_pages(blkif->blk_protocol)) { + /* + * We are using persistent grants, the grant is + * not mapped but we have room for it + */ + persistent_gnt = kmalloc(sizeof(struct persistent_gnt), + GFP_KERNEL); + if (!persistent_gnt) { /* - * If this is a new persistent grant - * save the handler + * If we don't have enough memory to + * allocate the persistent_gnt struct + * map this grant non-persistenly */ - persistent_gnts[i]->handle = map[j++].handle; + goto next_unmap; } - pending_handle(pending_req, i) = - persistent_gnts[i]->handle; - - if (ret) - continue; - } else { - pending_handle(pending_req, i) = map[j++].handle; - bitmap_set(pending_req->unmap_seg, i, 1); - - if (ret) - continue; + persistent_gnt->gnt = map[new_map_idx].ref; + persistent_gnt->handle = map[new_map_idx].handle; + persistent_gnt->page = pages[seg_idx]; + if (add_persistent_gnt(&blkif->persistent_gnts, + persistent_gnt)) { + kfree(persistent_gnt); + persistent_gnt = NULL; + goto next_unmap; + } + blkif->persistent_gnt_c++; + pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", + persistent_gnt->gnt, blkif->persistent_gnt_c, + max_mapped_grant_pages(blkif->blk_protocol)); + new_map_idx++; + goto next; + } + if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { + blkif->vbd.overflow_max_grants = 1; + pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", + blkif->domid, blkif->vbd.handle); } - seg[i].offset = (req->u.rw.seg[i].first_sect << 9); +next_unmap: + /* + * We could not map this grant persistently, so use it as + * a non-persistent grant. 
+ */ + bitmap_set(pending_req->unmap_seg, seg_idx, 1); + new_map_idx++; +next: + seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9); } return ret; + +out_of_memory: + pr_alert(DRV_PFX "%s: out of memory\n", __func__); + put_free_pages(blkif, pages_to_gnt, segs_to_map); + return -ENOMEM; } static int dispatch_discard_io(struct xen_blkif *blkif, @@ -863,7 +922,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, int operation; struct blk_plug plug; bool drain = false; - struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct page **pages = pending_req->pages; switch (req->operation) { case BLKIF_OP_READ: @@ -1090,22 +1149,14 @@ static int __init xen_blkif_init(void) xen_blkif_reqs, GFP_KERNEL); blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) * mmap_pages, GFP_KERNEL); - blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) * - mmap_pages, GFP_KERNEL); - if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || - !blkbk->pending_pages) { + if (!blkbk->pending_reqs || !blkbk->pending_grant_handles) { rc = -ENOMEM; goto out_of_memory; } for (i = 0; i < mmap_pages; i++) { blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE; - blkbk->pending_pages[i] = alloc_page(GFP_KERNEL); - if (blkbk->pending_pages[i] == NULL) { - rc = -ENOMEM; - goto out_of_memory; - } } rc = xen_blkif_interface_init(); if (rc) @@ -1130,13 +1181,6 @@ static int __init xen_blkif_init(void) failed_init: kfree(blkbk->pending_reqs); kfree(blkbk->pending_grant_handles); - if (blkbk->pending_pages) { - for (i = 0; i < mmap_pages; i++) { - if (blkbk->pending_pages[i]) - __free_page(blkbk->pending_pages[i]); - } - kfree(blkbk->pending_pages); - } kfree(blkbk); blkbk = NULL; return rc; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 60103e2517ba..6c73c3855e65 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -220,6 +220,11 @@ struct xen_blkif { struct rb_root persistent_gnts; unsigned int persistent_gnt_c; + /* buffer of free pages to map grant refs */ + spinlock_t free_pages_lock; + int free_pages_num; + struct list_head free_pages; + /* statistics */ unsigned long st_print; unsigned long long st_rd_req; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 8bfd1bcf95ec..24f7f6d87717 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -118,6 +118,9 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->st_print = jiffies; init_waitqueue_head(&blkif->waiting_to_free); blkif->persistent_gnts.rb_node = NULL; + spin_lock_init(&blkif->free_pages_lock); + INIT_LIST_HEAD(&blkif->free_pages); + blkif->free_pages_num = 0; return blkif; } -- cgit v1.2.3 From 3f3aad5e6686ed49242bbf86de378b39f119ec9d Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:18:57 +0200 Subject: xen-blkback: implement LRU mechanism for persistent grants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This mechanism allows blkback to change the number of grants persistently mapped at run time. The algorithm uses a simple LRU mechanism that removes (if needed) the persistent grants that have not been used since the last LRU run, or if all grants have been used it removes the first grants in the list (that are not in use). The algorithm allows the user to change the maximum number of persistent grants, by changing max_persistent_grants in sysfs. 
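The selection logic can be illustrated with the following self-contained sketch. This is an editorial illustration only, not the patch code: it assumes a flat array of grants with plain active/was_active flags in place of the driver's red-black tree and flag bitmap, and it "purges" inline instead of moving the victims to a list that a work item later unmaps.

#include <stdbool.h>
#include <stdio.h>

struct gnt {
	bool active;		/* currently mapped into a request */
	bool was_active;	/* used since the last purge run */
	bool purged;
};

/* Two-pass scan: prefer grants idle since the last run, then any idle grant. */
static unsigned int purge(struct gnt *g, unsigned int total,
			  unsigned int num_clean)
{
	bool scan_used = false;
	unsigned int i, removed = 0;

again:
	for (i = 0; i < total && removed < num_clean; i++) {
		if (g[i].purged || g[i].active)
			continue;	/* never evict a grant that is in use */
		if (!scan_used && g[i].was_active)
			continue;	/* first pass keeps recently used grants */
		g[i].purged = true;	/* the real code moves it to a purge list */
		removed++;
	}
	if (removed < num_clean && !scan_used) {
		scan_used = true;	/* not enough idle grants: scan again */
		goto again;
	}
	/* forget usage history so the next interval starts fresh */
	for (i = 0; i < total; i++)
		g[i].was_active = false;
	return removed;
}

int main(void)
{
	struct gnt g[4] = {
		{ .active = true }, { .was_active = true }, { 0 }, { 0 },
	};
	printf("purged %u grants\n", purge(g, 4, 3));
	return 0;
}

In the patch itself the role of was_active is played by the PERSISTENT_GNT_WAS_ACTIVE bit, which is cleared for every remaining grant at the end of each purge, so "recently used" always means "used since the previous LRU interval".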
Since we are storing the persistent grants used inside the request struct (to be able to mark them as "unused" when unmapping), we no longer need the bitmap (unmap_seg). Signed-off-by: Roger Pau Monné Cc: Konrad Rzeszutek Wilk Cc: xen-devel@lists.xen.org Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 287 +++++++++++++++++++++++++++++------- drivers/block/xen-blkback/common.h | 18 +++ drivers/block/xen-blkback/xenbus.c | 2 + 3 files changed, 250 insertions(+), 57 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 8245c6bb9539..17052f74ebe5 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -78,6 +78,36 @@ module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); MODULE_PARM_DESC(max_buffer_pages, "Maximum number of free pages to keep in each block backend buffer"); +/* + * Maximum number of grants to map persistently in blkback. For maximum + * performance this should be the total numbers of grants that can be used + * to fill the ring, but since this might become too high, specially with + * the use of indirect descriptors, we set it to a value that provides good + * performance without using too much memory. + * + * When the list of persistent grants is full we clean it up using a LRU + * algorithm. + */ + +static int xen_blkif_max_pgrants = 352; +module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); +MODULE_PARM_DESC(max_persistent_grants, + "Maximum number of grants to map persistently"); + +/* + * The LRU mechanism to clean the lists of persistent grants needs to + * be executed periodically. The time interval between consecutive executions + * of the purge mechanism is set in ms. + */ +#define LRU_INTERVAL 100 + +/* + * When the persistent grants list is full we will remove unused grants + * from the list. The percent number of grants to be removed at each LRU + * execution. + */ +#define LRU_PERCENT_CLEAN 5 + /* Run-time switchable: /sys/module/blkback/parameters/ */ static unsigned int log_stats; module_param(log_stats, int, 0644); @@ -96,8 +126,8 @@ struct pending_req { unsigned short operation; int status; struct list_head free_list; - DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLKBACK_INVALID_HANDLE (~0) @@ -118,36 +148,6 @@ struct xen_blkbk { static struct xen_blkbk *blkbk; -/* - * Maximum number of grant pages that can be mapped in blkback. - * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of - * pages that blkback will persistently map. 
- * Currently, this is: - * RING_SIZE = 32 (for all known ring types) - * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 - * sizeof(struct persistent_gnt) = 48 - * So the maximum memory used to store the grants is: - * 32 * 11 * 48 = 16896 bytes - */ -static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol) -{ - switch (protocol) { - case BLKIF_PROTOCOL_NATIVE: - return __CONST_RING_SIZE(blkif, PAGE_SIZE) * - BLKIF_MAX_SEGMENTS_PER_REQUEST; - case BLKIF_PROTOCOL_X86_32: - return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) * - BLKIF_MAX_SEGMENTS_PER_REQUEST; - case BLKIF_PROTOCOL_X86_64: - return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) * - BLKIF_MAX_SEGMENTS_PER_REQUEST; - default: - BUG(); - } - return 0; -} - - /* * Little helpful macro to figure out the index and virtual address of the * pending_pages[..]. For each 'pending_req' we have have up to @@ -239,13 +239,29 @@ static void make_response(struct xen_blkif *blkif, u64 id, (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) -static int add_persistent_gnt(struct rb_root *root, +/* + * We don't need locking around the persistent grant helpers + * because blkback uses a single-thread for each backed, so we + * can be sure that this functions will never be called recursively. + * + * The only exception to that is put_persistent_grant, that can be called + * from interrupt context (by xen_blkbk_unmap), so we have to use atomic + * bit operations to modify the flags of a persistent grant and to count + * the number of used grants. + */ +static int add_persistent_gnt(struct xen_blkif *blkif, struct persistent_gnt *persistent_gnt) { - struct rb_node **new = &(root->rb_node), *parent = NULL; + struct rb_node **new = NULL, *parent = NULL; struct persistent_gnt *this; + if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { + if (!blkif->vbd.overflow_max_grants) + blkif->vbd.overflow_max_grants = 1; + return -EBUSY; + } /* Figure out where to put new node */ + new = &blkif->persistent_gnts.rb_node; while (*new) { this = container_of(*new, struct persistent_gnt, node); @@ -260,18 +276,23 @@ static int add_persistent_gnt(struct rb_root *root, } } + bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); + set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); /* Add new node and rebalance tree. 
*/ rb_link_node(&(persistent_gnt->node), parent, new); - rb_insert_color(&(persistent_gnt->node), root); + rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); + blkif->persistent_gnt_c++; + atomic_inc(&blkif->persistent_gnt_in_use); return 0; } -static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, +static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, grant_ref_t gref) { struct persistent_gnt *data; - struct rb_node *node = root->rb_node; + struct rb_node *node = NULL; + node = blkif->persistent_gnts.rb_node; while (node) { data = container_of(node, struct persistent_gnt, node); @@ -279,12 +300,29 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, node = node->rb_left; else if (gref > data->gnt) node = node->rb_right; - else + else { + if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { + pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); + return NULL; + } + set_bit(PERSISTENT_GNT_ACTIVE, data->flags); + atomic_inc(&blkif->persistent_gnt_in_use); return data; + } } return NULL; } +static void put_persistent_gnt(struct xen_blkif *blkif, + struct persistent_gnt *persistent_gnt) +{ + if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + pr_alert_ratelimited(DRV_PFX " freeing a grant already unused"); + set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); + clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); + atomic_dec(&blkif->persistent_gnt_in_use); +} + static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, unsigned int num) { @@ -322,6 +360,129 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, BUG_ON(num != 0); } +static void unmap_purged_grants(struct work_struct *work) +{ + struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct persistent_gnt *persistent_gnt; + int ret, segs_to_unmap = 0; + struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); + + while(!list_empty(&blkif->persistent_purge_list)) { + persistent_gnt = list_first_entry(&blkif->persistent_purge_list, + struct persistent_gnt, + remove_node); + list_del(&persistent_gnt->remove_node); + + gnttab_set_unmap_op(&unmap[segs_to_unmap], + vaddr(persistent_gnt->page), + GNTMAP_host_map, + persistent_gnt->handle); + + pages[segs_to_unmap] = persistent_gnt->page; + + if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { + ret = gnttab_unmap_refs(unmap, NULL, pages, + segs_to_unmap); + BUG_ON(ret); + put_free_pages(blkif, pages, segs_to_unmap); + segs_to_unmap = 0; + } + kfree(persistent_gnt); + } + if (segs_to_unmap > 0) { + ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); + BUG_ON(ret); + put_free_pages(blkif, pages, segs_to_unmap); + } +} + +static void purge_persistent_gnt(struct xen_blkif *blkif) +{ + struct persistent_gnt *persistent_gnt; + struct rb_node *n; + unsigned int num_clean, total; + bool scan_used = false; + struct rb_root *root; + + if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || + (blkif->persistent_gnt_c == xen_blkif_max_pgrants && + !blkif->vbd.overflow_max_grants)) { + return; + } + + if (work_pending(&blkif->persistent_purge_work)) { + pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); + return; + } + + num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; + num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; + num_clean = 
min(blkif->persistent_gnt_c, num_clean); + if (num_clean > + (blkif->persistent_gnt_c - + atomic_read(&blkif->persistent_gnt_in_use))) + return; + + /* + * At this point, we can assure that there will be no calls + * to get_persistent_grant (because we are executing this code from + * xen_blkif_schedule), there can only be calls to put_persistent_gnt, + * which means that the number of currently used grants will go down, + * but never up, so we will always be able to remove the requested + * number of grants. + */ + + total = num_clean; + + pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); + + INIT_LIST_HEAD(&blkif->persistent_purge_list); + root = &blkif->persistent_gnts; +purge_list: + foreach_grant_safe(persistent_gnt, n, root, node) { + BUG_ON(persistent_gnt->handle == + BLKBACK_INVALID_HANDLE); + + if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + continue; + if (!scan_used && + (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) + continue; + + rb_erase(&persistent_gnt->node, root); + list_add(&persistent_gnt->remove_node, + &blkif->persistent_purge_list); + if (--num_clean == 0) + goto finished; + } + /* + * If we get here it means we also need to start cleaning + * grants that were used since last purge in order to cope + * with the requested num + */ + if (!scan_used) { + pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); + scan_used = true; + goto purge_list; + } +finished: + /* Remove the "used" flag from all the persistent grants */ + foreach_grant_safe(persistent_gnt, n, root, node) { + BUG_ON(persistent_gnt->handle == + BLKBACK_INVALID_HANDLE); + clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); + } + blkif->persistent_gnt_c -= (total - num_clean); + blkif->vbd.overflow_max_grants = 0; + + /* We can defer this work */ + INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); + schedule_work(&blkif->persistent_purge_work); + pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); + return; +} + /* * Retrieve from the 'pending_reqs' a free pending_req structure to be used. 
*/ @@ -453,12 +614,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) static void print_stats(struct xen_blkif *blkif) { pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" - " | ds %4llu | pg: %4u/%4u\n", + " | ds %4llu | pg: %4u/%4d\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req, blkif->st_ds_req, blkif->persistent_gnt_c, - max_mapped_grant_pages(blkif->blk_protocol)); + xen_blkif_max_pgrants); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_rd_req = 0; blkif->st_wr_req = 0; @@ -470,6 +631,7 @@ int xen_blkif_schedule(void *arg) { struct xen_blkif *blkif = arg; struct xen_vbd *vbd = &blkif->vbd; + unsigned long timeout; xen_blkif_get(blkif); @@ -479,13 +641,21 @@ int xen_blkif_schedule(void *arg) if (unlikely(vbd->size != vbd_sz(vbd))) xen_vbd_resize(blkif); - wait_event_interruptible( + timeout = msecs_to_jiffies(LRU_INTERVAL); + + timeout = wait_event_interruptible_timeout( blkif->wq, - blkif->waiting_reqs || kthread_should_stop()); - wait_event_interruptible( + blkif->waiting_reqs || kthread_should_stop(), + timeout); + if (timeout == 0) + goto purge_gnt_list; + timeout = wait_event_interruptible_timeout( blkbk->pending_free_wq, !list_empty(&blkbk->pending_free) || - kthread_should_stop()); + kthread_should_stop(), + timeout); + if (timeout == 0) + goto purge_gnt_list; blkif->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ @@ -493,6 +663,13 @@ int xen_blkif_schedule(void *arg) if (do_block_io_op(blkif)) blkif->waiting_reqs = 1; +purge_gnt_list: + if (blkif->vbd.feature_gnt_persistent && + time_after(jiffies, blkif->next_lru)) { + purge_persistent_gnt(blkif); + blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); + } + /* Shrink if we have more than xen_blkif_max_buffer_pages */ shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); @@ -538,8 +715,10 @@ static void xen_blkbk_unmap(struct pending_req *req) int ret; for (i = 0; i < req->nr_pages; i++) { - if (!test_bit(i, req->unmap_seg)) + if (req->persistent_gnts[i] != NULL) { + put_persistent_gnt(blkif, req->persistent_gnts[i]); continue; + } handle = pending_handle(req, i); pages[invcount] = req->pages[i]; if (handle == BLKBACK_INVALID_HANDLE) @@ -561,8 +740,8 @@ static int xen_blkbk_map(struct blkif_request *req, struct page *pages[]) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct persistent_gnt **persistent_gnts = pending_req->persistent_gnts; struct persistent_gnt *persistent_gnt = NULL; struct xen_blkif *blkif = pending_req->blkif; phys_addr_t addr = 0; @@ -574,9 +753,6 @@ static int xen_blkbk_map(struct blkif_request *req, use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); - BUG_ON(blkif->persistent_gnt_c > - max_mapped_grant_pages(pending_req->blkif->blk_protocol)); - /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the @@ -587,7 +763,7 @@ static int xen_blkbk_map(struct blkif_request *req, if (use_persistent_gnts) persistent_gnt = get_persistent_gnt( - &blkif->persistent_gnts, + blkif, req->u.rw.seg[i].gref); if (persistent_gnt) { @@ -623,7 +799,6 @@ static int xen_blkbk_map(struct blkif_request *req, * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. 
*/ - bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) { if (!persistent_gnts[seg_idx]) { /* This is a newly mapped grant */ @@ -646,11 +821,10 @@ static int xen_blkbk_map(struct blkif_request *req, goto next; } if (use_persistent_gnts && - blkif->persistent_gnt_c < - max_mapped_grant_pages(blkif->blk_protocol)) { + blkif->persistent_gnt_c < xen_blkif_max_pgrants) { /* * We are using persistent grants, the grant is - * not mapped but we have room for it + * not mapped but we might have room for it. */ persistent_gnt = kmalloc(sizeof(struct persistent_gnt), GFP_KERNEL); @@ -665,16 +839,16 @@ static int xen_blkbk_map(struct blkif_request *req, persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->page = pages[seg_idx]; - if (add_persistent_gnt(&blkif->persistent_gnts, + if (add_persistent_gnt(blkif, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; goto next_unmap; } - blkif->persistent_gnt_c++; + persistent_gnts[seg_idx] = persistent_gnt; pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, blkif->persistent_gnt_c, - max_mapped_grant_pages(blkif->blk_protocol)); + xen_blkif_max_pgrants); new_map_idx++; goto next; } @@ -688,7 +862,6 @@ next_unmap: * We could not map this grant persistently, so use it as * a non-persistent grant. */ - bitmap_set(pending_req->unmap_seg, seg_idx, 1); new_map_idx++; next: seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 6c73c3855e65..af9bed48f773 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -182,12 +182,23 @@ struct xen_vbd { struct backend_info; +/* Number of available flags */ +#define PERSISTENT_GNT_FLAGS_SIZE 2 +/* This persistent grant is currently in use */ +#define PERSISTENT_GNT_ACTIVE 0 +/* + * This persistent grant has been used, this flag is set when we remove the + * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. 
+ */ +#define PERSISTENT_GNT_WAS_ACTIVE 1 struct persistent_gnt { struct page *page; grant_ref_t gnt; grant_handle_t handle; + DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); struct rb_node node; + struct list_head remove_node; }; struct xen_blkif { @@ -219,6 +230,12 @@ struct xen_blkif { /* tree to store persistent grants */ struct rb_root persistent_gnts; unsigned int persistent_gnt_c; + atomic_t persistent_gnt_in_use; + unsigned long next_lru; + + /* used by the kworker that offload work from the persistent purge */ + struct list_head persistent_purge_list; + struct work_struct persistent_purge_work; /* buffer of free pages to map grant refs */ spinlock_t free_pages_lock; @@ -262,6 +279,7 @@ int xen_blkif_xenbus_init(void); irqreturn_t xen_blkif_be_int(int irq, void *dev_id); int xen_blkif_schedule(void *arg); +int xen_blkif_purge_persistent(void *arg); int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, struct backend_info *be, int state); diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 24f7f6d87717..e0fd92a2a4cd 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -98,6 +98,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) err = PTR_ERR(blkif->xenblkd); blkif->xenblkd = NULL; xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); + return; } } @@ -121,6 +122,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) spin_lock_init(&blkif->free_pages_lock); INIT_LIST_HEAD(&blkif->free_pages); blkif->free_pages_num = 0; + atomic_set(&blkif->persistent_gnt_in_use, 0); return blkif; } -- cgit v1.2.3 From bb6acb289fbaac0e99eb552abdefc80a2186ef3f Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:18:58 +0200 Subject: xen-blkback: move pending handles list from blkbk to pending_req MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving grant ref handles from blkbk to pending_req will allow us to get rid of the shared blkbk structure. Signed-off-by: Roger Pau Monné Cc: Konrad Rzeszutek Wilk Cc: xen-devel@lists.xen.org Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 17052f74ebe5..ae7dc92ad3cf 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -128,6 +128,7 @@ struct pending_req { struct list_head free_list; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + grant_handle_t grant_handles[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLKBACK_INVALID_HANDLE (~0) @@ -142,8 +143,6 @@ struct xen_blkbk { /* And its spinlock. */ spinlock_t pending_free_lock; wait_queue_head_t pending_free_wq; - /* And the grant handles that are available. 
*/ - grant_handle_t *pending_grant_handles; }; static struct xen_blkbk *blkbk; @@ -221,7 +220,7 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) #define pending_handle(_req, _seg) \ - (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) + (_req->grant_handles[_seg]) static int do_block_io_op(struct xen_blkif *blkif); @@ -1304,7 +1303,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, static int __init xen_blkif_init(void) { - int i, mmap_pages; + int i; int rc = 0; if (!xen_domain()) @@ -1316,21 +1315,15 @@ static int __init xen_blkif_init(void) return -ENOMEM; } - mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) * xen_blkif_reqs, GFP_KERNEL); - blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) * - mmap_pages, GFP_KERNEL); - if (!blkbk->pending_reqs || !blkbk->pending_grant_handles) { + if (!blkbk->pending_reqs) { rc = -ENOMEM; goto out_of_memory; } - for (i = 0; i < mmap_pages; i++) { - blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE; - } rc = xen_blkif_interface_init(); if (rc) goto failed_init; @@ -1353,7 +1346,6 @@ static int __init xen_blkif_init(void) pr_alert(DRV_PFX "%s: out of memory\n", __func__); failed_init: kfree(blkbk->pending_reqs); - kfree(blkbk->pending_grant_handles); kfree(blkbk); blkbk = NULL; return rc; -- cgit v1.2.3 From bf0720c48c7cefd127ed2329e6d0e40b39fa4d0e Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:18:59 +0200 Subject: xen-blkback: make the queue of free requests per backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the last dependency from blkbk by moving the list of free requests to blkif. This change reduces the contention on the list of available requests. Signed-off-by: Roger Pau Monné Cc: Konrad Rzeszutek Wilk Cc: xen-devel@lists.xen.org Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 123 ++++++------------------------------ drivers/block/xen-blkback/common.h | 30 +++++++++ drivers/block/xen-blkback/xenbus.c | 26 ++++++++ 3 files changed, 74 insertions(+), 105 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ae7dc92ad3cf..90a57552f4b7 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -49,20 +49,6 @@ #include #include "common.h" -/* - * These are rather arbitrary. They are fairly large because adjacent requests - * pulled from a communication ring are quite likely to end up being part of - * the same scatter/gather request at the disc. - * - * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW ** - * - * This will increase the chances of being able to write whole tracks. - * 64 should be enough to keep us competitive with Linux. - */ -static int xen_blkif_reqs = 64; -module_param_named(reqs, xen_blkif_reqs, int, 0); -MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate"); - /* * Maximum number of unused free pages to keep in the internal buffer. 
* Setting this to a value too low will reduce memory used in each backend, @@ -112,53 +98,11 @@ MODULE_PARM_DESC(max_persistent_grants, static unsigned int log_stats; module_param(log_stats, int, 0644); -/* - * Each outstanding request that we've passed to the lower device layers has a - * 'pending_req' allocated to it. Each buffer_head that completes decrements - * the pendcnt towards zero. When it hits zero, the specified domain has a - * response queued for it, with the saved 'id' passed back. - */ -struct pending_req { - struct xen_blkif *blkif; - u64 id; - int nr_pages; - atomic_t pendcnt; - unsigned short operation; - int status; - struct list_head free_list; - struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - grant_handle_t grant_handles[BLKIF_MAX_SEGMENTS_PER_REQUEST]; -}; - #define BLKBACK_INVALID_HANDLE (~0) /* Number of free pages to remove on each call to free_xenballooned_pages */ #define NUM_BATCH_FREE_PAGES 10 -struct xen_blkbk { - struct pending_req *pending_reqs; - /* List of all 'pending_req' available */ - struct list_head pending_free; - /* And its spinlock. */ - spinlock_t pending_free_lock; - wait_queue_head_t pending_free_wq; -}; - -static struct xen_blkbk *blkbk; - -/* - * Little helpful macro to figure out the index and virtual address of the - * pending_pages[..]. For each 'pending_req' we have have up to - * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through - * 10 and would index in the pending_pages[..]. - */ -static inline int vaddr_pagenr(struct pending_req *req, int seg) -{ - return (req - blkbk->pending_reqs) * - BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; -} - static inline int get_free_page(struct xen_blkif *blkif, struct page **page) { unsigned long flags; @@ -485,18 +429,18 @@ finished: /* * Retrieve from the 'pending_reqs' a free pending_req structure to be used. */ -static struct pending_req *alloc_req(void) +static struct pending_req *alloc_req(struct xen_blkif *blkif) { struct pending_req *req = NULL; unsigned long flags; - spin_lock_irqsave(&blkbk->pending_free_lock, flags); - if (!list_empty(&blkbk->pending_free)) { - req = list_entry(blkbk->pending_free.next, struct pending_req, + spin_lock_irqsave(&blkif->pending_free_lock, flags); + if (!list_empty(&blkif->pending_free)) { + req = list_entry(blkif->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } - spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); + spin_unlock_irqrestore(&blkif->pending_free_lock, flags); return req; } @@ -504,17 +448,17 @@ static struct pending_req *alloc_req(void) * Return the 'pending_req' structure back to the freepool. We also * wake up the thread if it was waiting for a free page. 
*/ -static void free_req(struct pending_req *req) +static void free_req(struct xen_blkif *blkif, struct pending_req *req) { unsigned long flags; int was_empty; - spin_lock_irqsave(&blkbk->pending_free_lock, flags); - was_empty = list_empty(&blkbk->pending_free); - list_add(&req->free_list, &blkbk->pending_free); - spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); + spin_lock_irqsave(&blkif->pending_free_lock, flags); + was_empty = list_empty(&blkif->pending_free); + list_add(&req->free_list, &blkif->pending_free); + spin_unlock_irqrestore(&blkif->pending_free_lock, flags); if (was_empty) - wake_up(&blkbk->pending_free_wq); + wake_up(&blkif->pending_free_wq); } /* @@ -649,8 +593,8 @@ int xen_blkif_schedule(void *arg) if (timeout == 0) goto purge_gnt_list; timeout = wait_event_interruptible_timeout( - blkbk->pending_free_wq, - !list_empty(&blkbk->pending_free) || + blkif->pending_free_wq, + !list_empty(&blkif->pending_free) || kthread_should_stop(), timeout); if (timeout == 0) @@ -907,7 +851,7 @@ static int dispatch_other_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { - free_req(pending_req); + free_req(blkif, pending_req); make_response(blkif, req->u.other.id, req->operation, BLKIF_RSP_EOPNOTSUPP); return -EIO; @@ -967,7 +911,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) if (atomic_read(&pending_req->blkif->drain)) complete(&pending_req->blkif->drain_complete); } - free_req(pending_req); + free_req(pending_req->blkif, pending_req); } } @@ -1010,7 +954,7 @@ __do_block_io_op(struct xen_blkif *blkif) break; } - pending_req = alloc_req(); + pending_req = alloc_req(blkif); if (NULL == pending_req) { blkif->st_oo_req++; more_to_do = 1; @@ -1044,7 +988,7 @@ __do_block_io_op(struct xen_blkif *blkif) goto done; break; case BLKIF_OP_DISCARD: - free_req(pending_req); + free_req(blkif, pending_req); if (dispatch_discard_io(blkif, &req)) goto done; break; @@ -1246,7 +1190,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, fail_response: /* Haven't submitted any bio's yet. 
*/ make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); - free_req(pending_req); + free_req(blkif, pending_req); msleep(1); /* back off a bit */ return -EIO; @@ -1303,51 +1247,20 @@ static void make_response(struct xen_blkif *blkif, u64 id, static int __init xen_blkif_init(void) { - int i; int rc = 0; if (!xen_domain()) return -ENODEV; - blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL); - if (!blkbk) { - pr_alert(DRV_PFX "%s: out of memory!\n", __func__); - return -ENOMEM; - } - - - blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) * - xen_blkif_reqs, GFP_KERNEL); - - if (!blkbk->pending_reqs) { - rc = -ENOMEM; - goto out_of_memory; - } - rc = xen_blkif_interface_init(); if (rc) goto failed_init; - INIT_LIST_HEAD(&blkbk->pending_free); - spin_lock_init(&blkbk->pending_free_lock); - init_waitqueue_head(&blkbk->pending_free_wq); - - for (i = 0; i < xen_blkif_reqs; i++) - list_add_tail(&blkbk->pending_reqs[i].free_list, - &blkbk->pending_free); - rc = xen_blkif_xenbus_init(); if (rc) goto failed_init; - return 0; - - out_of_memory: - pr_alert(DRV_PFX "%s: out of memory\n", __func__); failed_init: - kfree(blkbk->pending_reqs); - kfree(blkbk); - blkbk = NULL; return rc; } diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index af9bed48f773..e33fafa0facd 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -192,6 +192,9 @@ struct backend_info; */ #define PERSISTENT_GNT_WAS_ACTIVE 1 +/* Number of requests that we can fit in a ring */ +#define XEN_BLKIF_REQS 32 + struct persistent_gnt { struct page *page; grant_ref_t gnt; @@ -242,6 +245,14 @@ struct xen_blkif { int free_pages_num; struct list_head free_pages; + /* Allocation of pending_reqs */ + struct pending_req *pending_reqs; + /* List of all 'pending_req' available */ + struct list_head pending_free; + /* And its spinlock. */ + spinlock_t pending_free_lock; + wait_queue_head_t pending_free_wq; + /* statistics */ unsigned long st_print; unsigned long long st_rd_req; @@ -255,6 +266,25 @@ struct xen_blkif { wait_queue_head_t waiting_to_free; }; +/* + * Each outstanding request that we've passed to the lower device layers has a + * 'pending_req' allocated to it. Each buffer_head that completes decrements + * the pendcnt towards zero. When it hits zero, the specified domain has a + * response queued for it, with the saved 'id' passed back. + */ +struct pending_req { + struct xen_blkif *blkif; + u64 id; + int nr_pages; + atomic_t pendcnt; + unsigned short operation; + int status; + struct list_head free_list; + struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + grant_handle_t grant_handles[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; + #define vbd_sz(_v) ((_v)->bdev->bd_part ? 
\ (_v)->bdev->bd_part->nr_sects : \ diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index e0fd92a2a4cd..1f1ade6d6e09 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -105,6 +105,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) static struct xen_blkif *xen_blkif_alloc(domid_t domid) { struct xen_blkif *blkif; + int i; blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); if (!blkif) @@ -124,6 +125,21 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->free_pages_num = 0; atomic_set(&blkif->persistent_gnt_in_use, 0); + blkif->pending_reqs = kcalloc(XEN_BLKIF_REQS, + sizeof(blkif->pending_reqs[0]), + GFP_KERNEL); + if (!blkif->pending_reqs) { + kmem_cache_free(xen_blkif_cachep, blkif); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&blkif->pending_free); + spin_lock_init(&blkif->pending_free_lock); + init_waitqueue_head(&blkif->pending_free_wq); + + for (i = 0; i < XEN_BLKIF_REQS; i++) + list_add_tail(&blkif->pending_reqs[i].free_list, + &blkif->pending_free); + return blkif; } @@ -203,8 +219,18 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { + struct pending_req *req; + int i = 0; + if (!atomic_dec_and_test(&blkif->refcnt)) BUG(); + + /* Check that there is no request in use */ + list_for_each_entry(req, &blkif->pending_free, free_list) + i++; + BUG_ON(i != XEN_BLKIF_REQS); + + kfree(blkif->pending_reqs); kmem_cache_free(xen_blkif_cachep, blkif); } -- cgit v1.2.3 From 31552ee32df89f97a61766cee51b8dabb1ae3f4f Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 17 Apr 2013 20:19:00 +0200 Subject: xen-blkback: expand map/unmap functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preparatory change for implementing indirect descriptors. Change xen_blkbk_{map/unmap} in order to be able to map/unmap a random amount of grants (previously it was limited to BLKIF_MAX_SEGMENTS_PER_REQUEST). Also, remove the usage of pending_req in the map/unmap functions, so we can map/unmap grants without needing to pass a pending_req. Signed-off-by: Roger Pau Monné Cc: Konrad Rzeszutek Wilk Cc: xen-devel@lists.xen.org Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 141 ++++++++++++++++++++++-------------- 1 file changed, 86 insertions(+), 55 deletions(-) (limited to 'drivers/block/xen-blkback') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 90a57552f4b7..356722f65f88 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -163,10 +163,6 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) -#define pending_handle(_req, _seg) \ - (_req->grant_handles[_seg]) - - static int do_block_io_op(struct xen_blkif *blkif); static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, @@ -648,50 +644,57 @@ struct seg_buf { * Unmap the grant references, and also remove the M2P over-rides * used in the 'pending_req'. 
*/ -static void xen_blkbk_unmap(struct pending_req *req) +static void xen_blkbk_unmap(struct xen_blkif *blkif, + grant_handle_t handles[], + struct page *pages[], + struct persistent_gnt *persistent_gnts[], + int num) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int i, invcount = 0; - grant_handle_t handle; - struct xen_blkif *blkif = req->blkif; int ret; - for (i = 0; i < req->nr_pages; i++) { - if (req->persistent_gnts[i] != NULL) { - put_persistent_gnt(blkif, req->persistent_gnts[i]); + for (i = 0; i < num; i++) { + if (persistent_gnts[i] != NULL) { + put_persistent_gnt(blkif, persistent_gnts[i]); continue; } - handle = pending_handle(req, i); - pages[invcount] = req->pages[i]; - if (handle == BLKBACK_INVALID_HANDLE) + if (handles[i] == BLKBACK_INVALID_HANDLE) continue; - gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[invcount]), - GNTMAP_host_map, handle); - pending_handle(req, i) = BLKBACK_INVALID_HANDLE; - invcount++; + unmap_pages[invcount] = pages[i]; + gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]), + GNTMAP_host_map, handles[i]); + handles[i] = BLKBACK_INVALID_HANDLE; + if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, + invcount); + BUG_ON(ret); + put_free_pages(blkif, unmap_pages, invcount); + invcount = 0; + } + } + if (invcount) { + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); + BUG_ON(ret); + put_free_pages(blkif, unmap_pages, invcount); } - - ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); - BUG_ON(ret); - put_free_pages(blkif, pages, invcount); } -static int xen_blkbk_map(struct blkif_request *req, - struct pending_req *pending_req, - struct seg_buf seg[], - struct page *pages[]) +static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[], + struct persistent_gnt *persistent_gnts[], + grant_handle_t handles[], + struct page *pages[], + int num, bool ro) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; - struct persistent_gnt **persistent_gnts = pending_req->persistent_gnts; struct persistent_gnt *persistent_gnt = NULL; - struct xen_blkif *blkif = pending_req->blkif; phys_addr_t addr = 0; int i, seg_idx, new_map_idx; - int nseg = req->u.rw.nr_segments; int segs_to_map = 0; int ret = 0; + int last_map = 0, map_until = 0; int use_persistent_gnts; use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); @@ -701,13 +704,14 @@ static int xen_blkbk_map(struct blkif_request *req, * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. 
*/ - for (i = 0; i < nseg; i++) { +again: + for (i = map_until; i < num; i++) { uint32_t flags; if (use_persistent_gnts) persistent_gnt = get_persistent_gnt( blkif, - req->u.rw.seg[i].gref); + grefs[i]); if (persistent_gnt) { /* @@ -723,13 +727,15 @@ static int xen_blkbk_map(struct blkif_request *req, pages_to_gnt[segs_to_map] = pages[i]; persistent_gnts[i] = NULL; flags = GNTMAP_host_map; - if (!use_persistent_gnts && - (pending_req->operation != BLKIF_OP_READ)) + if (!use_persistent_gnts && ro) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, - flags, req->u.rw.seg[i].gref, + flags, grefs[i], blkif->domid); } + map_until = i + 1; + if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) + break; } if (segs_to_map) { @@ -742,26 +748,19 @@ static int xen_blkbk_map(struct blkif_request *req, * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. */ - for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) { + for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { if (!persistent_gnts[seg_idx]) { /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); - pending_handle(pending_req, seg_idx) = BLKBACK_INVALID_HANDLE; + handles[seg_idx] = BLKBACK_INVALID_HANDLE; ret |= 1; - new_map_idx++; - /* - * No need to set unmap_seg bit, since - * we can not unmap this grant because - * the handle is invalid. - */ - continue; + goto next; } - pending_handle(pending_req, seg_idx) = map[new_map_idx].handle; + handles[seg_idx] = map[new_map_idx].handle; } else { - /* This grant is persistent and already mapped */ - goto next; + continue; } if (use_persistent_gnts && blkif->persistent_gnt_c < xen_blkif_max_pgrants) { @@ -777,7 +776,7 @@ static int xen_blkbk_map(struct blkif_request *req, * allocate the persistent_gnt struct * map this grant non-persistenly */ - goto next_unmap; + goto next; } persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; @@ -786,13 +785,12 @@ static int xen_blkbk_map(struct blkif_request *req, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; - goto next_unmap; + goto next; } persistent_gnts[seg_idx] = persistent_gnt; pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, blkif->persistent_gnt_c, xen_blkif_max_pgrants); - new_map_idx++; goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { @@ -800,15 +798,18 @@ static int xen_blkbk_map(struct blkif_request *req, pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } -next_unmap: /* * We could not map this grant persistently, so use it as * a non-persistent grant. 
*/ - new_map_idx++; next: - seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9); + new_map_idx++; } + segs_to_map = 0; + last_map = map_until; + if (map_until != num) + goto again; + return ret; out_of_memory: @@ -817,6 +818,31 @@ out_of_memory: return -ENOMEM; } +static int xen_blkbk_map_seg(struct blkif_request *req, + struct pending_req *pending_req, + struct seg_buf seg[], + struct page *pages[]) +{ + int i, rc; + grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + + for (i = 0; i < req->u.rw.nr_segments; i++) + grefs[i] = req->u.rw.seg[i].gref; + + rc = xen_blkbk_map(pending_req->blkif, grefs, + pending_req->persistent_gnts, + pending_req->grant_handles, pending_req->pages, + req->u.rw.nr_segments, + (pending_req->operation != BLKIF_OP_READ)); + if (rc) + return rc; + + for (i = 0; i < req->u.rw.nr_segments; i++) + seg[i].offset = (req->u.rw.seg[i].first_sect << 9); + + return 0; +} + static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { @@ -903,7 +929,10 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { - xen_blkbk_unmap(pending_req); + xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles, + pending_req->pages, + pending_req->persistent_gnts, + pending_req->nr_pages); make_response(pending_req->blkif, pending_req->id, pending_req->operation, pending_req->status); xen_blkif_put(pending_req->blkif); @@ -1125,7 +1154,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, * the hypercall to unmap the grants - that is all done in * xen_blkbk_unmap. */ - if (xen_blkbk_map(req, pending_req, seg, pages)) + if (xen_blkbk_map_seg(req, pending_req, seg, pages)) goto fail_flush; /* @@ -1186,7 +1215,9 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, return 0; fail_flush: - xen_blkbk_unmap(pending_req); + xen_blkbk_unmap(blkif, pending_req->grant_handles, + pending_req->pages, pending_req->persistent_gnts, + pending_req->nr_pages); fail_response: /* Haven't submitted any bio's yet. */ make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); -- cgit v1.2.3 From 402b27f9f2c22309d5bb285628765bc27b82fcf5 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Thu, 18 Apr 2013 16:06:54 +0200 Subject: xen-block: implement indirect descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Indirect descriptors introduce a new block operation (BLKIF_OP_INDIRECT) that passes grant references instead of segments in the request. This grant references are filled with arrays of blkif_request_segment_aligned, this way we can send more segments in a request. The proposed implementation sets the maximum numbe