-rw-r--r--  Documentation/block/writeback_cache_control.txt  4
-rw-r--r--  arch/um/drivers/ubd_kern.c  2
-rw-r--r--  block/blk-core.c  3
-rw-r--r--  block/blk-flush.c  11
-rw-r--r--  block/blk-mq-tag.c  5
-rw-r--r--  block/blk-settings.c  38
-rw-r--r--  drivers/block/drbd/drbd_main.c  2
-rw-r--r--  drivers/block/loop.c  2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  12
-rw-r--r--  drivers/block/nbd.c  4
-rw-r--r--  drivers/block/osdblk.c  2
-rw-r--r--  drivers/block/ps3disk.c  2
-rw-r--r--  drivers/block/skd_main.c  61
-rw-r--r--  drivers/block/virtio_blk.c  6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  2
-rw-r--r--  drivers/block/xen-blkfront.c  3
-rw-r--r--  drivers/ide/ide-disk.c  6
-rw-r--r--  drivers/lightnvm/core.c  370
-rw-r--r--  drivers/lightnvm/gennvm.c  100
-rw-r--r--  drivers/lightnvm/rrpc.c  42
-rw-r--r--  drivers/lightnvm/rrpc.h  2
-rw-r--r--  drivers/lightnvm/sysblk.c  284
-rw-r--r--  drivers/md/bcache/super.c  2
-rw-r--r--  drivers/md/dm-table.c  20
-rw-r--r--  drivers/md/md.c  2
-rw-r--r--  drivers/md/raid5-cache.c  3
-rw-r--r--  drivers/mmc/card/block.c  2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c  2
-rw-r--r--  drivers/nvme/host/Kconfig  2
-rw-r--r--  drivers/nvme/host/core.c  272
-rw-r--r--  drivers/nvme/host/lightnvm.c  82
-rw-r--r--  drivers/nvme/host/nvme.h  91
-rw-r--r--  drivers/nvme/host/pci.c  249
-rw-r--r--  drivers/scsi/sd.c  8
-rw-r--r--  drivers/target/target_core_iblock.c  6
-rw-r--r--  include/linux/blk-mq.h  2
-rw-r--r--  include/linux/blkdev.h  6
-rw-r--r--  include/linux/lightnvm.h  48
-rw-r--r--  include/linux/nvme.h  4
39 files changed, 966 insertions, 798 deletions
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index 83407d36630a..59e0516cbf6b 100644
--- a/Documentation/block/writeback_cache_control.txt
+++ b/Documentation/block/writeback_cache_control.txt
@@ -71,7 +71,7 @@ requests that have a payload. For devices with volatile write caches the
driver needs to tell the block layer that it supports flushing caches by
doing:
- blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
+ blk_queue_write_cache(sdkp->disk->queue, true, false);
and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
REQ_FLUSH requests with a payload are automatically turned into a sequence
@@ -79,7 +79,7 @@ of an empty REQ_FLUSH request followed by the actual write by the block
layer. For devices that also support the FUA bit the block layer needs
to be told to pass through the REQ_FUA bit using:
- blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(sdkp->disk->queue, true, true);
and the driver must handle write requests that have the REQ_FUA bit set
in prep_fn/request_fn. If the FUA bit is not natively supported the block
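
As an illustrative sketch (not part of this patch), a driver with a volatile
write cache but no native FUA support could configure its queue under the new
interface as follows; the mydev_* names are hypothetical:

	#include <linux/blkdev.h>

	static void mydev_setup_cache(struct request_queue *q)
	{
		/* advertise a volatile write cache, no native FUA; in this
		 * case the block layer emulates REQ_FUA with a post-flush */
		blk_queue_write_cache(q, true, false);
	}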
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 39ba20755e03..17e96dc29596 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -862,7 +862,7 @@ static int ubd_add(int n, char **error_out)
goto out;
}
ubd_dev->queue->queuedata = ubd_dev;
- blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
+ blk_queue_write_cache(ubd_dev->queue, true, false);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
diff --git a/block/blk-core.c b/block/blk-core.c
index c50227796a26..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
static bool blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq);
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
- if (fflags & REQ_FLUSH) {
+ if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_FLUSH)
policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+ (rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned int fflags = q->flush_flags; /* may change, cache */
+ unsigned long fflags = q->queue_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
* REQ_FLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_FLUSH;
- if (!(fflags & REQ_FUA))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
rq->cmd_flags &= ~REQ_FUA;
/*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2fd04286f103..56a0c37a3d06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
}
}
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
- void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+ busy_tag_iter_fn *fn, void *priv)
{
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
false);
}
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c903bee43cf8..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -820,29 +820,14 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
- WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
- if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
- flush &= ~REQ_FUA;
-
- q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
- q->flush_not_queueable = !queueable;
+ spin_lock_irq(q->queue_lock);
+ if (queueable)
+ clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ else
+ set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
@@ -857,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
spin_lock_irq(q->queue_lock);
- if (wc) {
+ if (wc)
queue_flag_set(QUEUE_FLAG_WC, q);
- q->flush_flags = REQ_FLUSH;
- } else
+ else
queue_flag_clear(QUEUE_FLAG_WC, q);
- if (fua) {
- if (wc)
- q->flush_flags |= REQ_FUA;
+ if (fua)
queue_flag_set(QUEUE_FLAG_FUA, q);
- } else
+ else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
}
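
For comparison (a sketch, not part of the patch itself), code that used to
read q->flush_flags can now query the same information through the queue flag
bits, as blk-core.c and xen-blkback do elsewhere in this patch; the helper
names below are hypothetical:

	#include <linux/blkdev.h>

	static bool mydev_queue_has_wc(struct request_queue *q)
	{
		/* set by blk_queue_write_cache(q, true, ...) */
		return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	}

	static bool mydev_queue_has_fua(struct request_queue *q)
	{
		/* set by blk_queue_write_cache(q, ..., true) */
		return test_bit(QUEUE_FLAG_FUA, &q->queue_flags);
	}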
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa209773d494..2ba1494b2799 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 80cf8add46ff..1fa8cc235977 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -943,7 +943,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+ blk_queue_write_cache(lo->lo_queue, true, false);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 25824c1697c5..6053e4659fa2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@ restart_eh:
"Completion workers still active!");
spin_lock(dd->queue->queue_lock);
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_queue_cmd, dd);
spin_unlock(dd->queue->queue_lock);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
if (mtip_device_reset(dd))
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_abort_cmd, dd);
clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@ skip_create_disk:
blk_queue_io_min(dd->queue, 4096);
blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
- /*
- * write back cache is not supported in the device. FUA depends on
- * write back cache support, hence setting flush support to zero.
- */
- blk_queue_flush(dd->queue, 0);
-
/* Signal trim support */
if (dd->trim_supp == true) {
set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@ static int mtip_block_remove(struct driver_data *dd)
blk_mq_freeze_queue_start(dd->queue);
blk_mq_stop_hw_queues(dd->queue);
- blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+ blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
/*
* Delete our gendisk structure. This also removes the device
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08afbc7a2bb8..31e73a7a40f2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -693,9 +693,9 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
if (nbd->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
if (nbd->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ blk_queue_write_cache(nbd->disk->queue, true, false);
else
- blk_queue_flush(nbd->disk->queue, 0);
+ blk_queue_write_cache(nbd->disk->queue, false, false);
}
static int nbd_dev_dbg_init(struct nbd_device *nbd);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1b709a4e3b5e..c2854a2bfdb0 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -437,7 +437,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
blk_queue_prep_rq(q, blk_queue_start_tag);
- blk_queue_flush(q, REQ_FLUSH);
+ blk_queue_write_cache(q, true, false);
disk->queue = q;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c120d70d3fb3..4b7e405830d7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
- blk_queue_flush(queue, REQ_FLUSH);
+ blk_queue_write_cache(queue, true, false);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9a9ec212fab8..910e065918af 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
#define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH 24
enum skd_drvr_state {
SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
struct request *req;
u8 flush_cmd;
- u8 discard_page;
u32 timeout_stamp;
u8 sg_data_dir;
@@ -230,7 +228,6 @@ struct skd_request_context {
};
#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
-#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
struct skd_special_context {
struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq,
- struct page *page,
- u32 lba, u32 count)
-{
- char *buf;
- unsigned long len;
- struct request *req;
-
- buf = page_address(page);
- len = SKD_DISCARD_CDB_LENGTH;
-
- scsi_req->cdb[0] = UNMAP;
- scsi_req->cdb[8] = len;
-
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(lba, &buf[8]);
- put_unaligned_be32(count, &buf[16]);
-
- req = skreq->req;
- blk_add_request_payload(req, page, 0, len);
-}
-
static void skd_request_fn_not_online(struct request_queue *q);
static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
struct skd_request_context *skreq;
struct request *req = NULL;
struct skd_scsi_request *scsi_req;
- struct page *page;
unsigned long io_flags;
int error;
u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
skreq->flush_cmd = 0;
skreq->n_sg = 0;
skreq->sg_byte_count = 0;
- skreq->discard_page = 0;
/*
* OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
else
skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
- if (io_flags & REQ_DISCARD) {
- page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
- if (!page) {
- pr_err("request_fn:Page allocation failed.\n");
- skd_end_request(skdev, skreq, -ENOMEM);
- break;
- }
- skreq->discard_page = 1;
- req->completion_data = page;
- skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
- } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+ if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
skd_prep_zerosize_flush_cdb(scsi_req, skreq);
SKD_ASSERT(skreq->flush_cmd == 1);
@@ -851,16 +810,6 @@ skip_sg:
static void skd_end_request(struct skd_device *skdev,
struct skd_request_context *skreq, int error)
{
- struct request *req = skreq->req;
- unsigned int io_flags = req->cmd_flags;
-
- if ((io_flags & REQ_DISCARD) &&
- (skreq->discard_page == 1)) {
- pr_debug("%s:%s:%d, free the page!",
- skdev->name, __func__, __LINE__);
- __free_page(req->completion_data);
- }
-
if (unlikely(error)) {
struct request *req = skreq->req;
char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->queue = q;
q->queuedata = skdev;
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
/* set sysfs optimal_io_size to 8K */
blk_queue_io_opt(q, 8192);
- /* DISCARD Flag initialization. */
- q->limits.discard_granularity = 8192;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- q->limits.discard_zeroes_data = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 28cff0d23d82..42758b52768c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
u8 writeback = virtblk_get_cache_mode(vdev);
struct virtio_blk *vblk = vdev->priv;
- if (writeback)
- blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
- else
- blk_queue_flush(vblk->disk->queue, 0);
-
+ blk_queue_write_cache(vblk->disk->queue, writeback, false);
revalidate_disk(vblk->disk);
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080e243c..3355f1cdd4e5 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->type |= VDISK_REMOVABLE;
q = bdev_get_queue(bdev);
- if (q && q->flush_flags)
+ if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
if (q && blk_queue_secdiscard(q))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6405b6557792..ca13df854639 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -998,7 +998,8 @@ static const char *flush_info(unsigned int feature_flush)
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_flush(info->rq, info->feature_flush);
+ blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
+ info->feature_flush & REQ_FUA);
pr_info("blkfront: %s: %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info->feature_flush),
"persistent grants:", info->feature_persistent ?
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37a8a907febe..05dbcce70b0e 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -522,7 +522,7 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
static void update_flush(ide_drive_t *drive)
{
u16 *id = drive->id;
- unsigned flush = 0;
+ bool wc = false;
if (drive->dev_flags & IDE_DFLAG_WCACHE) {
unsigned long long capacity;
@@ -546,12 +546,12 @@ static void update_flush(ide_drive_t *drive)
drive->name, barrier ? "" : "not ");
if (barrier) {
- flush = REQ_FLUSH;
+ wc = true;
blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
}
}
- blk_queue_flush(drive->queue, flush);
+ blk_queue_write_cache(drive->queue, wc, false);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80adb94..160c1a6838e1 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -30,23 +30,35 @@
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
-static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_tgt_types);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
+static LIST_HEAD(nvm_targets);
static DECLARE_RWSEM(nvm_lock);
+static struct nvm_target *nvm_find_target(const char *name)
+{
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &nvm_targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
struct nvm_tgt_type *tt;
- list_for_each_entry(tt, &nvm_targets, list)
+ list_for_each_entry(tt, &nvm_tgt_types, list)
if (!strcmp(name, tt->name))
return tt;
return NULL;
}
-int nvm_register_target(struct nvm_tgt_type *tt)
+int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
@@ -54,14 +66,14 @@ int nvm_register_target(struct nvm_tgt_type *tt)
if (nvm_find_target_type(tt->name))
ret = -EEXIST;
else
- list_add(&tt->list, &nvm_targets);
+ list_add(&tt->list, &nvm_tgt_types);
up_write(&nvm_lock);
return ret;
}
-EXPORT_SYMBOL(nvm_register_target);
+EXPORT_SYMBOL(nvm_register_tgt_type);
-void nvm_unregister_target(struct nvm_tgt_type *tt)
+void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
if (!tt)
return;
@@ -70,20 +82,20 @@ void nvm_unregister_target(struct nvm_tgt_type *tt)
list_del(&tt->list);
up_write(&nvm_lock);
}
-EXPORT_SYMBOL(nvm_unregister_target);
+EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
dma_addr_t *dma_handler)
{
- return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
+ return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
dma_addr_t dma_handler)
{
- dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+ dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
@@ -214,8 +226,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = dev_to_generic_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -228,8 +240,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = generic_to_dev_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -239,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas)
+ struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;
- if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
- rqd->nr_pages = 1;
+ if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
return 0;
}
- plane_cnt = dev->plane_mode;
- rqd->nr_pages = plane_cnt * nr_ppas;
-
- if (dev->ops->max_phys_sect < rqd->nr_pages)
- return -EINVAL;
-
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("nvm: failed to allocate dma memory\n");
return -ENOMEM;
}
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ if (!vblk) {
+ for (i = 0; i < nr_ppas; i++)
+ rqd->ppa_list[i] = ppas[i];
+ } else {
+ plane_cnt = dev->plane_mode;
+ rqd->nr_ppas *= plane_cnt;
+
for (i = 0; i < nr_ppas; i++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppas[i].g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ }
}
}
@@ -292,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
if (ret)
return ret;
@@ -322,11 +337,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
- int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+ int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
- struct nvm_rq rqd;
struct bio *bio;
int ret;
unsigned long hang_check;
@@ -335,23 +349,21 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
if (IS_ERR_OR_NULL(bio))
return -ENOMEM;
- memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ nvm_generic_to_addr_mode(dev, rqd);
+
+ rqd->dev = dev;
+ rqd->opcode = opcode;
+ rqd->flags = flags;
+ rqd->bio = bio;
+ rqd->wait = &wait;
+ rqd->end_io = nvm_end_io_sync;
+
+ ret = dev->ops->submit_io(dev, rqd);
if (ret) {
bio_put(bio);
return ret;
}
- rqd.opcode = opcode;
- rqd.bio = bio;
- rqd.wait = &wait;
- rqd.dev = dev;
- rqd.end_io = nvm_end_io_sync;
- rqd.flags = flags;
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->submit_io(dev, &rqd);
-
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
@@ -359,12 +371,113 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
else
wait_for_completion_io(&wait);
+ return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
+ * take care to free the ppa list if necessary.
+ * @dev: device
+ * @ppa_list: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+ int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+
+ if (dev->ops->max_phys_sect < nr_ppas)
+ return -EINVAL;
+