author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-12 14:46:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-12 14:46:26 -0700
commit     dfee9c257b102d7c0407629eef2ed32e152de0d2 (patch)
tree       75b8b043241d4b00a6320ed1af07acbebff0bcfe /fs/fuse
parent     7b47a9e7c8f672b6fb0b77fca11a63a8a77f5a91 (diff)
parent     fabf7e0262d0bd57739d29aeac94c44b0542ff1f (diff)
Merge tag 'fuse-update-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
Pull fuse updates from Miklos Szeredi:
 "Scalability and performance improvements, as well as minor bug fixes
  and cleanups"

* tag 'fuse-update-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse: (25 commits)
  fuse: cache readdir calls if filesystem opts out of opendir
  fuse: support clients that don't implement 'opendir'
  fuse: lift bad inode checks into callers
  fuse: multiplex cached/direct_io file operations
  fuse add copy_file_range to direct io fops
  fuse: use iov_iter based generic splice helpers
  fuse: Switch to using async direct IO for FOPEN_DIRECT_IO
  fuse: use atomic64_t for khctr
  fuse: clean up aborted
  fuse: Protect ff->reserved_req via corresponding fi->lock
  fuse: Protect fi->nlookup with fi->lock
  fuse: Introduce fi->lock to protect write related fields
  fuse: Convert fc->attr_version into atomic64_t
  fuse: Add fuse_inode argument to fuse_prepare_release()
  fuse: Verify userspace asks to requeue interrupt that we really sent
  fuse: Do some refactoring in fuse_dev_do_write()
  fuse: Wake up req->waitq of only if not background
  fuse: Optimize request_end() by not taking fiq->waitq.lock
  fuse: Kill fasync only if interrupt is queued in queue_interrupt()
  fuse: Remove stale comment in end_requests()
  ...
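The headline scalability change in this series is splitting state off the connection-wide fc->lock: per-inode fields (attr_version snapshot, nlookup, write lists) move under a new per-inode fi->lock, while fc->attr_version and fc->khctr become atomic64_t so bumping them needs no lock at all. A minimal userspace sketch of that pattern follows; the conn/inode_state types and update_size() helper are hypothetical stand-ins for fuse_conn/fuse_inode, not the kernel code itself.

/* Illustrative sketch only: per-object lock plus atomic counter,
 * mirroring the fc->lock -> fi->lock + atomic64_t conversion. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct conn {
        _Atomic uint64_t attr_version;  /* was a u64 guarded by fc->lock */
};

struct inode_state {
        pthread_mutex_t lock;           /* new per-inode lock (fi->lock) */
        uint64_t attr_version;          /* per-inode snapshot of the counter */
        uint64_t nlookup;
};

static void update_size(struct conn *c, struct inode_state *i)
{
        /* Before: every attr/size update serialized on one global lock.
         * After: only users of the same inode contend with each other. */
        pthread_mutex_lock(&i->lock);
        /* mirrors atomic64_inc_return(&fc->attr_version) */
        i->attr_version = atomic_fetch_add(&c->attr_version, 1) + 1;
        /* ... i_size_write(), nlookup++, write list manipulation ... */
        pthread_mutex_unlock(&i->lock);
}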
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/control.c    4
-rw-r--r--  fs/fuse/cuse.c       7
-rw-r--r--  fs/fuse/dev.c      115
-rw-r--r--  fs/fuse/dir.c       54
-rw-r--r--  fs/fuse/file.c     342
-rw-r--r--  fs/fuse/fuse_i.h    28
-rw-r--r--  fs/fuse/inode.c     26
-rw-r--r--  fs/fuse/readdir.c    4
8 files changed, 321 insertions, 259 deletions
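Several of the dev.c hunks below rely on a lockless handshake between request_end() and queue_interrupt(): each side publishes its own update with a full barrier before checking the other side's state, so at least one of them is guaranteed to observe the other and the interrupt entry is removed exactly once. A rough userspace approximation of that pairing, assuming C11 atomics and hypothetical flag names in place of FR_FINISHED and the intr_entry list:

/* Sketch only: approximates the smp_mb() pairing added in dev.c. */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool finished;       /* stands in for FR_FINISHED               */
static _Atomic bool intr_queued;    /* stands in for !list_empty(&intr_entry)  */

static void completer(void)         /* ~ request_end() */
{
        /* test_and_set_bit() implies a full barrier in the kernel */
        atomic_exchange(&finished, true);
        if (atomic_load(&intr_queued))
                atomic_store(&intr_queued, false);      /* list_del_init() */
}

static void interrupter(void)       /* ~ queue_interrupt() */
{
        atomic_store(&intr_queued, true);               /* list_add_tail() */
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb()        */
        if (atomic_load(&finished))
                atomic_store(&intr_queued, false);      /* undo: request already done */
}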
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 989df5accaee..fe80bea4ad89 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -35,7 +35,9 @@ static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
{
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
- fuse_abort_conn(fc, true);
+ if (fc->abort_err)
+ fc->aborted = true;
+ fuse_abort_conn(fc);
fuse_conn_put(fc);
}
return count;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8f68181256c0..55a26f351467 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -141,10 +141,11 @@ static int cuse_open(struct inode *inode, struct file *file)
static int cuse_release(struct inode *inode, struct file *file)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_sync_release(ff, file->f_flags);
+ fuse_sync_release(fi, ff, file->f_flags);
fuse_conn_put(fc);
return 0;
@@ -407,7 +408,7 @@ err_unlock:
err_region:
unregister_chrdev_region(devt, 1);
err:
- fuse_abort_conn(fc, false);
+ fuse_abort_conn(fc);
goto out;
}
@@ -586,7 +587,7 @@ static ssize_t cuse_class_abort_store(struct device *dev,
{
struct cuse_conn *cc = dev_get_drvdata(dev);
- fuse_abort_conn(&cc->fc, false);
+ fuse_abort_conn(&cc->fc);
return count;
}
static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 809c0f2f9942..8a63e52785e9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -251,17 +251,18 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
struct file *file)
{
struct fuse_req *req = NULL;
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
do {
wait_event(fc->reserved_req_waitq, ff->reserved_req);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (ff->reserved_req) {
req = ff->reserved_req;
ff->reserved_req = NULL;
req->stolen_file = get_file(file);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
} while (!req);
return req;
@@ -273,16 +274,17 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
struct file *file = req->stolen_file;
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
WARN_ON(req->max_pages);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
memset(req, 0, sizeof(*req));
fuse_request_init(req, NULL, NULL, 0);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
wake_up_all(&fc->reserved_req_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fput(file);
}
@@ -431,10 +433,16 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
-
- spin_lock(&fiq->waitq.lock);
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ /*
+ * test_and_set_bit() implies smp_mb() between bit
+ * changing and below intr_entry check. Pairs with
+ * smp_mb() from queue_interrupt().
+ */
+ if (!list_empty(&req->intr_entry)) {
+ spin_lock(&fiq->waitq.lock);
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->waitq.lock);
+ }
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -462,27 +470,43 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fc->active_background--;
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
+ } else {
+ /* Wake up waiter sleeping in request_wait_answer() */
+ wake_up(&req->waitq);
}
- wake_up(&req->waitq);
+
if (req->end)
req->end(fc, req);
put_request:
fuse_put_request(fc, req);
}
-static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->waitq.lock);
- if (test_bit(FR_FINISHED, &req->flags)) {
+ /* Check for we've sent request to interrupt this req */
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
spin_unlock(&fiq->waitq.lock);
- return;
+ return -EINVAL;
}
+
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->waitq.lock);
+ return 0;
+ }
wake_up_locked(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
spin_unlock(&fiq->waitq.lock);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ return 0;
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
@@ -1306,7 +1330,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
goto err_unlock;
if (!fiq->connected) {
- err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+ err = fc->aborted ? -ECONNABORTED : -ENODEV;
goto err_unlock;
}
@@ -1353,7 +1377,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected) {
- err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+ err = fc->aborted ? -ECONNABORTED : -ENODEV;
goto out_end;
}
if (err) {
@@ -1900,16 +1924,17 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_req *req;
struct fuse_out_header oh;
+ err = -EINVAL;
if (nbytes < sizeof(struct fuse_out_header))
- return -EINVAL;
+ goto out;
err = fuse_copy_one(cs, &oh, sizeof(oh));
if (err)
- goto err_finish;
+ goto copy_finish;
err = -EINVAL;
if (oh.len != nbytes)
- goto err_finish;
+ goto copy_finish;
/*
* Zero oh.unique indicates unsolicited notification message
@@ -1917,41 +1942,40 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
- return err ? err : nbytes;
+ goto out;
}
err = -EINVAL;
if (oh.error <= -1000 || oh.error > 0)
- goto err_finish;
+ goto copy_finish;
spin_lock(&fpq->lock);
- err = -ENOENT;
- if (!fpq->connected)
- goto err_unlock_pq;
+ req = NULL;
+ if (fpq->connected)
+ req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
- req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
- if (!req)
- goto err_unlock_pq;
+ err = -ENOENT;
+ if (!req) {
+ spin_unlock(&fpq->lock);
+ goto copy_finish;
+ }
/* Is it an interrupt reply ID? */
if (oh.unique & FUSE_INT_REQ_BIT) {
__fuse_get_request(req);
spin_unlock(&fpq->lock);
- err = -EINVAL;
- if (nbytes != sizeof(struct fuse_out_header)) {
- fuse_put_request(fc, req);
- goto err_finish;
- }
-
- if (oh.error == -ENOSYS)
+ err = 0;
+ if (nbytes != sizeof(struct fuse_out_header))
+ err = -EINVAL;
+ else if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
- queue_interrupt(&fc->iq, req);
+ err = queue_interrupt(&fc->iq, req);
+
fuse_put_request(fc, req);
- fuse_copy_finish(cs);
- return nbytes;
+ goto copy_finish;
}
clear_bit(FR_SENT, &req->flags);
@@ -1977,14 +2001,12 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_unlock(&fpq->lock);
request_end(fc, req);
-
+out:
return err ? err : nbytes;
- err_unlock_pq:
- spin_unlock(&fpq->lock);
- err_finish:
+copy_finish:
fuse_copy_finish(cs);
- return err;
+ goto out;
}
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
@@ -2109,11 +2131,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
return mask;
}
-/*
- * Abort all requests on the given list (pending or processing)
- *
- * This function releases and reacquires fc->lock
- */
+/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
while (!list_empty(head)) {
@@ -2159,7 +2177,7 @@ static void end_polls(struct fuse_conn *fc)
* is OK, the request will in that case be removed from the list before we touch
* it.
*/
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
+void fuse_abort_conn(struct fuse_conn *fc)
{
struct fuse_iqueue *fiq = &fc->iq;
@@ -2175,7 +2193,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
fc->connected = 0;
spin_unlock(&fc->bg_lock);
- fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
struct fuse_pqueue *fpq = &fud->pq;
@@ -2253,7 +2270,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
- fuse_abort_conn(fc, false);
+ fuse_abort_conn(fc);
}
fuse_dev_free(fud);
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index e909678afa2d..dd0f64f7bc06 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -149,21 +149,6 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
args->out.args[0].value = outarg;
}
-u64 fuse_get_attr_version(struct fuse_conn *fc)
-{
- u64 curr_version;
-
- /*
- * The spin lock isn't actually needed on 64bit archs, but we
- * don't yet care too much about such optimizations.
- */
- spin_lock(&fc->lock);
- curr_version = fc->attr_version;
- spin_unlock(&fc->lock);
-
- return curr_version;
-}
-
/*
* Check whether the dentry is still valid
*
@@ -222,9 +207,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
fuse_queue_forget(fc, forget, outarg.nodeid, 1);
goto invalid;
}
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
fi->nlookup++;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
kfree(forget);
if (ret == -ENOMEM)
@@ -400,6 +385,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_create_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
+ struct fuse_inode *fi;
struct fuse_file *ff;
/* Userspace expects S_IFREG in create mode */
@@ -451,7 +437,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
&outentry.attr, entry_attr_timeout(&outentry), 0);
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
- fuse_sync_release(ff, flags);
+ fuse_sync_release(NULL, ff, flags);
fuse_queue_forget(fc, forget, outentry.nodeid, 1);
err = -ENOMEM;
goto out_err;
@@ -462,7 +448,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
fuse_dir_changed(dir);
err = finish_open(file, entry, generic_file_open);
if (err) {
- fuse_sync_release(ff, flags);
+ fi = get_fuse_inode(inode);
+ fuse_sync_release(fi, ff, flags);
} else {
file->private_data = ff;
fuse_finish_open(inode, file);
@@ -671,8 +658,8 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct inode *inode = d_inode(entry);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
/*
* If i_nlink == 0 then unlink doesn't make sense, yet this can
* happen if userspace filesystem is careless. It would be
@@ -681,7 +668,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
*/
if (inode->i_nlink > 0)
drop_nlink(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
@@ -825,10 +812,10 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
if (!err) {
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
inc_nlink(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_update_ctime(inode);
} else if (err == -EINTR) {
@@ -1356,15 +1343,14 @@ static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
*/
void fuse_set_nowrite(struct inode *inode)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
BUG_ON(!inode_is_locked(inode));
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
BUG_ON(fi->writectr < 0);
fi->writectr += FUSE_NOWRITE;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
@@ -1385,11 +1371,11 @@ static void __fuse_release_nowrite(struct inode *inode)
void fuse_release_nowrite(struct inode *inode)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
__fuse_release_nowrite(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
@@ -1524,7 +1510,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
goto error;
}
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
@@ -1542,10 +1528,10 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
i_size_write(inode, outarg.attr.size);
if (is_truncate) {
- /* NOTE: this may release/reacquire fc->lock */
+ /* NOTE: this may release/reacquire fi->lock */
__fuse_release_nowrite(inode);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
/*
* Only call invalidate_inode_pages2() after removing
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a59c16bd90ac..06096b60f1df 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,8 +19,6 @@
#include <linux/falloc.h>
#include <linux/uio.h>
-static const struct file_operations fuse_direct_io_file_operations;
-
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
@@ -64,9 +62,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
- spin_lock(&fc->lock);
- ff->kh = ++fc->khctr;
- spin_unlock(&fc->lock);
+ ff->kh = atomic64_inc_return(&fc->khctr);
return ff;
}
@@ -94,7 +90,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
if (refcount_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- if (ff->fc->no_open && !isdir) {
+ if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
/*
* Drop the release request when client does not
* implement 'open'
@@ -128,8 +124,9 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
return -ENOMEM;
ff->fh = 0;
- ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
- if (!fc->no_open || isdir) {
+ /* Default for no-open */
+ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
+ if (isdir ? !fc->no_opendir : !fc->no_open) {
struct fuse_open_out outarg;
int err;
@@ -138,11 +135,14 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
ff->fh = outarg.fh;
ff->open_flags = outarg.open_flags;
- } else if (err != -ENOSYS || isdir) {
+ } else if (err != -ENOSYS) {
fuse_file_free(ff);
return err;
} else {
- fc->no_open = 1;
+ if (isdir)
+ fc->no_opendir = 1;
+ else
+ fc->no_open = 1;
}
}
@@ -159,17 +159,16 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
struct inode *inode = file_inode(file);
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
/*
* file may be written through mmap, so chain it onto the
* inodes's write_file list
*/
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (list_empty(&ff->write_entry))
list_add(&ff->write_entry, &fi->write_files);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
@@ -177,8 +176,6 @@ void fuse_finish_open(struct inode *inode, struct file *file)
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
- if (ff->open_flags & FOPEN_DIRECT_IO)
- file->f_op = &fuse_direct_io_file_operations;
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
@@ -186,10 +183,10 @@ void fuse_finish_open(struct inode *inode, struct file *file)
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, 0);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
if (fc->writeback_cache)
file_update_time(file);
@@ -224,14 +221,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
return err;
}
-static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
+static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
+ int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
struct fuse_req *req = ff->reserved_req;
struct fuse_release_in *inarg = &req->misc.release.in;
+ /* Inode is NULL on error path of fuse_create_open() */
+ if (likely(fi)) {
+ spin_lock(&fi->lock);
+ list_del(&ff->write_entry);
+ spin_unlock(&fi->lock);
+ }
spin_lock(&fc->lock);
- list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
@@ -249,11 +252,12 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
void fuse_release_common(struct file *file, bool isdir)
{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
struct fuse_req *req = ff->reserved_req;
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
- fuse_prepare_release(ff, file->f_flags, opcode);
+ fuse_prepare_release(fi, ff, file->f_flags, opcode);
if (ff->flock) {
struct fuse_release_in *inarg = &req->misc.release.in;
@@ -295,10 +299,10 @@ static int fuse_release(struct inode *inode, struct file *file)
return 0;
}
-void fuse_sync_release(struct fuse_file *ff, int flags)
+void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
{
WARN_ON(refcount_read(&ff->count) > 1);
- fuse_prepare_release(ff, flags, FUSE_RELEASE);
+ fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
/*
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here"
@@ -329,33 +333,39 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
return (u64) v0 + ((u64) v1 << 32);
}
-/*
- * Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
- */
-static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
- pgoff_t idx_to)
+static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
+ pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_req *req;
- bool found = false;
- spin_lock(&fc->lock);
list_for_each_entry(req, &fi->writepages, writepages_entry) {
pgoff_t curr_index;
- BUG_ON(req->inode != inode);
+ WARN_ON(get_fuse_inode(req->inode) != fi);
curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
if (idx_from < curr_index + req->num_pages &&
curr_index <= idx_to) {
- found = true;
- break;
+ return req;
}
}
- spin_unlock(&fc->lock);
+ return NULL;
+}
+
+/*
+ * Check if any page in a range is under writeback
+ *
+ * This is currently done by walking the list of writepage requests
+ * for the inode, which can be pretty inefficient.
+ */
+static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
+ pgoff_t idx_to)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ bool found;
+
+ spin_lock(&fi->lock);
+ found = fuse_find_writeback(fi, idx_from, idx_to);
+ spin_unlock(&fi->lock);
return found;
}
@@ -598,9 +608,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
- spin_unlock(&fc->lock);
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+ spin_unlock(&fi->lock);
}
io->iocb->ki_complete(io->iocb, res, 0);
@@ -675,13 +685,13 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (attr_ver == fi->attr_version && size < inode->i_size &&
!test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
- fi->attr_version = ++fc->attr_version;
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, size);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
@@ -919,7 +929,7 @@ out:
return err;
}
-static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -996,13 +1006,13 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
struct fuse_inode *fi = get_fuse_inode(inode);
bool ret = false;
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
if (pos > inode->i_size) {
i_size_write(inode, pos);
ret = true;
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
return ret;
}
@@ -1125,9 +1135,6 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
int err = 0;
ssize_t res = 0;
- if (is_bad_inode(inode))
- return -EIO;
-
if (inode->i_size < pos + iov_iter_count(ii))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1173,7 +1180,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
return res > 0 ? res : err;
}
-static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -1416,9 +1423,6 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
ssize_t res;
struct inode *inode = file_inode(io->iocb->ki_filp);
- if (is_bad_inode(inode))
- return -EIO;
-
res = fuse_direct_io(io, iter, ppos, 0);
fuse_invalidate_atime(inode);
@@ -1426,10 +1430,21 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
return res;
}
+static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
+
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
- return __fuse_direct_read(&io, to, &iocb->ki_pos);
+ ssize_t res;
+
+ if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ res = fuse_direct_IO(iocb, to);
+ } else {
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+
+ res = __fuse_direct_read(&io, to, &iocb->ki_pos);
+ }
+
+ return res;
}
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -1438,14 +1453,17 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
ssize_t res;
- if (is_bad_inode(inode))
- return -EIO;
-
/* Don't allow parallel writes to the same file */
inode_lock(inode);
res = generic_write_checks(iocb, from);
- if (res > 0)
- res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
+ if (res > 0) {
+ if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ res = fuse_direct_IO(iocb, from);
+ } else {
+ res = fuse_direct_io(&io, from, &iocb->ki_pos,
+ FUSE_DIO_WRITE);
+ }
+ }
fuse_invalidate_attr(inode);
if (res > 0)
fuse_write_update_size(inode, iocb->ki_pos);
@@ -1454,6 +1472,34 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
return res;
}
+static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+
+ if (is_bad_inode(file_inode(file)))
+ return -EIO;
+
+ if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_cache_read_iter(iocb, to);
+ else
+ return fuse_direct_read_iter(iocb, to);
+}
+
+static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+
+ if (is_bad_inode(file_inode(file)))
+ return -EIO;
+
+ if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_cache_write_iter(iocb, from);
+ else
+ return fuse_direct_write_iter(iocb, from);
+}
+
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
int i;
@@ -1481,20 +1527,18 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&fi->page_waitq);
}
-/* Called under fc->lock, may release and reacquire it */
+/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
loff_t size)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
{
+ struct fuse_req *aux, *next;
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
__u64 data_size = req->num_pages * PAGE_SIZE;
bool queued;
- if (!fc->connected)
- goto out_free;
-
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
@@ -1505,28 +1549,40 @@ __acquires(fc->lock)
}
req->in.args[1].size = inarg->size;
- fi->writectr++;
queued = fuse_request_queue_background(fc, req);
- WARN_ON(!queued);
+ /* Fails on broken connection only */
+ if (unlikely(!queued))
+ goto out_free;
+
+ fi->writectr++;
return;
out_free:
fuse_writepage_finish(fc, req);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
+
+ /* After fuse_writepage_finish() aux request list is private */
+ for (aux = req->misc.write.next; aux; aux = next) {
+ next = aux->misc.write.next;
+ aux->misc.write.next = NULL;
+ fuse_writepage_free(fc, aux);
+ fuse_put_request(fc, aux);
+ }
+
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
}
/*
* If fi->writectr is positive (no truncate or fsync going on) send
* all queued writepage requests.
*
- * Called with fc->lock
+ * Called with fi->lock
*/
void fuse_flush_writepages(struct inode *inode)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1546,7 +1602,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_inode *fi = get_fuse_inode(inode);
mapping_set_error(inode->i_mapping, req->out.h.error);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
while (req->misc.write.next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
@@ -1583,7 +1639,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
}
fi->writectr--;
fuse_writepage_finish(fc, req);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_writepage_free(fc, req);
}
@@ -1592,13 +1648,13 @@ static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
{
struct fuse_file *ff = NULL;
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (!list_empty(&fi->write_files)) {
ff = list_entry(fi->write_files.next, struct fuse_file,
write_entry);
fuse_file_get(ff);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
return ff;
}
@@ -1669,11 +1725,11 @@ static int fuse_writepage_locked(struct page *page)
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_add(&req->writepages_entry, &fi->writepages);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
end_page_writeback(page);
@@ -1722,21 +1778,27 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
int num_pages = req->num_pages;
int i;
req->ff = fuse_file_get(data->ff);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
for (i = 0; i < num_pages; i++)
end_page_writeback(data->orig_pages[i]);
}
+/*
+ * First recheck under fi->lock if the offending offset is still under
+ * writeback. If yes, then iterate auxiliary write requests, to see if there's
+ * one already added for a page at this offset. If there's none, then insert
+ * this new request onto the auxiliary list, otherwise reuse the existing one by
+ * copying the new page contents over to the old temporary page.
+ */
static bool fuse_writepage_in_flight(struct fuse_req *new_req,
struct page *page)
{
@@ -1744,57 +1806,50 @@ static bool fuse_wr