author    Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 16:41:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 16:41:23 -0700
commit    8c27cb3566762613a23c080e3db7d0501af9a787 (patch)
tree      32b2752e320b6cb3ecf289dd00b5145a6de947e6 /fs
parent    7114f51fcb979f167ab5f625ac74059dcb1afc28 (diff)
parent    848c23b78fafdcd3270b06a30737f8dbd70c347f (diff)
Merge branch 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "The core updates improve error handling (mostly related to bios), with
  the usual incremental work on the GFP_NOFS (mis)use removal,
  refactoring or cleanups. Except the two top patches, all have been in
  for-next for an extensive amount of time.

  User visible changes:

   - statx support
   - quota override tunable
   - improved compression thresholds
   - obsoleted mount option alloc_start

  Core updates:

   - bio-related updates:
      - faster bio cloning
      - no allocation failures
      - preallocated flush bios
   - more kvzalloc use, memalloc_nofs protections, GFP_NOFS updates
   - prep work for btree_inode removal
   - dir-item validation
   - qgroup fixes and updates
   - cleanups:
      - removed unused struct members, unused code, refactoring
      - argument refactoring (fs_info/root, caller -> callee sink)
      - SEARCH_TREE ioctl docs"

* 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (115 commits)
  btrfs: Remove false alert when fiemap range is smaller than on-disk extent
  btrfs: Don't clear SGID when inheriting ACLs
  btrfs: fix integer overflow in calc_reclaim_items_nr
  btrfs: scrub: fix target device initialization while setting up scrub context
  btrfs: qgroup: Fix qgroup reserved space underflow by only freeing reserved ranges
  btrfs: qgroup: Introduce extent changeset for qgroup reserve functions
  btrfs: qgroup: Fix qgroup reserved space underflow caused by buffered write and quotas being enabled
  btrfs: qgroup: Return actually freed bytes for qgroup release or free data
  btrfs: qgroup: Cleanup btrfs_qgroup_prepare_account_extents function
  btrfs: qgroup: Add quick exit for non-fs extents
  Btrfs: rework delayed ref total_bytes_pinned accounting
  Btrfs: return old and new total ref mods when adding delayed refs
  Btrfs: always account pinned bytes when dropping a tree block ref
  Btrfs: update total_bytes_pinned when pinning down extents
  Btrfs: make BUG_ON() in add_pinned_bytes() an ASSERT()
  Btrfs: make add_pinned_bytes() take an s64 num_bytes instead of u64
  btrfs: fix validation of XATTR_ITEM dir items
  btrfs: Verify dir_item in iterate_object_props
  btrfs: Check name_len before in btrfs_del_root_ref
  btrfs: Check name_len before reading btrfs_get_name
  ...
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/acl.c13
-rw-r--r--fs/btrfs/backref.c10
-rw-r--r--fs/btrfs/check-integrity.c53
-rw-r--r--fs/btrfs/compression.c94
-rw-r--r--fs/btrfs/compression.h44
-rw-r--r--fs/btrfs/ctree.c42
-rw-r--r--fs/btrfs/ctree.h84
-rw-r--r--fs/btrfs/delayed-ref.c29
-rw-r--r--fs/btrfs/delayed-ref.h6
-rw-r--r--fs/btrfs/dev-replace.c4
-rw-r--r--fs/btrfs/dir-item.c76
-rw-r--r--fs/btrfs/disk-io.c194
-rw-r--r--fs/btrfs/disk-io.h12
-rw-r--r--fs/btrfs/export.c5
-rw-r--r--fs/btrfs/extent-tree.c493
-rw-r--r--fs/btrfs/extent_io.c245
-rw-r--r--fs/btrfs/extent_io.h86
-rw-r--r--fs/btrfs/file-item.c31
-rw-r--r--fs/btrfs/file.c46
-rw-r--r--fs/btrfs/free-space-tree.c38
-rw-r--r--fs/btrfs/inode-map.c4
-rw-r--r--fs/btrfs/inode.c457
-rw-r--r--fs/btrfs/ioctl.c18
-rw-r--r--fs/btrfs/lzo.c33
-rw-r--r--fs/btrfs/ordered-data.c17
-rw-r--r--fs/btrfs/ordered-data.h4
-rw-r--r--fs/btrfs/print-tree.c7
-rw-r--r--fs/btrfs/props.c7
-rw-r--r--fs/btrfs/qgroup.c225
-rw-r--r--fs/btrfs/qgroup.h9
-rw-r--r--fs/btrfs/raid56.c16
-rw-r--r--fs/btrfs/reada.c1
-rw-r--r--fs/btrfs/relocation.c17
-rw-r--r--fs/btrfs/root-tree.c7
-rw-r--r--fs/btrfs/scrub.c211
-rw-r--r--fs/btrfs/send.c112
-rw-r--r--fs/btrfs/super.c74
-rw-r--r--fs/btrfs/sysfs.c41
-rw-r--r--fs/btrfs/tests/extent-io-tests.c2
-rw-r--r--fs/btrfs/transaction.c25
-rw-r--r--fs/btrfs/tree-log.c44
-rw-r--r--fs/btrfs/volumes.c74
-rw-r--r--fs/btrfs/volumes.h7
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/btrfs/zlib.c20
45 files changed, 1680 insertions(+), 1359 deletions(-)
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 247b8dfaf6e5..8d8370ddb6b2 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -78,12 +78,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (ret)
- return ret;
- }
- ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@ out:
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ int ret;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (ret)
+ return ret;
+ }
return __btrfs_set_acl(NULL, inode, acl, type);
}
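
The acl.c hunks implement "Don't clear SGID when inheriting ACLs":
posix_acl_update_mode() moves out of __btrfs_set_acl() and into
btrfs_set_acl(), so i_mode (and with it the SGID bit) is only recomputed
on the explicit setacl path. A minimal sketch of the resulting call
pattern, assuming the usual btrfs_init_acl() inheritance helper:

    /* explicit setacl: the mode may legitimately change first */
    if (type == ACL_TYPE_ACCESS && acl) {
            ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
            if (ret)
                    return ret;     /* e.g. -EPERM */
    }
    ret = __btrfs_set_acl(NULL, inode, acl, type);

    /* inheritance at create time (btrfs_init_acl): the ACL is stored
     * without touching i_mode, so an inherited SGID bit survives */
    ret = __btrfs_set_acl(trans, inode, acl, type);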
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 24865da63d8f..f723c11bb763 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -16,7 +16,7 @@
* Boston, MA 021110-1307, USA.
*/
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
@@ -2305,7 +2305,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = vmalloc(alloc_bytes);
+ data = kvmalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
@@ -2339,9 +2339,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
if (IS_ERR(fspath))
return (void *)fspath;
- ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+ ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
if (!ifp) {
- vfree(fspath);
+ kvfree(fspath);
return ERR_PTR(-ENOMEM);
}
@@ -2356,6 +2356,6 @@ void free_ipath(struct inode_fs_paths *ipath)
{
if (!ipath)
return;
- vfree(ipath->fspath);
+ kvfree(ipath->fspath);
kfree(ipath);
}
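
The backref.c conversion is part of the tree-wide kvmalloc adoption:
kvmalloc() tries kmalloc() first and falls back to vmalloc() when the
slab allocator cannot satisfy the size, and kvfree() releases memory
from either path, which is why the vfree() calls change along with the
allocations. GFP_KERNEL is safe here because these helpers run from
ioctl context rather than from reclaim. A sketch of the pairing:

    #include <linux/mm.h>

    /* one call replaces the open-coded kmalloc-then-vmalloc fallback */
    data = kvmalloc(alloc_bytes, GFP_KERNEL);
    if (!data)
            return ERR_PTR(-ENOMEM);

    /* ... */

    kvfree(data);   /* correct for both kmalloc'ed and vmalloc'ed memory */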
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 4ded1c3f92b8..11d37c94ce05 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -94,7 +94,7 @@
#include <linux/mutex.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/string.h>
#include "ctree.h"
#include "disk-io.h"
@@ -1638,12 +1638,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
struct bio *bio;
unsigned int j;
- bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
- if (!bio) {
- pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
- num_pages - i);
- return -1;
- }
+ bio = btrfs_io_bio_alloc(num_pages - i);
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_iter.bi_sector = dev_bytenr >> 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
@@ -1668,14 +1663,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
dev_bytenr += (j - i) * PAGE_SIZE;
i = j;
}
- for (i = 0; i < num_pages; i++) {
+ for (i = 0; i < num_pages; i++)
block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
- if (!block_ctx->datav[i]) {
- pr_info("btrfsic: kmap() failed (dev %s)!\n",
- block_ctx->dev->name);
- return -1;
- }
- }
return block_ctx->len;
}
@@ -2822,44 +2811,47 @@ static void __btrfsic_submit_bio(struct bio *bio)
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
- unsigned int i;
+ unsigned int i = 0;
u64 dev_bytenr;
u64 cur_bytenr;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
int bio_is_patched;
char **mapped_datav;
+ unsigned int segs = bio_segments(bio);
dev_bytenr = 512 * bio->bi_iter.bi_sector;
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
- bio_op(bio), bio->bi_opf, bio->bi_vcnt,
+ bio_op(bio), bio->bi_opf, segs,
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);
- mapped_datav = kmalloc_array(bio->bi_vcnt,
+ mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS);
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;
- bio_for_each_segment_all(bvec, bio, i) {
- BUG_ON(bvec->bv_len != PAGE_SIZE);
- mapped_datav[i] = kmap(bvec->bv_page);
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_len != PAGE_SIZE);
+ mapped_datav[i] = kmap(bvec.bv_page);
+ i++;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
- i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
- cur_bytenr += bvec->bv_len;
+ i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
+ cur_bytenr += bvec.bv_len;
}
btrfsic_process_written_block(dev_state, dev_bytenr,
- mapped_datav, bio->bi_vcnt,
+ mapped_datav, segs,
bio, &bio_is_patched,
NULL, bio->bi_opf);
- bio_for_each_segment_all(bvec, bio, i)
- kunmap(bvec->bv_page);
+ bio_for_each_segment(bvec, bio, iter)
+ kunmap(bvec.bv_page);
kfree(mapped_datav);
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
@@ -2923,13 +2915,10 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
fs_info->sectorsize, PAGE_SIZE);
return -1;
}
- state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ state = kvzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
- state = vzalloc(sizeof(*state));
- if (!state) {
- pr_info("btrfs check-integrity: vzalloc() failed!\n");
- return -1;
- }
+ pr_info("btrfs check-integrity: allocation failed!\n");
+ return -1;
}
if (!btrfsic_is_initialized) {
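
Three independent simplifications land in check-integrity.c: the error
paths for btrfs_io_bio_alloc() and kmap() disappear because neither can
fail any more (the former is now backed by a bioset, the latter never
returns NULL), the kzalloc-with-vzalloc-fallback collapses into
kvzalloc(), and the bio walk switches from bio_for_each_segment_all(),
which indexes bi_io_vec directly and is only valid for a bio its owner
built, to bio_for_each_segment(), which advances an iterator and is also
safe on cloned or split bios; bio_segments() replaces bi_vcnt for the
same reason. A minimal sketch of the iterator form:

    struct bio_vec bvec;            /* a value copy, not a pointer */
    struct bvec_iter iter;
    unsigned int i = 0;

    bio_for_each_segment(bvec, bio, iter) {
            char *data = kmap(bvec.bv_page);        /* cannot fail */
            /* ... inspect bvec.bv_len bytes at bvec.bv_offset ... */
            kunmap(bvec.bv_page);
            i++;
    }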
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index a2fad39f79ba..2c0b7b57fcd5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -32,6 +32,7 @@
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -42,48 +43,7 @@
#include "extent_io.h"
#include "extent_map.h"
-struct compressed_bio {
- /* number of bios pending for this compressed extent */
- refcount_t pending_bios;
-
- /* the pages with the compressed data on them */
- struct page **compressed_pages;
-
- /* inode that owns this data */
- struct inode *inode;
-
- /* starting offset in the inode for our pages */
- u64 start;
-
- /* number of bytes in the inode we're working on */
- unsigned long len;
-
- /* number of bytes on disk */
- unsigned long compressed_len;
-
- /* the compression algorithm for this bio */
- int compress_type;
-
- /* number of compressed pages in the array */
- unsigned long nr_pages;
-
- /* IO errors */
- int errors;
- int mirror_num;
-
- /* for reads, this is the bio we are copying the data into */
- struct bio *orig_bio;
-
- /*
- * the start of a variable length array of checksums only
- * used by reads
- */
- u32 sums;
-};
-
-static int btrfs_decompress_bio(int type, struct page **pages_in,
- u64 disk_start, struct bio *orig_bio,
- size_t srclen);
+static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
@@ -94,12 +54,6 @@ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
-static struct bio *compressed_bio_alloc(struct block_device *bdev,
- u64 first_byte, gfp_t gfp_flags)
-{
- return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
-}
-
static int check_compressed_csum(struct btrfs_inode *inode,
struct compressed_bio *cb,
u64 disk_start)
@@ -173,11 +127,8 @@ static void end_compressed_bio_read(struct bio *bio)
/* ok, we're the last bio for this extent, lets start
* the decompression.
*/
- ret = btrfs_decompress_bio(cb->compress_type,
- cb->compressed_pages,
- cb->start,
- cb->orig_bio,
- cb->compressed_len);
+ ret = btrfs_decompress_bio(cb);
+
csum_failed:
if (ret)
cb->errors = 1;
@@ -355,11 +306,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bdev = fs_info->fs_devices->latest_bdev;
- bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
- if (!bio) {
- kfree(cb);
- return BLK_STS_RESOURCE;
- }
+ bio = btrfs_bio_alloc(bdev, first_byte);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -406,8 +353,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_put(bio);
- bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
- BUG_ON(!bio);
+ bio = btrfs_bio_alloc(bdev, first_byte);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -650,9 +596,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
/* include any pages we added in add_ra-bio_pages */
cb->len = bio->bi_iter.bi_size;
- comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
- if (!comp_bio)
- goto fail2;
+ comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -703,9 +647,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_put(comp_bio);
- comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
- GFP_NOFS);
- BUG_ON(!comp_bio);
+ comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -801,6 +743,7 @@ static struct list_head *find_workspace(int type)
struct list_head *workspace;
int cpus = num_online_cpus();
int idx = type - 1;
+ unsigned nofs_flag;
struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
@@ -830,7 +773,15 @@ again:
atomic_inc(total_ws);
spin_unlock(ws_lock);
+ /*
+ * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
+ * to turn it off here because we might get called from the restricted
+ * context of btrfs_compress_bio/btrfs_compress_pages
+ */
+ nofs_flag = memalloc_nofs_save();
workspace = btrfs_compress_op[idx]->alloc_workspace();
+ memalloc_nofs_restore(nofs_flag);
+
if (IS_ERR(workspace)) {
atomic_dec(total_ws);
wake_up(ws_wait);
@@ -961,19 +912,16 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
* be contiguous. They all correspond to the range of bytes covered by
* the compressed extent.
*/
-static int btrfs_decompress_bio(int type, struct page **pages_in,
- u64 disk_start, struct bio *orig_bio,
- size_t srclen)
+static int btrfs_decompress_bio(struct compressed_bio *cb)
{
struct list_head *workspace;
int ret;
+ int type = cb->compress_type;
workspace = find_workspace(type);
-
- ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
- disk_start, orig_bio,
- srclen);
+ ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
free_workspace(type, workspace);
+
return ret;
}
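
The find_workspace() hunk is the canonical use of the scoped-NOFS API
adopted alongside these patches: between memalloc_nofs_save() and
memalloc_nofs_restore(), every allocation made by the task behaves as if
GFP_NOFS had been passed, including GFP_KERNEL allocations hardcoded
deep inside helpers such as vmalloc(). That lets alloc_workspace() stay
unchanged while remaining safe to call from the restricted
btrfs_compress_bio/btrfs_compress_pages context. The general pattern:

    #include <linux/sched/mm.h>

    unsigned int nofs_flag;

    nofs_flag = memalloc_nofs_save();
    /* allocations in this scope are implicitly NOFS, so direct
     * reclaim cannot re-enter the filesystem */
    workspace = btrfs_compress_op[idx]->alloc_workspace();
    memalloc_nofs_restore(nofs_flag);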
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 680d4265d601..87f6d3332163 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -34,6 +34,45 @@
/* Maximum size of data before compression */
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
+struct compressed_bio {
+ /* number of bios pending for this compressed extent */
+ refcount_t pending_bios;
+
+ /* the pages with the compressed data on them */
+ struct page **compressed_pages;
+
+ /* inode that owns this data */
+ struct inode *inode;
+
+ /* starting offset in the inode for our pages */
+ u64 start;
+
+ /* number of bytes in the inode we're working on */
+ unsigned long len;
+
+ /* number of bytes on disk */
+ unsigned long compressed_len;
+
+ /* the compression algorithm for this bio */
+ int compress_type;
+
+ /* number of compressed pages in the array */
+ unsigned long nr_pages;
+
+ /* IO errors */
+ int errors;
+ int mirror_num;
+
+ /* for reads, this is the bio we are copying the data into */
+ struct bio *orig_bio;
+
+ /*
+ * the start of a variable length array of checksums only
+ * used by reads
+ */
+ u32 sums;
+};
+
void btrfs_init_compress(void);
void btrfs_exit_compress(void);
@@ -78,10 +117,7 @@ struct btrfs_compress_op {
unsigned long *total_out);
int (*decompress_bio)(struct list_head *workspace,
- struct page **pages_in,
- u64 disk_start,
- struct bio *orig_bio,
- size_t srclen);
+ struct compressed_bio *cb);
int (*decompress)(struct list_head *workspace,
unsigned char *data_in,
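
Moving struct compressed_bio into compression.h is what enables the
narrowed decompress_bio() operation above: instead of four unpacked
arguments, each compression backend now receives the whole
compressed_bio and pulls out what it needs. An illustrative sketch of a
converted backend (the body is hypothetical; the field mapping mirrors
the old argument list):

    static int lzo_decompress_bio(struct list_head *ws,
                                  struct compressed_bio *cb)
    {
            /* the fields formerly passed as separate arguments */
            struct page **pages_in = cb->compressed_pages;
            u64 disk_start = cb->start;
            struct bio *orig_bio = cb->orig_bio;
            size_t srclen = cb->compressed_len;

            /* ... decompress pages_in into orig_bio as before ... */
            return 0;
    }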
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a3a75f1de002..3f4daa9d6e2c 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -3667,14 +3667,14 @@ static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
/* make room in the right data area */
data_end = leaf_data_end(fs_info, right);
memmove_extent_buffer(right,
- btrfs_leaf_data(right) + data_end - push_space,
- btrfs_leaf_data(right) + data_end,
+ BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
+ BTRFS_LEAF_DATA_OFFSET + data_end,
BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
/* copy from the left data area */
- copy_extent_buffer(right, left, btrfs_leaf_data(right) +
+ copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
- btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
+ BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@@ -3888,9 +3888,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
btrfs_item_offset_nr(right, push_items - 1);
- copy_extent_buffer(left, right, btrfs_leaf_data(left) +
+ copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
leaf_data_end(fs_info, left) - push_space,
- btrfs_leaf_data(right) +
+ BTRFS_LEAF_DATA_OFFSET +
btrfs_item_offset_nr(right, push_items - 1),
push_space);
old_left_nritems = btrfs_header_nritems(left);
@@ -3917,9 +3917,9 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
if (push_items < right_nritems) {
push_space = btrfs_item_offset_nr(right, push_items - 1) -
leaf_data_end(fs_info, right);
- memmove_extent_buffer(right, btrfs_leaf_data(right) +
+ memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
- btrfs_leaf_data(right) +
+ BTRFS_LEAF_DATA_OFFSET +
leaf_data_end(fs_info, right), push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(0),
@@ -4069,8 +4069,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
nritems * sizeof(struct btrfs_item));
copy_extent_buffer(right, l,
- btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
- data_copy_size, btrfs_leaf_data(l) +
+ BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
+ data_copy_size, BTRFS_LEAF_DATA_OFFSET +
leaf_data_end(fs_info, l), data_copy_size);
rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
@@ -4607,8 +4607,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
/* shift the data */
if (from_end) {
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
- data_end + size_diff, btrfs_leaf_data(leaf) +
+ memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+ data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
data_end, old_data_start + new_size - data_end);
} else {
struct btrfs_disk_key disk_key;
@@ -4634,8 +4634,8 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
}
}
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
- data_end + size_diff, btrfs_leaf_data(leaf) +
+ memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+ data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
data_end, old_data_start - data_end);
offset = btrfs_disk_key_offset(&disk_key);
@@ -4707,8 +4707,8 @@ void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
}
/* shift the data */
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
- data_end - data_size, btrfs_leaf_data(leaf) +
+ memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+ data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
data_end, old_data - data_end);
data_end = old_data;
@@ -4790,8 +4790,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
(nritems - slot) * sizeof(struct btrfs_item));
/* shift the data */
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
- data_end - total_data, btrfs_leaf_data(leaf) +
+ memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
+ data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
data_end, old_data - data_end);
data_end = old_data;
}
@@ -4983,9 +4983,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (slot + nr != nritems) {
int data_end = leaf_data_end(fs_info, leaf);
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+ memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
data_end + dsize,
- btrfs_leaf_data(leaf) + data_end,
+ BTRFS_LEAF_DATA_OFFSET + data_end,
last_off - data_end);
for (i = slot + nr; i < nritems; i++) {
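
All of the ctree.c churn is mechanical because the old inline helper
never used its argument; it always returned the constant offset of the
items array inside struct btrfs_leaf. The new macro makes that explicit:

    /* removed helper: 'l' was ignored */
    static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
    {
            return offsetof(struct btrfs_leaf, items);
    }

    /* replacement: same value, no misleading parameter */
    #define BTRFS_LEAF_DATA_OFFSET offsetof(struct btrfs_leaf, items)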
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a0d0c79d95ed..3f3eb7b17cac 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -48,7 +48,6 @@ struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
@@ -716,6 +715,10 @@ struct btrfs_delayed_root;
#define BTRFS_FS_BTREE_ERR 11
#define BTRFS_FS_LOG1_ERR 12
#define BTRFS_FS_LOG2_ERR 13
+#define BTRFS_FS_QUOTA_OVERRIDE 14
+/* Used to record internally whether fs has been frozen */
+#define BTRFS_FS_FROZEN 15
+
/*
* Indicate that a whole-filesystem exclusive operation is running
* (device replace, resize, device add/delete, balance)
@@ -748,8 +751,7 @@ struct btrfs_fs_info {
struct rb_root block_group_cache_tree;
/* keep track of unallocated space */
- spinlock_t free_chunk_lock;
- u64 free_chunk_space;
+ atomic64_t free_chunk_space;
struct extent_io_tree freed_extents[2];
struct extent_io_tree *pinned_extents;
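
free_chunk_lock existed only to guard this single u64 counter, so the
pair collapses into an atomic64_t and both sides become lock-free. An
illustrative before/after for a typical update site (the surrounding
code is assumed, not quoted from this series):

    /* before: a spinlock around one counter */
    spin_lock(&fs_info->free_chunk_lock);
    fs_info->free_chunk_space -= num_bytes;
    spin_unlock(&fs_info->free_chunk_lock);

    /* after: one atomic op; readers use atomic64_read() */
    atomic64_sub(num_bytes, &fs_info->free_chunk_space);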
@@ -797,17 +799,7 @@ struct btrfs_fs_info {
* so it is also safe.
*/
u64 max_inline;
- /*
- * Protected by ->chunk_mutex and sb->s_umount.
- *
- * The reason that we use two lock to protect it is because only
- * remount and mount operations can change it and these two operations
- * are under sb->s_umount, but the read side (chunk allocation) can not
- * acquire sb->s_umount or the deadlock would happen. So we use two
- * locks to protect it. On the write side, we must acquire two locks,
- * and on the read side, we just need acquire one of them.
- */
- u64 alloc_start;
+
struct btrfs_transaction *running_transaction;
wait_queue_head_t transaction_throttle;
wait_queue_head_t transaction_wait;
@@ -1107,9 +1099,6 @@ struct btrfs_fs_info {
*/
struct list_head pinned_chunks;
- /* Used to record internally whether fs has been frozen */
- int fs_frozen;
-
/* Cached block sizes */
u32 nodesize;
u32 sectorsize;
@@ -1277,21 +1266,20 @@ struct btrfs_root {
/* For qgroup metadata space reserve */
atomic64_t qgroup_meta_rsv;
};
+
static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
{
return btrfs_sb(inode->i_sb)->sectorsize;
}
-static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
-{
- return blocksize - sizeof(struct btrfs_header);
-}
-
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
- return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
+
+ return info->nodesize - sizeof(struct btrfs_header);
}
+#define BTRFS_LEAF_DATA_OFFSET offsetof(struct btrfs_leaf, items)
+
static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
@@ -1553,8 +1541,27 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
s->member = cpu_to_le##bits(val); \
}
+
+static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
+ struct btrfs_dev_item *s)
+{
+ BUILD_BUG_ON(sizeof(u64) !=
+ sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+ return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
+ total_bytes));
+}
+static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb,
+ struct btrfs_dev_item *s,
+ u64 val)
+{
+ BUILD_BUG_ON(sizeof(u64) !=
+ sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+ WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
+ btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
+}
+
+
BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
-BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
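
btrfs_device_total_bytes() loses its BTRFS_SETGET_FUNCS() stamping so
the setter can check sector alignment on every store. Note that the
BUILD_BUG_ON parenthesization above still measures the member, since the
postfix -> binds to the parenthesized null pointer before sizeof
applies; the conventional spelling of the same compile-time guard is:

    /* refuse to build if the on-disk field is ever not 8 bytes wide */
    BUILD_BUG_ON(sizeof(u64) !=
                 sizeof(((struct btrfs_dev_item *)0)->total_bytes));

    /* and the setter warns on callers passing unaligned sizes */
    WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));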
@@ -2324,10 +2331,6 @@ static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
return btrfs_csum_sizes[t];
}
-static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
-{
- return offsetof(struct btrfs_leaf, items);
-}
/*
* The leaf data grows from end-to-front in the node.
@@ -2538,11 +2541,11 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
/* helper function to cast into the data area of the leaf. */
#define btrfs_item_ptr(leaf, slot, type)