Diffstat (limited to 'fs/gfs2/rgrp.c')
-rw-r--r--  fs/gfs2/rgrp.c  573
1 file changed, 190 insertions(+), 383 deletions(-)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 7f8af1eb02de..96bd6d759f29 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -15,6 +15,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
+#include <linux/rbtree.h>
#include "gfs2.h"
#include "incore.h"
@@ -328,18 +329,22 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
{
- struct gfs2_rgrpd *rgd;
+ struct rb_node **newn;
+ struct gfs2_rgrpd *cur;
spin_lock(&sdp->sd_rindex_spin);
-
- list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
- if (rgrp_contains_block(rgd, blk)) {
- list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
+ newn = &sdp->sd_rindex_tree.rb_node;
+ while (*newn) {
+ cur = rb_entry(*newn, struct gfs2_rgrpd, rd_node);
+ if (blk < cur->rd_addr)
+ newn = &((*newn)->rb_left);
+ else if (blk >= cur->rd_data0 + cur->rd_data)
+ newn = &((*newn)->rb_right);
+ else {
spin_unlock(&sdp->sd_rindex_spin);
- return rgd;
+ return cur;
}
}
-
spin_unlock(&sdp->sd_rindex_spin);
return NULL;
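
[Editorial note, not part of the patch] The new lookup walks an rbtree keyed by rd_addr but decides membership from the block interval each rgrp covers. A minimal sketch of that interval test, assuming the usual GFS2 layout in which an rgrp's header blocks at rd_addr immediately precede its data window starting at rd_data0:

static inline int sketch_rgrp_covers(const struct gfs2_rgrpd *rgd, u64 blk)
{
        /* The descent above goes left when blk < rd_addr and right when
         * blk >= rd_data0 + rd_data, so a hit means blk lies in the
         * half-open interval [rd_addr, rd_data0 + rd_data). */
        return blk >= rgd->rd_addr && blk < rgd->rd_data0 + rgd->rd_data;
}
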
@@ -354,8 +359,15 @@ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
- gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
- return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
+ const struct rb_node *n;
+ struct gfs2_rgrpd *rgd;
+
+ spin_lock(&sdp->sd_rindex_spin);
+ n = rb_first(&sdp->sd_rindex_tree);
+ rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
+ spin_unlock(&sdp->sd_rindex_spin);
+
+ return rgd;
}
/**
@@ -367,47 +379,60 @@ struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
- if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
+ const struct rb_node *n;
+
+ spin_lock(&sdp->sd_rindex_spin);
+ n = rb_next(&rgd->rd_node);
+ if (n == NULL)
+ n = rb_first(&sdp->sd_rindex_tree);
+
+ if (unlikely(&rgd->rd_node == n)) {
+ spin_unlock(&sdp->sd_rindex_spin);
return NULL;
- return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
+ }
+ rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
+ spin_unlock(&sdp->sd_rindex_spin);
+ return rgd;
}
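
[Editorial note, not part of the patch] gfs2_rgrpd_get_next() now wraps from the last rgrp back to the first and returns NULL only when it would hand back the rgrp it started from (a single-entry tree), so a caller that wants exactly one full pass must remember its starting point. A hedged sketch of that circular walk, using only the two accessors shown above:

static void sketch_scan_all_rgrps(struct gfs2_sbd *sdp)
{
        struct gfs2_rgrpd *begin = gfs2_rgrpd_get_first(sdp);
        struct gfs2_rgrpd *rgd = begin;

        do {
                /* inspect rgd here */
                rgd = gfs2_rgrpd_get_next(rgd);
        } while (rgd && rgd != begin);
}

This is the same begin/loops bookkeeping that the reworked get_local_rgrp() below relies on.
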
-static void clear_rgrpdi(struct gfs2_sbd *sdp)
+void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
- struct list_head *head;
+ int x;
+
+ for (x = 0; x < rgd->rd_length; x++) {
+ struct gfs2_bitmap *bi = rgd->rd_bits + x;
+ kfree(bi->bi_clone);
+ bi->bi_clone = NULL;
+ }
+}
+
+void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
+{
+ struct rb_node *n;
struct gfs2_rgrpd *rgd;
struct gfs2_glock *gl;
- spin_lock(&sdp->sd_rindex_spin);
- sdp->sd_rindex_forward = NULL;
- spin_unlock(&sdp->sd_rindex_spin);
-
- head = &sdp->sd_rindex_list;
- while (!list_empty(head)) {
- rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
+ while ((n = rb_first(&sdp->sd_rindex_tree))) {
+ rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
gl = rgd->rd_gl;
- list_del(&rgd->rd_list);
- list_del(&rgd->rd_list_mru);
+ rb_erase(n, &sdp->sd_rindex_tree);
if (gl) {
+ spin_lock(&gl->gl_spin);
gl->gl_object = NULL;
+ spin_unlock(&gl->gl_spin);
gfs2_glock_add_to_lru(gl);
gfs2_glock_put(gl);
}
+ gfs2_free_clones(rgd);
kfree(rgd->rd_bits);
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
}
}
-void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
-{
- mutex_lock(&sdp->sd_rindex_mutex);
- clear_rgrpdi(sdp);
- mutex_unlock(&sdp->sd_rindex_mutex);
-}
-
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
@@ -524,22 +549,34 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
return total_data;
}
-static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf)
+static void rgd_insert(struct gfs2_rgrpd *rgd)
{
- const struct gfs2_rindex *str = buf;
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
+ struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*newn) {
+ struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
+ rd_node);
+
+ parent = *newn;
+ if (rgd->rd_addr < cur->rd_addr)
+ newn = &((*newn)->rb_left);
+ else if (rgd->rd_addr > cur->rd_addr)
+ newn = &((*newn)->rb_right);
+ else
+ return;
+ }
- rgd->rd_addr = be64_to_cpu(str->ri_addr);
- rgd->rd_length = be32_to_cpu(str->ri_length);
- rgd->rd_data0 = be64_to_cpu(str->ri_data0);
- rgd->rd_data = be32_to_cpu(str->ri_data);
- rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes);
+ rb_link_node(&rgd->rd_node, parent, newn);
+ rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
}
/**
* read_rindex_entry - Pull in a new resource index entry from the disk
* @gl: The glock covering the rindex inode
*
- * Returns: 0 on success, error code otherwise
+ * Returns: 0 on success, > 0 on EOF, error code otherwise
*/
static int read_rindex_entry(struct gfs2_inode *ip,
@@ -547,44 +584,53 @@ static int read_rindex_entry(struct gfs2_inode *ip,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
- char buf[sizeof(struct gfs2_rindex)];
+ struct gfs2_rindex buf;
int error;
struct gfs2_rgrpd *rgd;
- error = gfs2_internal_read(ip, ra_state, buf, &pos,
+ if (pos >= i_size_read(&ip->i_inode))
+ return 1;
+
+ error = gfs2_internal_read(ip, ra_state, (char *)&buf, &pos,
sizeof(struct gfs2_rindex));
- if (!error)
- return 0;
- if (error != sizeof(struct gfs2_rindex)) {
- if (error > 0)
- error = -EIO;
- return error;
- }
+
+ if (error != sizeof(struct gfs2_rindex))
+ return (error == 0) ? 1 : error;
rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
error = -ENOMEM;
if (!rgd)
return error;
- mutex_init(&rgd->rd_mutex);
- lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
rgd->rd_sbd = sdp;
+ rgd->rd_addr = be64_to_cpu(buf.ri_addr);
+ rgd->rd_length = be32_to_cpu(buf.ri_length);
+ rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
+ rgd->rd_data = be32_to_cpu(buf.ri_data);
+ rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
- list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
- list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
-
- gfs2_rindex_in(rgd, buf);
error = compute_bitstructs(rgd);
if (error)
- return error;
+ goto fail;
error = gfs2_glock_get(sdp, rgd->rd_addr,
&gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
if (error)
- return error;
+ goto fail;
rgd->rd_gl->gl_object = rgd;
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+ if (rgd->rd_data > sdp->sd_max_rg_data)
+ sdp->sd_max_rg_data = rgd->rd_data;
+ spin_lock(&sdp->sd_rindex_spin);
+ rgd_insert(rgd);
+ sdp->sd_rgrps++;
+ spin_unlock(&sdp->sd_rindex_spin);
+ return error;
+
+fail:
+ kfree(rgd->rd_bits);
+ kmem_cache_free(gfs2_rgrpd_cachep, rgd);
return error;
}
@@ -595,40 +641,28 @@ static int read_rindex_entry(struct gfs2_inode *ip,
* Returns: 0 on successful update, error code otherwise
*/
-int gfs2_ri_update(struct gfs2_inode *ip)
+static int gfs2_ri_update(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct inode *inode = &ip->i_inode;
struct file_ra_state ra_state;
- u64 rgrp_count = i_size_read(inode);
- struct gfs2_rgrpd *rgd;
- unsigned int max_data = 0;
int error;
- do_div(rgrp_count, sizeof(struct gfs2_rindex));
- clear_rgrpdi(sdp);
-
file_ra_state_init(&ra_state, inode->i_mapping);
- for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
+ do {
error = read_rindex_entry(ip, &ra_state);
- if (error) {
- clear_rgrpdi(sdp);
- return error;
- }
- }
+ } while (error == 0);
+
+ if (error < 0)
+ return error;
- list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
- if (rgd->rd_data > max_data)
- max_data = rgd->rd_data;
- sdp->sd_max_rg_data = max_data;
sdp->sd_rindex_uptodate = 1;
return 0;
}
/**
- * gfs2_rindex_hold - Grab a lock on the rindex
+ * gfs2_rindex_update - Update the rindex if required
* @sdp: The GFS2 superblock
- * @ri_gh: the glock holder
*
* We grab a lock on the rindex inode to make sure that it doesn't
* change whilst we are performing an operation. We keep this lock
@@ -640,30 +674,29 @@ int gfs2_ri_update(struct gfs2_inode *ip)
* special file, which might have been updated if someone expanded the
* filesystem (via gfs2_grow utility), which adds new resource groups.
*
- * Returns: 0 on success, error code otherwise
+ * Returns: 0 on success, error code otherwise
*/
-int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
+int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
struct gfs2_glock *gl = ip->i_gl;
- int error;
-
- error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
- if (error)
- return error;
+ struct gfs2_holder ri_gh;
+ int error = 0;
/* Read new copy from disk if we don't have the latest */
if (!sdp->sd_rindex_uptodate) {
mutex_lock(&sdp->sd_rindex_mutex);
- if (!sdp->sd_rindex_uptodate) {
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
+ if (error)
+ return error;
+ if (!sdp->sd_rindex_uptodate)
error = gfs2_ri_update(ip);
- if (error)
- gfs2_glock_dq_uninit(ri_gh);
- }
+ gfs2_glock_dq_uninit(&ri_gh);
mutex_unlock(&sdp->sd_rindex_mutex);
}
+
return error;
}
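
[Editorial note, not part of the patch] With gfs2_rindex_hold() gone, callers are expected to refresh the rindex up front with gfs2_rindex_update() and no longer carry a rindex glock holder through the operation. A minimal sketch of that convention (the helper name is hypothetical; gfs2_alloc_get() and gfs2_check_blk_type() below follow the same pattern):

static int sketch_prepare_rgrp_access(struct gfs2_sbd *sdp, u64 block,
                                      struct gfs2_rgrpd **rgdp)
{
        int error = gfs2_rindex_update(sdp);    /* re-read rindex if stale */

        if (error)
                return error;
        *rgdp = gfs2_blk2rgrpd(sdp, block);     /* rbtree lookup, may be NULL */
        return *rgdp ? 0 : -EINVAL;
}
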
@@ -694,7 +727,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
}
/**
- * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
+ * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps
* @rgd: the struct gfs2_rgrpd describing the RG to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
@@ -703,8 +736,9 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
* Returns: errno
*/
-int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
+int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
+ struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_glock *gl = rgd->rd_gl;
unsigned int length = rgd->rd_length;
@@ -712,17 +746,6 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
unsigned int x, y;
int error;
- mutex_lock(&rgd->rd_mutex);
-
- spin_lock(&sdp->sd_rindex_spin);
- if (rgd->rd_bh_count) {
- rgd->rd_bh_count++;
- spin_unlock(&sdp->sd_rindex_spin);
- mutex_unlock(&rgd->rd_mutex);
- return 0;
- }
- spin_unlock(&sdp->sd_rindex_spin);
-
for (x = 0; x < length; x++) {
bi = rgd->rd_bits + x;
error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
@@ -747,15 +770,9 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
+ rgd->rd_free_clone = rgd->rd_free;
}
- spin_lock(&sdp->sd_rindex_spin);
- rgd->rd_free_clone = rgd->rd_free;
- rgd->rd_bh_count++;
- spin_unlock(&sdp->sd_rindex_spin);
-
- mutex_unlock(&rgd->rd_mutex);
-
return 0;
fail:
@@ -765,52 +782,32 @@ fail:
bi->bi_bh = NULL;
gfs2_assert_warn(sdp, !bi->bi_clone);
}
- mutex_unlock(&rgd->rd_mutex);
return error;
}
-void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
-{
- struct gfs2_sbd *sdp = rgd->rd_sbd;
-
- spin_lock(&sdp->sd_rindex_spin);
- gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
- rgd->rd_bh_count++;
- spin_unlock(&sdp->sd_rindex_spin);
-}
-
/**
- * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
+ * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_go_lock()
* @rgd: the struct gfs2_rgrpd describing the RG to read in
*
*/
-void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
+void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
- struct gfs2_sbd *sdp = rgd->rd_sbd;
+ struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
int x, length = rgd->rd_length;
- spin_lock(&sdp->sd_rindex_spin);
- gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
- if (--rgd->rd_bh_count) {
- spin_unlock(&sdp->sd_rindex_spin);
- return;
- }
-
for (x = 0; x < length; x++) {
struct gfs2_bitmap *bi = rgd->rd_bits + x;
- kfree(bi->bi_clone);
- bi->bi_clone = NULL;
brelse(bi->bi_bh);
bi->bi_bh = NULL;
}
- spin_unlock(&sdp->sd_rindex_spin);
}
-static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
- const struct gfs2_bitmap *bi)
+void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+ struct buffer_head *bh,
+ const struct gfs2_bitmap *bi)
{
struct super_block *sb = sdp->sd_vfs;
struct block_device *bdev = sb->s_bdev;
@@ -823,7 +820,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
unsigned int x;
for (x = 0; x < bi->bi_len; x++) {
- const u8 *orig = bi->bi_bh->b_data + bi->bi_offset + x;
+ const u8 *orig = bh->b_data + bi->bi_offset + x;
const u8 *clone = bi->bi_clone + bi->bi_offset + x;
u8 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
diff &= 0x55;
@@ -862,28 +859,6 @@ fail:
sdp->sd_args.ar_discard = 0;
}
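
[Editorial note, not part of the patch] A short worked sketch of the bitmap differencing a few lines up: GFS2 packs four block states into each bitmap byte, two bits per block, with GFS2_BLKST_FREE == 0, so (x | x >> 1) & 0x55 sets the low bit of each 2-bit field exactly when that block is in use. The expression therefore picks out blocks that are free on disk (*orig) but still marked in use in the clone, that is, blocks freed since the clone was taken and hence the discard candidates.

static u8 sketch_discard_candidates(u8 orig, u8 clone)
{
        u8 used_on_disk  = (orig  | (orig  >> 1)) & 0x55; /* non-free in *orig  */
        u8 used_in_clone = (clone | (clone >> 1)) & 0x55; /* non-free in *clone */

        return ~used_on_disk & used_in_clone;   /* free now, used in the clone */
}
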
-void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
-{
- struct gfs2_sbd *sdp = rgd->rd_sbd;
- unsigned int length = rgd->rd_length;
- unsigned int x;
-
- for (x = 0; x < length; x++) {
- struct gfs2_bitmap *bi = rgd->rd_bits + x;
- if (!bi->bi_clone)
- continue;
- if (sdp->sd_args.ar_discard)
- gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bi);
- clear_bit(GBF_FULL, &bi->bi_flags);
- memcpy(bi->bi_clone + bi->bi_offset,
- bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
- }
-
- spin_lock(&sdp->sd_rindex_spin);
- rgd->rd_free_clone = rgd->rd_free;
- spin_unlock(&sdp->sd_rindex_spin);
-}
-
/**
* gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
* @ip: the incore GFS2 inode structure
@@ -893,38 +868,35 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ int error;
BUG_ON(ip->i_alloc != NULL);
ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ fs_warn(sdp, "rindex update returns %d\n", error);
return ip->i_alloc;
}
/**
* try_rgrp_fit - See if a given reservation will fit in a given RG
* @rgd: the RG data
- * @al: the struct gfs2_alloc structure describing the reservation
+ * @ip: the inode
*
* If there's room for the requested blocks to be allocated from the RG:
- * Sets the $al_rgd field in @al.
*
* Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
*/
-static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
+static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
{
- struct gfs2_sbd *sdp = rgd->rd_sbd;
- int ret = 0;
+ const struct gfs2_alloc *al = ip->i_alloc;
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
return 0;
-
- spin_lock(&sdp->sd_rindex_spin);
- if (rgd->rd_free_clone >= al->al_requested) {
- al->al_rgd = rgd;
- ret = 1;
- }
- spin_unlock(&sdp->sd_rindex_spin);
-
- return ret;
+ if (rgd->rd_free_clone >= al->al_requested)
+ return 1;
+ return 0;
}
/**
@@ -992,76 +964,6 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
}
/**
- * recent_rgrp_next - get next RG from "recent" list
- * @cur_rgd: current rgrp
- *
- * Returns: The next rgrp in the recent list
- */
-
-static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
-{
- struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
- struct list_head *head;
- struct gfs2_rgrpd *rgd;
-
- spin_lock(&sdp->sd_rindex_spin);
- head = &sdp->sd_rindex_mru_list;
- if (unlikely(cur_rgd->rd_list_mru.next == head)) {
- spin_unlock(&sdp->sd_rindex_spin);
- return NULL;
- }
- rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
- spin_unlock(&sdp->sd_rindex_spin);
- return rgd;
-}
-
-/**
- * forward_rgrp_get - get an rgrp to try next from full list
- * @sdp: The GFS2 superblock
- *
- * Returns: The rgrp to try next
- */
-
-static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
-{
- struct gfs2_rgrpd *rgd;
- unsigned int journals = gfs2_jindex_size(sdp);
- unsigned int rg = 0, x;
-
- spin_lock(&sdp->sd_rindex_spin);
-
- rgd = sdp->sd_rindex_forward;
- if (!rgd) {
- if (sdp->sd_rgrps >= journals)
- rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;
-
- for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
- x++, rgd = gfs2_rgrpd_get_next(rgd))
- /* Do Nothing */;
-
- sdp->sd_rindex_forward = rgd;
- }
-
- spin_unlock(&sdp->sd_rindex_spin);
-
- return rgd;
-}
-
-/**
- * forward_rgrp_set - set the forward rgrp pointer
- * @sdp: the filesystem
- * @rgd: The new forward rgrp
- *
- */
-
-static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
-{
- spin_lock(&sdp->sd_rindex_spin);
- sdp->sd_rindex_forward = rgd;
- spin_unlock(&sdp->sd_rindex_spin);
-}
-
-/**
* get_local_rgrp - Choose and lock a rgrp for allocation
* @ip: the inode to reserve space for
* @rgp: the chosen and locked rgrp
@@ -1076,14 +978,18 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd, *begin = NULL;
struct gfs2_alloc *al = ip->i_alloc;
- int flags = LM_FLAG_TRY;
- int skipped = 0;
- int loops = 0;
int error, rg_locked;
+ int loops = 0;
+
+ if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
+ rgd = begin = ip->i_rgd;
+ else
+ rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
- rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
+ if (rgd == NULL)
+ return -EBADSLT;
- while (rgd) {
+ while (loops < 3) {
rg_locked = 0;
if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
@@ -1095,92 +1001,36 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
}
switch (error) {
case 0:
- if (try_rgrp_fit(rgd, al))
- goto out;
+ if (try_rgrp_fit(rgd, ip)) {
+ ip->i_rgd = rgd;
+ return 0;
+ }
if (rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
/* fall through */
case GLR_TRYFAILED:
- rgd = recent_rgrp_next(rgd);
- break;
-
- default:
- return error;
- }
- }
-
- /* Go through full list of rgrps */
-
- begin = rgd = forward_rgrp_get(sdp);
-
- for (;;) {
- rg_locked = 0;
-
- if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
- rg_locked = 1;
- error = 0;
- } else {
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
- &al->al_rgd_gh);
- }
- switch (error) {
- case 0:
- if (try_rgrp_fit(rgd, al))
- goto out;
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
- if (!rg_locked)
- gfs2_glock_dq_uninit(&al->al_rgd_gh);
- break;
-
- case GLR_TRYFAILED:
- skipped++;
+ rgd = gfs2_rgrpd_get_next(rgd);
+ if (rgd == begin)
+ loops++;
break;
-
default:
return error;
}
-
- rgd = gfs2_rgrpd_get_next(rgd);
- if (!rgd)
- rgd = gfs2_rgrpd_get_first(sdp);
-
- if (rgd == begin) {
- if (++loops >= 3)
- return -ENOSPC;
- if (!skipped)
- loops++;
- flags = 0;
- if (loops == 2)
- gfs2_log_flush(sdp, NULL);
- }
}
-out:
- if (begin) {
- spin_lock(&sdp->sd_rindex_spin);
- list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
- spin_unlock(&sdp->sd_rindex_spin);
- rgd = gfs2_rgrpd_get_next(rgd);
- if (!rgd)
- rgd = gfs2_rgrpd_get_first(sdp);
- forward_rgrp_set(sdp, rgd);
- }
-
- return 0;
+ return -ENOSPC;
}
/**
- * gfs2_inplace_reserve_i - Reserve space in the filesystem
+ * gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
*
* Returns: errno
*/
-int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
- char *file, unsigned int line)
+int gfs2_inplace_reserve(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
@@ -1191,45 +1041,22 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
- if (hold_rindex) {
- /* We need to hold the rindex unless the inode we're using is
- the rindex itself, in which case it's already held. */
- if (ip != GFS2_I(sdp->sd_rindex))
- error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
- else if (!sdp->sd_rgrps) /* We may not have the rindex read
- in, so: */
- error = gfs2_ri_update(ip);
- if (error)
- return error;
- }
-
-try_again:
do {
error = get_local_rgrp(ip, &last_unlinked);
- /* If there is no space, flushing the log may release some */
- if (error) {
- if (ip == GFS2_I(sdp->sd_rindex) &&
- !sdp->sd_rindex_uptodate) {
- error = gfs2_ri_update(ip);
- if (error)
- return error;
- goto try_again;
- }
- gfs2_log_flush(sdp, NULL);
+ if (error != -ENOSPC)
+ break;
+ /* Check that fs hasn't grown if writing to rindex */
+ if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
+ error = gfs2_ri_update(ip);
+ if (error)
+ break;
+ continue;
}
- } while (error && tries++ < 3);
-
- if (error) {
- if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
- gfs2_glock_dq_uninit(&al->al_ri_gh);
- return error;
- }
-
- /* no error, so we have the rgrp set in the inode's allocation. */
- al->al_file = file;
- al->al_line = line;
+ /* Flushing the log may release space */
+ gfs2_log_flush(sdp, NULL);
+ } while (tries++ < 3);
- return 0;
+ return error;
}
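
[Editorial note, not part of the patch] A trimmed, hypothetical sketch of the reshaped reservation flow (transaction setup, quota handling and i_alloc teardown omitted), showing how gfs2_alloc_get(), gfs2_inplace_reserve() and gfs2_inplace_release() pair up now that the chosen rgrp lives in ip->i_rgd rather than al->al_rgd:

static int sketch_reserve_and_release(struct gfs2_inode *ip, unsigned int blocks)
{
        struct gfs2_alloc *al = gfs2_alloc_get(ip); /* also refreshes the rindex */
        int error;

        if (!al)
                return -ENOMEM;
        al->al_requested = blocks;
        error = gfs2_inplace_reserve(ip);       /* picks an rgrp, sets ip->i_rgd */
        if (error)
                return error;
        /* ... start a transaction and call gfs2_alloc_block() here ... */
        gfs2_inplace_release(ip);               /* drops the rgrp glock holder */
        return 0;
}
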
/**
@@ -1241,20 +1068,10 @@ try_again:
void gfs2_inplace_release(struct gfs2_inode *ip)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
- if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
- fs_warn(sdp, "al_alloced = %u, al_requested = %u "
- "al_file = %s, al_line = %u\n",
- al->al_alloced, al->al_requested, al->al_file,
- al->al_line);
-
- al->al_rgd = NULL;
if (al->al_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl)
- gfs2_glock_dq_uninit(&al->al_ri_gh);
}
/**
@@ -1352,6 +1169,7 @@ do_search:
/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
bitmaps, so we must search the originals for that. */
buffer = bi->bi_bh->b_data + bi->bi_offset;
+ WARN_ON(!buffer_uptodate(bi->bi_bh));
if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
@@ -1371,6 +1189,7 @@ skip:
if (blk == BFITNOENT)
return blk;
+
*n = 1;
if (old_state == new_state)
goto out;
@@ -1503,7 +1322,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
if (al == NULL)
return -ECANCELED;
- rgd = al->al_rgd;
+ rgd = ip->i_rgd;
if (rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
@@ -1518,7 +1337,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
rgd->rd_last_alloc = blk;
block = rgd->rd_data0 + blk;
- ip->i_goal = block;
+ ip->i_goal = block + *n - 1;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error == 0) {
struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
@@ -1539,9 +1358,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
- spin_lock(&sdp->sd_rindex_spin);
rgd->rd_free_clone -= *n;
- spin_unlock(&sdp->sd_rindex_spin);
trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
*bn = block;
return 0;
@@ -1564,7 +1381,7 @@ int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_alloc *al = dip->i_alloc;
- struct gfs2_rgrpd *rgd = al->al_rgd;
+ struct gfs2_rgrpd *rgd = dip->i_rgd;
u32 blk;
u64 block;
unsigned int n = 1;
@@ -1594,9 +1411,7 @@ int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
gfs2_statfs_change(sdp, 0, -1, +1);
gfs2_trans_add_unrevoke(sdp, block, 1);
- spin_lock(&sdp->sd_rindex_spin);
rgd->rd_free_clone--;
- spin_unlock(&sdp->sd_rindex_spin);
trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
*bn = block;
return 0;
@@ -1629,8 +1444,6 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_trans_add_rg(rgd);
-
/* Directories keep their data in the metadata address space */
if (meta || ip->i_depth)
gfs2_meta_wipe(ip, bstart, blen);
@@ -1666,7 +1479,6 @@ void gfs2_unlink_di(struct inode *inode)
trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_trans_add_rg(rgd);
}
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
@@ -1688,7 +1500,6 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
gfs2_statfs_change(sdp, 0, +1, -1);
- gfs2_trans_add_rg(rgd);
}
@@ -1714,41 +1525,33 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
struct gfs2_rgrpd *rgd;
- struct gfs2_holder ri_gh, rgd_gh;
- struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
- int ri_locked = 0;
+ struct gfs2_holder rgd_gh;
int error;
- if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
- error = gfs2_rindex_hold(sdp, &ri_gh);
- if (error)
- goto fail;
- ri_locked = 1;
- }
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ return error;
error = -EINVAL;
rgd = gfs2_blk2rgrpd(sdp, no_addr);
if (!rgd)
- goto fail_rindex;
+ goto fail;
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
if (error)
- goto fail_rindex;
+ goto fail;
if (gfs2_get_block_type(rgd, no_addr) != type)
error = -ESTALE;
gfs2_glock_dq_uninit(&rgd_gh);
-fail_rindex:
- if (ri_locked)
- gfs2_glock_dq_uninit(&ri_gh);
fail:
return error;
}
/**
* gfs2_rlist_add - add a RG to a list of RGs
- * @sdp: the filesystem
+ * @ip: the inode
* @rlist: the list of resource groups
* @block: the block
*
@@ -1758,9 +1561,10 @@ fail:
*
*/
-void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
+void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
u64 block)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
struct gfs2_rgrpd **tmp;
unsigned int new_space;
@@ -1769,12 +1573,15 @@ void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
return;
- rgd = gfs2_blk2rgrpd(sdp, block);
+ if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
+ rgd = ip->i_rgd;
+ else
+ rgd = gfs2_blk2rgrpd(sdp, block);
if (!rgd) {
- if (gfs2_consist(sdp))
- fs_err(sdp, "block = %llu\n", (unsigned long long)block);
+ fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
return;
}
+ ip->i_rgd = rgd;
for (x = 0; x < rlist->rl_rgrps; x++)
if (rlist->rl_rgd[x] == rgd)