Diffstat (limited to 'fs/quota')
-rw-r--r--  fs/quota/Kconfig        59
-rw-r--r--  fs/quota/Makefile       14
-rw-r--r--  fs/quota/dquot.c      2617
-rw-r--r--  fs/quota/quota.c       524
-rw-r--r--  fs/quota/quota_tree.c  651
-rw-r--r--  fs/quota/quota_tree.h   25
-rw-r--r--  fs/quota/quota_v1.c    234
-rw-r--r--  fs/quota/quota_v2.c    237
-rw-r--r--  fs/quota/quotaio_v1.h   33
-rw-r--r--  fs/quota/quotaio_v2.h   60
10 files changed, 4454 insertions, 0 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..8047e01ef46b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
+#
+# Quota configuration
+#
+
+config QUOTA
+ bool "Quota support"
+ help
+ If you say Y here, you will be able to set per-user limits for disk
+ usage (also called disk quotas). Currently, it works for the
+ ext2, ext3, and reiserfs file systems. ext3 also supports journalled
+ quotas, for which you don't need to run quotacheck(8) after an unclean
+ shutdown.
+ For further details, read the Quota mini-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, or the documentation provided
+ with the quota tools. Quota support is probably only useful for
+ multi-user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+ bool "Report quota messages through netlink interface"
+ depends on QUOTA && NET
+ help
+ If you say Y here, quota warnings (about exceeding the softlimit,
+ reaching the hardlimit, etc.) will be reported through the netlink
+ interface. If unsure, say Y.
+
+config PRINT_QUOTA_WARNING
+ bool "Print quota warnings to console (OBSOLETE)"
+ depends on QUOTA
+ default y
+ help
+ If you say Y here, quota warnings (about exceeding the softlimit,
+ reaching the hardlimit, etc.) will be printed to the process'
+ controlling terminal. Note that this behavior is currently deprecated
+ and may go away in the future. Please use notification via the netlink
+ socket instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+ tristate
+
+config QFMT_V1
+ tristate "Old quota format support"
+ depends on QUOTA
+ help
+ This quota format was used by kernels earlier than 2.4.22. If you
+ have quota working and don't want to convert to the new quota
+ format, say Y here.
+
+config QFMT_V2
+ tristate "Quota format v2 support"
+ depends on QUOTA
+ select QUOTA_TREE
+ help
+ This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+ need this functionality, say Y here.
+
+config QUOTACTL
+ bool
+ depends on XFS_QUOTA || QUOTA
+ default y
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux quota subsystem.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA) += dquot.o
+obj-$(CONFIG_QFMT_V1) += quota_v1.o
+obj-$(CONFIG_QFMT_V2) += quota_v2.o
+obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
+obj-$(CONFIG_QUOTACTL) += quota.o
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644
index 000000000000..2ca967a5ef77
--- /dev/null
+++ b/fs/quota/dquot.c
@@ -0,0 +1,2617 @@
+/*
+ * Implementation of the diskquota system for the LINUX operating system. QUOTA
+ * is implemented using the BSD system call interface as the means of
+ * communication with the user level. This file contains the generic routines
+ * called by the different filesystems on allocation of an inode or block.
+ * These routines take care of the administration needed to have a consistent
+ * diskquota tracking system. The ideas of both user and group quotas are based
+ * on the Melbourne quota system as used on BSD derived systems. The internal
+ * implementation is based on one of the several variants of the LINUX
+ * inode-subsystem with added complexity of the diskquota system.
+ *
+ * Author: Marco van Wieringen <mvw@planets.elm.net>
+ *
+ * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
+ *
+ * Revised list management to avoid races
+ * -- Bill Hawes, <whawes@star.net>, 9/98
+ *
+ * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ * As the consequence the locking was moved from dquot_decr_...(),
+ * dquot_incr_...() to calling functions.
+ * invalidate_dquots() now writes modified dquots.
+ * Serialized quota_off() and quota_on() for mount point.
+ * Fixed a few bugs in grow_dquots().
+ * Fixed deadlock in write_dquot() - we no longer account quotas on
+ * quota files
+ * remove_dquot_ref() moved to inode.c - it now traverses through inodes
+ * add_dquot_ref() restarts after blocking
+ * Added check for bogus uid and fixed check for group in quotactl.
+ * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
+ *
+ * Used struct list_head instead of own list struct
+ * Invalidation of referenced dquots is no longer possible
+ * Improved free_dquots list management
+ * Quota and i_blocks are now updated in one place to avoid races
+ * Warnings are now delayed so we won't block in critical section
+ * Write updated not to require dquot lock
+ * Jan Kara, <jack@suse.cz>, 9/2000
+ *
+ * Added dynamic quota structure allocation
+ * Jan Kara <jack@suse.cz> 12/2000
+ *
+ * Rewritten quota interface. Implemented new quota format and
+ * formats registering.
+ * Jan Kara, <jack@suse.cz>, 2001,2002
+ *
+ * New SMP locking.
+ * Jan Kara, <jack@suse.cz>, 10/2002
+ *
+ * Added journalled quota support, fix lock inversion problems
+ * Jan Kara, <jack@suse.cz>, 2003,2004
+ *
+ * (C) Copyright 1994 - 1997 Marco van Wieringen
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/kmod.h>
+#include <linux/namei.h>
+#include <linux/buffer_head.h>
+#include <linux/capability.h>
+#include <linux/quotaops.h>
+#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#define __DQUOT_PARANOIA
+
+/*
+ * There are three quota SMP locks. dq_list_lock protects all lists with
+ * quotas and quota formats, and the dqstats structure containing statistics
+ * about those lists. dq_data_lock protects data from dq_dqb and also the
+ * mem_dqinfo structures, and also guards consistency of dquot->dq_dqb with
+ * inode->i_blocks and i_bytes. The i_blocks and i_bytes updates themselves
+ * are guarded by i_lock, acquired directly in inode_add_bytes() and
+ * inode_sub_bytes(). dq_state_lock protects modifications of quota state
+ * (on quotaon and quotaoff); readers who care about the latest values take
+ * it as well.
+ *
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * dq_list_lock > dq_state_lock
+ *
+ * Note that some things (e.g. the sb pointer, type, id) don't change during
+ * the life of the dquot structure and so needn't be protected by a lock.
+ *
+ * Any operation working on dquots via inode pointers must hold dqptr_sem. If
+ * the operation is just reading pointers from the inode (or not using them at
+ * all) the read lock is enough. If pointers are altered, the function must
+ * hold the write lock (these locking rules also apply to the S_NOQUOTA flag
+ * in the inode - note that for altering the flag, i_mutex is also needed).
+ *
+ * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
+ * from inodes (dquot_alloc_space() and such don't check the dq_lock).
+ * Currently a dquot is locked only when it is being read into memory (or
+ * space for it is being allocated) on the first dqget() and when it is being
+ * released on the last dqput(). The allocation and release operations are
+ * serialized by the dq_lock and by checking the use count in
+ * dquot_release(). Write operations on dquots don't hold dq_lock, as they
+ * copy data under the dq_data_lock spinlock to internal buffers before
+ * writing.
+ *
+ * Lock ordering (including related VFS locks) is the following:
+ * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * dqio_mutex
+ * The lock ordering of dqptr_sem imposed by the quota code is only
+ * dqonoff_sem > dqptr_sem. But the filesystem has to reckon with the fact
+ * that functions such as dquot_alloc_space() acquire dqptr_sem and usually
+ * have to be called from inside a transaction to keep filesystem consistency
+ * after a crash. Also, filesystems usually want to do some IO on the dquot
+ * from ->mark_dirty, which is called with dqptr_sem held.
+ * i_mutex on quota files is special (it ranks below dqio_mutex)
+ */
+
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
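/*
 * Editorial sketch, not part of the patch: a minimal illustration of the
 * spinlock ordering documented above (dq_data_lock is taken before
 * dq_list_lock). The helper name is hypothetical.
 */
static inline void example_locked_update(struct dquot *dquot)
{
	spin_lock(&dq_data_lock);	/* outer: guards dq_dqb usage data */
	spin_lock(&dq_list_lock);	/* inner: guards the global dquot lists */
	/* ... update dquot->dq_dqb and list membership here ... */
	spin_unlock(&dq_list_lock);	/* release in reverse order */
	spin_unlock(&dq_data_lock);
}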
+
+static char *quotatypes[] = INITQFNAMES;
+static struct quota_format_type *quota_formats; /* List of registered formats */
+static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+
+/* SLAB cache for dquot structures */
+static struct kmem_cache *dquot_cachep;
+
+int register_quota_format(struct quota_format_type *fmt)
+{
+ spin_lock(&dq_list_lock);
+ fmt->qf_next = quota_formats;
+ quota_formats = fmt;
+ spin_unlock(&dq_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_quota_format);
+
+void unregister_quota_format(struct quota_format_type *fmt)
+{
+ struct quota_format_type **actqf;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = &quota_formats; *actqf && *actqf != fmt;
+ actqf = &(*actqf)->qf_next)
+ ;
+ if (*actqf)
+ *actqf = (*actqf)->qf_next;
+ spin_unlock(&dq_list_lock);
+}
+EXPORT_SYMBOL(unregister_quota_format);
+
+static struct quota_format_type *find_quota_format(int id)
+{
+ struct quota_format_type *actqf;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
+ if (!actqf || !try_module_get(actqf->qf_owner)) {
+ int qm;
+
+ spin_unlock(&dq_list_lock);
+
+ for (qm = 0; module_names[qm].qm_fmt_id &&
+ module_names[qm].qm_fmt_id != id; qm++)
+ ;
+ if (!module_names[qm].qm_fmt_id ||
+ request_module(module_names[qm].qm_mod_name))
+ return NULL;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
+ if (actqf && !try_module_get(actqf->qf_owner))
+ actqf = NULL;
+ }
+ spin_unlock(&dq_list_lock);
+ return actqf;
+}
+
+static void put_quota_format(struct quota_format_type *fmt)
+{
+ module_put(fmt->qf_owner);
+}
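/*
 * Editorial sketch, not part of the patch: the registration pattern a quota
 * format module is expected to follow with the API above. The names and the
 * empty ops table are hypothetical; quota_v1.c and quota_v2.c follow this
 * shape, wiring the functions up with module_init()/module_exit().
 */
static struct quota_format_ops example_format_ops;	/* would hold the callbacks */

static struct quota_format_type example_format = {
	.qf_fmt_id = QFMT_VFS_OLD,	/* format id looked up by find_quota_format() */
	.qf_ops    = &example_format_ops,
	.qf_owner  = THIS_MODULE,
};

static int __init example_format_init(void)
{
	return register_quota_format(&example_format);
}

static void __exit example_format_exit(void)
{
	unregister_quota_format(&example_format);
}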
+
+/*
+ * Dquot List Management:
+ * The quota code uses three lists for dquot management: the inuse_list,
+ * free_dquots, and dquot_hash[] array. A single dquot structure may be
+ * on all three lists, depending on its current state.
+ *
+ * All dquots are placed at the end of inuse_list when first created, and this
+ * list is used for the invalidate operation, which must look at every dquot.
+ *
+ * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+ * and this list is searched whenever we need an available dquot. Dquots are
+ * removed from the list as soon as they are used again, and
+ * dqstats.free_dquots gives the number of dquots on the list. When a
+ * dquot is invalidated it's completely released from memory.
+ *
+ * Dquots with a specific identity (device, type and id) are placed on
+ * one of the dquot_hash[] hash chains. This provides an efficient search
+ * mechanism to locate a specific dquot.
+ */
+
+static LIST_HEAD(inuse_list);
+static LIST_HEAD(free_dquots);
+static unsigned int dq_hash_bits, dq_hash_mask;
+static struct hlist_head *dquot_hash;
+
+struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
+
+static inline unsigned int
+hashfn(const struct super_block *sb, unsigned int id, int type)
+{
+ unsigned long tmp;
+
+ tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
+ return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
+}
+
+/*
+ * Following list functions expect dq_list_lock to be held
+ */
+static inline void insert_dquot_hash(struct dquot *dquot)
+{
+ struct hlist_head *head;
+ head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+ hlist_add_head(&dquot->dq_hash, head);
+}
+
+static inline void remove_dquot_hash(struct dquot *dquot)
+{
+ hlist_del_init(&dquot->dq_hash);
+}
+
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+ unsigned int id, int type)
+{
+ struct hlist_node *node;
+ struct dquot *dquot;
+
+ hlist_for_each (node, dquot_hash+hashent) {
+ dquot = hlist_entry(node, struct dquot, dq_hash);
+ if (dquot->dq_sb == sb && dquot->dq_id == id &&
+ dquot->dq_type == type)
+ return dquot;
+ }
+ return NULL;
+}
+
+/* Add a dquot to the tail of the free list */
+static inline void put_dquot_last(struct dquot *dquot)
+{
+ list_add_tail(&dquot->dq_free, &free_dquots);
+ dqstats.free_dquots++;
+}
+
+static inline void remove_free_dquot(struct dquot *dquot)
+{
+ if (list_empty(&dquot->dq_free))
+ return;
+ list_del_init(&dquot->dq_free);
+ dqstats.free_dquots--;
+}
+
+static inline void put_inuse(struct dquot *dquot)
+{
+ /* We add to the back of the inuse list so we don't have to restart
+ * the traversal if we block while walking the list */
+ list_add_tail(&dquot->dq_inuse, &inuse_list);
+ dqstats.allocated_dquots++;
+}
+
+static inline void remove_inuse(struct dquot *dquot)
+{
+ dqstats.allocated_dquots--;
+ list_del(&dquot->dq_inuse);
+}
+/*
+ * End of list functions needing dq_list_lock
+ */
+
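/* Editorial note: the lock/unlock pair below acts as a barrier - it waits
 * for a concurrent dquot_acquire() or dquot_release() holding dq_lock to
 * finish before the caller proceeds. */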
+static void wait_on_dquot(struct dquot *dquot)
+{
+ mutex_lock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_lock);
+}
+
+static inline int dquot_dirty(struct dquot *dquot)
+{
+ return test_bit(DQ_MOD_B, &dquot->dq_flags);
+}
+
+static inline int mark_dquot_dirty(struct dquot *dquot)
+{
+ return dquot->dq_sb->dq_op->mark_dirty(dquot);
+}
+
+int dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+ spin_lock(&dq_list_lock);
+ if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+ list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
+ info[dquot->dq_type].dqi_dirty_list);
+ spin_unlock(&dq_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+
+/* This function needs dq_list_lock */
+static inline int clear_dquot_dirty(struct dquot *dquot)
+{
+ if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
+ return 0;
+ list_del_init(&dquot->dq_dirty);
+ return 1;
+}
+
+void mark_info_dirty(struct super_block *sb, int type)
+{
+ set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+}
+EXPORT_SYMBOL(mark_info_dirty);
+
+/*
+ * Read dquot from disk and alloc space for it
+ */
+
+int dquot_acquire(struct dquot *dquot)
+{
+ int ret = 0, ret2 = 0;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dqopt->dqio_mutex);
+ if (!test_bit(DQ_READ_B, &dquot->dq_flags))
+ ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+ if (ret < 0)
+ goto out_iolock;
+ set_bit(DQ_READ_B, &dquot->dq_flags);
+ /* Instantiate dquot if needed */
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
+ ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+ /* Write the info if needed */
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
+ if (ret < 0)
+ goto out_iolock;
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out_iolock;
+ }
+ }
+ set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_iolock:
+ mutex_unlock(&dqopt->dqio_mutex);
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_acquire);
+
+/*
+ * Write dquot to disk
+ */
+int dquot_commit(struct dquot *dquot)
+{
+ int ret = 0, ret2 = 0;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dqopt->dqio_mutex);
+ spin_lock(&dq_list_lock);
+ if (!clear_dquot_dirty(dquot)) {
+ spin_unlock(&dq_list_lock);
+ goto out_sem;
+ }
+ spin_unlock(&dq_list_lock);
+ /* An inactive dquot can exist only if there was an error during
+ * read/init => we'd better not write it */
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
+ if (ret >= 0)
+ ret = ret2;
+ }
+out_sem:
+ mutex_unlock(&dqopt->dqio_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_commit);
+
+/*
+ * Release dquot
+ */
+int dquot_release(struct dquot *dquot)
+{
+ int ret = 0, ret2 = 0;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dquot->dq_lock);
+ /* Check whether we are not racing with some other dqget() */
+ if (atomic_read(&dquot->dq_count) > 1)
+ goto out_dqlock;
+ mutex_lock(&dqopt->dqio_mutex);
+ if (dqopt->ops[dquot->dq_type]->release_dqblk) {
+ ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+ /* Write the info */
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
+ if (ret >= 0)
+ ret = ret2;
+ }
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ mutex_unlock(&dqopt->dqio_mutex);
+out_dqlock:
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_release);
+
+void dquot_destroy(struct dquot *dquot)
+{
+ kmem_cache_free(dquot_cachep, dquot);
+}
+EXPORT_SYMBOL(dquot_destroy);
+
+static inline void do_destroy_dquot(struct dquot *dquot)
+{
+ dquot->dq_sb->dq_op->destroy_dquot(dquot);
+}
+
+/* Invalidate all dquots on the list. Note that this function is called after
+ * quota is disabled and pointers from inodes removed so there cannot be new
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list), or a parallel quotactl call. We have to wait for such users.
+ */
+static void invalidate_dquots(struct super_block *sb, int type)
+{
+ struct dquot *dquot, *tmp;
+
+restart:
+ spin_lock(&dq_list_lock);
+ list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+ if (dquot->dq_sb != sb)
+ continue;
+ if (dquot->dq_type != type)
+ continue;
+ /* Wait for dquot users */
+ if (atomic_read(&dquot->dq_count)) {
+ DEFINE_WAIT(wait);
+
+ atomic_inc(&dquot->dq_count);
+ prepare_to_wait(&dquot->dq_wait_unused, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&dq_list_lock);
+ /* Once dqput() wakes us up, we know it's time to free
+ * the dquot.
+ * IMPORTANT: we rely on the fact that there is always
+ * at most one process waiting for dquot to free.
+ * Otherwise dq_count would be > 1 and we would never
+ * wake up.
+ */
+ if (atomic_read(&dquot->dq_count) > 1)
+ schedule();
+ finish_wait(&dquot->dq_wait_unused, &wait);
+ dqput(dquot);
+ /* At this moment the dquot need not exist anymore (it could have
+ * been reclaimed by prune_dqcache()). Hence we must
+ * restart. */
+ goto restart;
+ }
+ /*
+ * Quota now has no users and it has been written on last
+ * dqput()
+ */
+ remove_dquot_hash(dquot);
+ remove_free_dquot(dquot);
+ remove_inuse(dquot);
+ do_destroy_dquot(dquot);
+ }
+ spin_unlock(&dq_list_lock);
+}
+
+/* Call callback for every active dquot on given filesystem */
+int dquot_scan_active(struct super_block *sb,
+ int (*fn)(struct dquot *dquot, unsigned long priv),
+ unsigned long priv)
+{
+ struct dquot *dquot, *old_dquot = NULL;
+ int ret = 0;
+
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ spin_lock(&dq_list_lock);
+ list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ continue;
+ if (dquot->dq_sb != sb)
+ continue;
+ /* Now we have an active dquot, so we can just increase the use count */
+ atomic_inc(&dquot->dq_count);
+ dqstats.lookups++;
+ spin_unlock(&dq_list_lock);
+ dqput(old_dquot);
+ old_dquot = dquot;
+ ret = fn(dquot, priv);
+ if (ret < 0)
+ goto out;
+ spin_lock(&dq_list_lock);
+ /* We are safe to continue now because our dquot could not
+ * be moved out of the inuse list while we hold the reference */
+ }
+ spin_unlock(&dq_list_lock);
+out:
+ dqput(old_dquot);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_scan_active);
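/*
 * Editorial sketch, not part of the patch: a minimal callback for
 * dquot_scan_active(). 'priv' is an opaque cookie passed through by the
 * scan; returning 0 continues, a negative value aborts. Names hypothetical.
 */
static int example_count_dquot(struct dquot *dquot, unsigned long priv)
{
	unsigned long *counter = (unsigned long *)priv;

	(*counter)++;	/* dquot is guaranteed active and referenced here */
	return 0;	/* keep scanning */
}
/* Usage: unsigned long n = 0;
 *        dquot_scan_active(sb, example_count_dquot, (unsigned long)&n); */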
+
+int vfs_quota_sync(struct super_block *sb, int type)
+{
+ struct list_head *dirty;
+ struct dquot *dquot;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+
+ mutex_lock(&dqopt->dqonoff_mutex);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+ spin_lock(&dq_list_lock);
+ dirty = &dqopt->info[cnt].dqi_dirty_list;
+ while (!list_empty(dirty)) {
+ dquot = list_first_entry(dirty, struct dquot,
+ dq_dirty);
+ /* Only a bad dquot can be both dirty and inactive... */
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ clear_dquot_dirty(dquot);
+ continue;
+ }
+ /* Now we have an active dquot to which someone is
+ * holding a reference, so we can safely just increase
+ * the use count */
+ atomic_inc(&dquot->dq_count);
+ dqstats.lookups++;
+ spin_unlock(&dq_list_lock);
+ sb->dq_op->write_dquot(dquot);
+ dqput(dquot);
+ spin_lock(&dq_list_lock);
+ }
+ spin_unlock(&dq_list_lock);
+ }
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
+ && info_dirty(&dqopt->info[cnt]))
+ sb->dq_op->write_info(sb, cnt);
+ spin_lock(&dq_list_lock);
+ dqstats.syncs++;
+ spin_unlock(&dq_list_lock);
+ mutex_unlock(&dqopt->dqonoff_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(vfs_quota_sync);
+
+/* Free unused dquots from cache */
+static void prune_dqcache(int count)
+{
+ struct list_head *head;
+ struct dquot *dquot;
+
+ head = free_dquots.prev;
+ while (head != &free_dquots && count) {
+ dquot = list_entry(head, struct dquot, dq_free);
+ remove_dquot_hash(dquot);
+ remove_free_dquot(dquot);
+ remove_inuse(dquot);
+ do_destroy_dquot(dquot);
+ count--;
+ head = free_dquots.prev;
+ }
+}
+
+/*
+ * This is called from kswapd when we think we need some
+ * more memory
+ */
+
+static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+{
+ if (nr) {
+ spin_lock(&dq_list_lock);
+ prune_dqcache(nr);
+ spin_unlock(&dq_list_lock);
+ }
+ return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+}
+
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
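/* Editorial note: this shrinker is expected to be handed to the VM with
 * register_shrinker() from the quota init path (not shown in this hunk), so
 * that memory pressure ends up calling shrink_dqcache_memory() above. */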
+
+/*
+ * Put reference to dquot
+ * NOTE: If you change this function please check whether dqput_blocks() works right...
+ */
+void dqput(struct dquot *dquot)
+{
+ int ret;
+
+ if (!dquot)
+ return;
+#ifdef __DQUOT_PARANOIA
+ if (!atomic_read(&dquot->dq_count)) {
+ printk("VFS: dqput: trying to free free dquot\n");
+ printk("VFS: device %s, dquot of %s %d\n",
+ dquot->dq_sb->s_id,
+ quotatypes[dquot->dq_type],
+ dquot->dq_id);
+ BUG();
+ }
+#endif
+
+ spin_lock(&dq_list_lock);
+ dqstats.drops++;
+ spin_unlock(&dq_list_lock);
+we_slept:
+ spin_lock(&dq_list_lock);
+ if (atomic_read(&dquot->dq_count) > 1) {
+ /* We have more than one user... nothing to do */
+ atomic_dec(&dquot->dq_count);
+ /* Releasing dquot during quotaoff phase? */
+ if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
+ atomic_read(&dquot->dq_count) == 1)
+ wake_up(&dquot->dq_wait_unused);
+ spin_unlock(&dq_list_lock);
+ return;
+ }
+ /* Need to release dquot? */
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+ spin_unlock(&dq_list_lock);
+ /* Commit dquot before releasing */
+ ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+ if (ret < 0) {
+ printk(KERN_ERR "VFS: cannot write quota structure on "
+ "device %s (error %d). Quota may get out of "
+ "sync!\n", dquot->dq_sb->s_id, ret);
+ /*
+ * We clear dirty bit anyway, so that we avoid
+ * infinite loop here
+ */
+ spin_lock(&dq_list_lock);
+ clear_dquot_dirty(dquot);
+ spin_unlock(&dq_list_lock);
+ }
+ goto we_slept;
+ }
+ /* Clear flag in case dquot was inactive (something bad happened) */
+ clear_dquot_dirty(dquot);
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ dquot->dq_sb->dq_op->release_dquot(dquot);
+ goto we_slept;
+ }
+ atomic_dec(&dquot->dq_count);
+#ifdef __DQUOT_PARANOIA
+ /* sanity check */
+ BUG_ON(!list_empty(&dquot->dq_free));
+#endif
+ put_dquot_last(dquot);
+ spin_unlock(&dq_list_lock);
+}
+EXPORT_SYMBOL(dqput);
+
+struct dquot *dquot_alloc(struct super_block *sb, int type)
+{
+ return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
+}
+EXPORT_SYMBOL(dquot_alloc);
+
+static struct dquot *get_empty_dquot(struct super_block *sb, int type)
+{
+ struct dquot *dquot;
+
+ dquot = sb->dq_op->alloc_dquot(sb, type);
+ if(!dquot)
+ return NULL;
+
+ mutex_init(&dquot->dq_lock);
+ INIT_LIST_HEAD(&dquot->dq_free);
+ INIT_LIST_HEAD(&dquot->dq_inuse);
+ INIT_HLIST_NODE(&dquot->dq_hash);
+ INIT_LIST_HEAD(&dquot->dq_dirty);
+ init_waitqueue_head(&dquot->dq_wait_unused);
+ dquot->dq_sb = sb;
+ dquot->dq_type = type;
+ atomic_set(&dquot->dq_count, 1);
+
+ return dquot;
+}
+
+/*
+ * Get reference to dquot
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ * a) checking for quota flags under dq_list_lock and
+ * b) getting a reference to dquot before we release dq_list_lock
+ */
+struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+{
+ unsigned int hashent = hashfn(sb, id, type);
+ struct dquot *dquot = NULL, *empty = NULL;
+
+ if (!sb_has_quota_active(sb, type))
+ return NULL;
+we_slept:
+ spin_lock(&dq_list_lock);
+ spin_lock(&dq_state_lock);
+ if (!sb_has_quota_active(sb, type)) {
+ spin_unlock(&dq_state_lock);
+ spin_unlock(&dq_list_lock);
+ goto out;
+ }
+ spin_unlock(&dq_state_lock);
+
+ dquot = find_dquot(hashent, sb, id, type);
+ if (!dquot) {
+ if (!empty) {
+ spin_unlock(&dq_list_lock);
+ empty = get_empty_dquot(sb, type);
+ if (!empty)
+ schedule(); /* Try to wait for a moment... */
+ goto we_slept;
+ }
+ dquot = empty;
+ empty = NULL;
+ dquot->dq_id = id;
+ /* all dquots go on the inuse_list */
+ put_inuse(dquot);
+ /* hash it first so it can be found */
+ insert_dquot_hash(dquot);
+ dqstats.lookups++;
+ spin_unlock(&dq_list_lock);
+ } else {
+ if (!atomic_read(&dquot->dq_count))
+ remove_free_dquot(dquot);
+ atomic_inc(&dquot->dq_count);
+ dqstats.cache_hits++;
+ dqstats.lookups++;
+ spin_unlock(&dq_list_lock);
+ }
+ /* Wait for dq_lock - after this we know that either dquot_release() is
+ * already finished or it will be canceled due to dq_count > 1 test */
+ wait_on_dquot(dquot);
+ /* Read the dquot / allocate space in quota file */
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
+ sb->dq_op->acquire_dquot(dquot) < 0) {
+ dqput(dquot);
+ dquot = NULL;
+ goto out;
+ }
+#ifdef __DQUOT_PARANOIA
+ BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
+#endif
+out:
+ if (empty)
+ do_destroy_dquot(empty);
+
+ return dquot;
+}
+EXPORT_SYMBOL(dqget);
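/*
 * Editorial sketch, not part of the patch: the reference-counting contract
 * of dqget()/dqput(). The uid parameter and function name are hypothetical.
 */
static void example_inspect_quota(struct super_block *sb, unsigned int uid)
{
	struct dquot *dquot = dqget(sb, uid, USRQUOTA);

	if (!dquot)
		return;		/* quota not active, or allocation failed */
	spin_lock(&dq_data_lock);
	/* ... read dquot->dq_dqb fields here ... */
	spin_unlock(&dq_data_lock);
	dqput(dquot);		/* drop the reference dqget() took */
}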
+
+static int dqinit_needed(struct inode *inode, int type)
+{
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ return 0;
+ if (type != -1)
+ return !inode->i_dquot[type];
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (!inode->i_dquot[cnt])
+ return 1;
+ return 0;
+}
+
+/* This routine is guarded by dqonoff_mutex mutex */
+static void add_dquot_ref(struct super_block *sb, int type)
+{
+ struct inode *inode, *old_inode = NULL;
+
+ spin_lock(&inode_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ continue;
+ if (!atomic_read(&inode->i_writecount))
+ continue;
+ if (!dqinit_needed(inode, type))
+ continue;
+
+ __iget(inode);
+ spin_unlock(&inode_lock);
+
+ iput(old_inode);
+ sb->dq_op->initialize(inode, type);
+ /* We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the inode_lock.
+ * We cannot iput the inode now as we can be holding the last
+ * reference and we cannot iput it under inode_lock. So we
+ * keep the reference and iput it later. */
+ old_inode = inode;
+ spin_lock(&inode_lock);
+ }
+ spin_unlock(&inode_lock);
+ iput(old_inode);
+}
+
+/*
+ * Return 0 if dqput() won't block.
+ * (note that 1 doesn't necessarily mean blocking)
+ */
+static inline int dqput_blocks(struct dquot *dquot)
+{
+ if (atomic_read(&dquot->dq_count) <= 1)
+ return 1;
+ return 0;
+}
+
+/*
+ * Remove references to dquots from inode and add dquot to list for freeing
+ * if we have the last reference to the dquot
+ * We can't race with anybody because we hold dqptr_sem for writing...
+ */
+static int remove_inode_dquot_ref(struct inode *inode, int type,
+ struct list_head *tofree_head)
+{
+ struct dquot *dquot = inode->i_dquot[type];
+
+ inode->i_dquot[type] = NULL;
+ if (dquot) {
+ if (dqput_blocks(dquot)) {
+#ifdef __DQUOT_PARANOIA
+ if (atomic_read(&dquot->dq_count) != 1)
+ printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
+#endif
+ spin_lock(&dq_list_lock);
+ /* As the dquot currently has users, it can't be on
+ * the free list... */
+ list_add(&dquot->dq_free, tofree_head);
+ spin_unlock(&dq_list_lock);
+ return 1;
+ }
+ else
+ dqput(dquot); /* We have guaranteed we won't block */
+ }
+ return 0;
+}
+
+/*
+ * Free list of dquots
+ * Dquots are removed from inodes, and no new references can be taken, so we
+ * are the only ones holding a reference
+ */
+static void put_dquot_list(struct list_head *tofree_head)
+{
+ struct list_head *act_head;
+ struct dquot *dquot;
+
+ act_head = tofree_head->next;
+ while (act_head != tofree_head) {
+ dquot = list_entry(act_head, struct dquot, dq_free);
+ act_head = act_head->next;
+ /* Remove dquot from the list so we won't have problems... */
+ list_del_init(&dquot->dq_free);
+ dqput(dquot);
+ }
+}
+
+static void remove_dquot_ref(struct super_block *sb, int type,
+ struct list_head *tofree_head)
+{
+ struct inode *inode;
+
+ spin_lock(&inode_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ /*
+ * We also have to scan I_NEW inodes because they can already
+ * have their quota pointers initialized. Luckily, we need to touch
+ * only quota pointers and these have separate locking
+ * (dqptr_sem).
+ */
+ if (!IS_NOQUOTA(inode))
+ remove_inode_dquot_ref(inode, type, tofree_head);
+ }
+ spin_unlock(&inode_lock);
+}
+
+/* Gather all references from inodes and drop them */
+static void drop_dquot_ref(struct super_block *sb, int type)
+{
+ LIST_HEAD(tofree_head);
+
+ if (sb->dq_op) {
+ down_write(&sb_dqopt(sb)->dqptr_sem);
+ remove_dquot_ref(sb, type, &tofree_head);
+ up_write(&sb_dqopt(sb)->dqptr_sem);
+ put_dquot_list(&tofree_head);
+ }
+}
+
+static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_curinodes += number;
+}
+
+static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_curspace += number;
+}
+
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */