[PATCH] sem2mutex: quota

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Jan Kara <jack@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Ingo Molnar <mingo@elte.hu>
Date:      2006-03-23 03:00:29 -08:00 (committed by Linus Torvalds)
Parent:    c039e3134a
Commit:    d3be915fc5
6 changed files with 71 additions and 69 deletions
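
For readers coming to this from outside the sem2mutex series: the scripts rewrite
binary semaphores that are only ever used for mutual exclusion into the kernel's
struct mutex type. A minimal sketch of the pattern, using a hypothetical object
(none of the names below come from this patch):

	#include <linux/mutex.h>

	struct foo {
		struct mutex lock;		/* was: struct semaphore lock; */
	};

	static void foo_init(struct foo *f)
	{
		mutex_init(&f->lock);		/* was: sema_init(&f->lock, 1); */
	}

	static void foo_update(struct foo *f)
	{
		mutex_lock(&f->lock);		/* was: down(&f->lock); */
		/* ... critical section ... */
		mutex_unlock(&f->lock);		/* was: up(&f->lock); */
	}

The quota changes below follow exactly this shape for dq_lock, dqio_mutex and
dqonoff_mutex; dqptr_sem stays a struct rw_semaphore because it is used for
reader/writer locking, which a mutex does not provide.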

fs/dquot.c

@@ -103,12 +103,12 @@
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
* for altering the flag i_mutex is also needed). If operation is holding
* reference to dquot in other way (e.g. quotactl ops) it must be guarded by
* dqonoff_sem.
* dqonoff_mutex.
* This locking assures that:
* a) update/access to dquot pointers in inode is serialized
* b) everyone is guarded against invalidate_dquots()
*
* Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
* Each dquot has its dq_lock mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_lock).
* Currently dquot is locked only when it is being read to memory (or space for
* it is being allocated) on the first dqget() and when it is being released on
@@ -118,8 +118,9 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
* i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > dqio_sem
* i_mutex on quota files is special (it's below dqio_sem)
* i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
* dqio_mutex
* i_mutex on quota files is special (it's below dqio_mutex)
*/
static DEFINE_SPINLOCK(dq_list_lock);
@@ -280,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot)
static void wait_on_dquot(struct dquot *dquot)
{
down(&dquot->dq_lock);
up(&dquot->dq_lock);
mutex_lock(&dquot->dq_lock);
mutex_unlock(&dquot->dq_lock);
}
#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
@@ -320,8 +321,8 @@ int dquot_acquire(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
down(&dquot->dq_lock);
down(&dqopt->dqio_sem);
mutex_lock(&dquot->dq_lock);
mutex_lock(&dqopt->dqio_mutex);
if (!test_bit(DQ_READ_B, &dquot->dq_flags))
ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
if (ret < 0)
@@ -342,8 +343,8 @@ int dquot_acquire(struct dquot *dquot)
}
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
up(&dqopt->dqio_sem);
up(&dquot->dq_lock);
mutex_unlock(&dqopt->dqio_mutex);
mutex_unlock(&dquot->dq_lock);
return ret;
}
@@ -355,7 +356,7 @@ int dquot_commit(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
down(&dqopt->dqio_sem);
mutex_lock(&dqopt->dqio_mutex);
spin_lock(&dq_list_lock);
if (!clear_dquot_dirty(dquot)) {
spin_unlock(&dq_list_lock);
@@ -372,7 +373,7 @@ int dquot_commit(struct dquot *dquot)
ret = ret2;
}
out_sem:
up(&dqopt->dqio_sem);
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
@@ -384,11 +385,11 @@ int dquot_release(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
down(&dquot->dq_lock);
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
if (atomic_read(&dquot->dq_count) > 1)
goto out_dqlock;
down(&dqopt->dqio_sem);
mutex_lock(&dqopt->dqio_mutex);
if (dqopt->ops[dquot->dq_type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
/* Write the info */
@@ -398,9 +399,9 @@ int dquot_release(struct dquot *dquot)
ret = ret2;
}
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
up(&dqopt->dqio_sem);
mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
up(&dquot->dq_lock);
mutex_unlock(&dquot->dq_lock);
return ret;
}
@@ -464,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
down(&dqopt->dqonoff_sem);
mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
@@ -499,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
spin_lock(&dq_list_lock);
dqstats.syncs++;
spin_unlock(&dq_list_lock);
up(&dqopt->dqonoff_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
}
@@ -540,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
/*
* Put reference to dquot
* NOTE: If you change this function please check whether dqput_blocks() works right...
* MUST be called with either dqptr_sem or dqonoff_sem held
* MUST be called with either dqptr_sem or dqonoff_mutex held
*/
static void dqput(struct dquot *dquot)
{
@@ -605,7 +606,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
return NODQUOT;
memset((caddr_t)dquot, 0, sizeof(struct dquot));
sema_init(&dquot->dq_lock, 1);
mutex_init(&dquot->dq_lock);
INIT_LIST_HEAD(&dquot->dq_free);
INIT_LIST_HEAD(&dquot->dq_inuse);
INIT_HLIST_NODE(&dquot->dq_hash);
@@ -620,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
/*
* Get reference to dquot
* MUST be called with either dqptr_sem or dqonoff_sem held
* MUST be called with either dqptr_sem or dqonoff_mutex held
*/
static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
@@ -686,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type)
return 0;
}
/* This routine is guarded by dqonoff_sem semaphore */
/* This routine is guarded by dqonoff_mutex mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
struct list_head *p;
@@ -964,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type)
unsigned int id = 0;
int cnt, ret = 0;
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return 0;
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1028,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
int cnt, ret = NO_QUOTA;
char warntype[MAXQUOTAS];
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_add:
inode_add_bytes(inode, number);
@@ -1077,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number)
int cnt, ret = NO_QUOTA;
char warntype[MAXQUOTAS];
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1121,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number)
{
unsigned int cnt;
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
inode_sub_bytes(inode, number);
@@ -1157,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
{
unsigned int cnt;
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1197,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
char warntype[MAXQUOTAS];
/* First test before acquiring semaphore - solves deadlocks when we
* re-enter the quota code and are already holding the semaphore */
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
/* Clear the arrays */
@@ -1292,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type)
int ret;
struct quota_info *dqopt = sb_dqopt(sb);
down(&dqopt->dqio_sem);
mutex_lock(&dqopt->dqio_mutex);
ret = dqopt->ops[type]->write_file_info(sb, type);
up(&dqopt->dqio_sem);
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
@@ -1350,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type)
struct inode *toputinode[MAXQUOTAS];
/* We need to serialize quota_off() for device */
down(&dqopt->dqonoff_sem);
mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
toputinode[cnt] = NULL;
if (type != -1 && cnt != type)
@@ -1379,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type)
dqopt->info[cnt].dqi_bgrace = 0;
dqopt->ops[cnt] = NULL;
}
up(&dqopt->dqonoff_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
/* Sync the superblock so that buffers with quota data are written to
* disk (and so userspace sees correct data afterwards). */
if (sb->s_op->sync_fs)
@@ -1392,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type)
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (toputinode[cnt]) {
down(&dqopt->dqonoff_sem);
mutex_lock(&dqopt->dqonoff_mutex);
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_enabled(sb, cnt)) {
@@ -1404,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type)
mark_inode_dirty(toputinode[cnt]);
iput(toputinode[cnt]);
}
up(&dqopt->dqonoff_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev, 0);
@@ -1445,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
/* And now flush the block cache so that kernel sees the changes */
invalidate_bdev(sb->s_bdev, 0);
mutex_lock(&inode->i_mutex);
down(&dqopt->dqonoff_sem);
mutex_lock(&dqopt->dqonoff_mutex);
if (sb_has_quota_enabled(sb, type)) {
error = -EBUSY;
goto out_lock;
@@ -1470,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
down(&dqopt->dqio_sem);
mutex_lock(&dqopt->dqio_mutex);
if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
up(&dqopt->dqio_sem);
mutex_unlock(&dqopt->dqio_mutex);
goto out_file_init;
}
up(&dqopt->dqio_sem);
mutex_unlock(&dqopt->dqio_mutex);
mutex_unlock(&inode->i_mutex);
set_enable_flags(dqopt, type);
add_dquot_ref(sb, type);
up(&dqopt->dqonoff_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
@@ -1488,7 +1489,7 @@ out_file_init:
dqopt->files[type] = NULL;
iput(inode);
out_lock:
up(&dqopt->dqonoff_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
if (oldflags != -1) {
down_write(&dqopt->dqptr_sem);
/* Set the flags back (in the case of accidental quotaon()
@@ -1576,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
{
struct dquot *dquot;
down(&sb_dqopt(sb)->dqonoff_sem);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!(dquot = dqget(sb, id, type))) {
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
do_get_dqblk(dquot, di);
dqput(dquot);
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1645,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
{
struct dquot *dquot;
down(&sb_dqopt(sb)->dqonoff_sem);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!(dquot = dqget(sb, id, type))) {
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
do_set_dqblk(dquot, di);
dqput(dquot);
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1661,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
down(&sb_dqopt(sb)->dqonoff_sem);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_enabled(sb, type)) {
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
mi = sb_dqopt(sb)->info + type;
@@ -1673,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
ii->dqi_flags = mi->dqi_flags & DQF_MASK;
ii->dqi_valid = IIF_ALL;
spin_unlock(&dq_data_lock);
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1682,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
down(&sb_dqopt(sb)->dqonoff_sem);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_enabled(sb, type)) {
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
mi = sb_dqopt(sb)->info + type;
@@ -1699,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
mark_info_dirty(sb, type);
/* Force write to disk */
sb->dq_op->write_info(sb, type);
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}

fs/ext3/super.c

@@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
* DQUOT_INIT() down(dqio_sem)
* down(dqio_sem) journal_start()
* DQUOT_INIT() down(dqio_mutex)
* down(dqio_mutex) journal_start()
*
*/
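
The trace above is a classic AB-BA inversion: one path holds the journal lock and
then wants dqio_mutex, the other holds dqio_mutex and then wants the journal lock.
As a userspace analogy of the hazard only (plain pthreads, hypothetical names, not
a description of how ext3 actually resolves the ordering):

	#include <pthread.h>

	static pthread_mutex_t journal = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for the journal lock */
	static pthread_mutex_t dqio = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for dqio_mutex */

	/* Both threads take the locks in the same order (journal, then dqio).
	 * If one of them took dqio first, the two could block on each other
	 * forever, exactly as in the Process 1 / Process 2 trace above. */
	static void *worker(void *arg)
	{
		pthread_mutex_lock(&journal);
		pthread_mutex_lock(&dqio);
		/* ... work that needs both locks ... */
		pthread_mutex_unlock(&dqio);
		pthread_mutex_unlock(&journal);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, worker, NULL);
		pthread_create(&t2, NULL, worker, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}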

fs/quota.c

@@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type)
/* Now when everything is written we can discard the pagecache so
* that userspace sees the changes. We need i_mutex and so we could
* not do it inside dqonoff_sem. Moreover we need to be carefull
* not do it inside dqonoff_mutex. Moreover we need to be carefull
* about races with quotaoff() (that is the reason why we have own
* reference to inode). */
down(&sb_dqopt(sb)->dqonoff_sem);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
discard[cnt] = NULL;
if (type != -1 && cnt != type)
@@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
continue;
discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
}
up(&sb_dqopt(sb)->dqonoff_sem);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (discard[cnt]) {
mutex_lock(&discard[cnt]->i_mutex);

fs/quota_v2.c

@@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot)
ssize_t ret;
struct v2_disk_dqblk ddquot, empty;
/* dq_off is guarded by dqio_sem */
/* dq_off is guarded by dqio_mutex */
if (!dquot->dq_off)
if ((ret = dq_insert_tree(dquot)) < 0) {
printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret);

fs/super.c

@@ -77,8 +77,8 @@ static struct super_block *alloc_super(void)
s->s_count = S_BIAS;
atomic_set(&s->s_active, 1);
sema_init(&s->s_vfs_rename_sem,1);
sema_init(&s->s_dquot.dqio_sem, 1);
sema_init(&s->s_dquot.dqonoff_sem, 1);
mutex_init(&s->s_dquot.dqio_mutex);
mutex_init(&s->s_dquot.dqonoff_mutex);
init_rwsem(&s->s_dquot.dqptr_sem);
init_waitqueue_head(&s->s_wait_unfrozen);
s->s_maxbytes = MAX_NON_LFS;

include/linux/quota.h

@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#define __DQUOT_VERSION__ "dquot_6.5.1"
#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
@@ -215,7 +216,7 @@ struct dquot {
struct list_head dq_inuse; /* List of all quotas */
struct list_head dq_free; /* Free list element */
struct list_head dq_dirty; /* List of dirty dquots */
struct semaphore dq_lock; /* dquot IO lock */
struct mutex dq_lock; /* dquot IO lock */
atomic_t dq_count; /* Use count */
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
@@ -285,8 +286,8 @@ struct quota_format_type {
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
struct semaphore dqio_sem; /* lock device while I/O in progress */
struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
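
One detail worth calling out for anyone repeating this conversion by hand: a mutex
embedded in a dynamically allocated structure must be initialized at runtime with
mutex_init() before first use, as alloc_super() does for dqio_mutex and dqonoff_mutex
and get_empty_dquot() does for dq_lock above; DEFINE_MUTEX() only covers mutexes with
static storage. A hedged sketch with hypothetical names (not from this patch):

	#include <linux/mutex.h>
	#include <linux/slab.h>

	/* file-scope lock: defined and initialized statically */
	static DEFINE_MUTEX(bar_list_lock);

	struct bar {
		struct mutex lock;		/* per-object lock */
	};

	static struct bar *bar_alloc(gfp_t gfp)
	{
		struct bar *b = kzalloc(sizeof(*b), gfp);

		if (b)
			mutex_init(&b->lock);	/* runtime init, like dq_lock */
		return b;
	}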