[GFS2] Fix a page lock / glock deadlock
We've previously been using a "try lock" in readpage on the basis that it would prevent deadlocks due to the inverted lock ordering (our normal lock ordering is glock first and then page lock). Unfortunately, tests have shown that this isn't enough. If the glock has a demote request queued such that run_queue() in the glock code tries to do a demote when it's called under readpage, then it will try to write out all the dirty pages, which requires locking them. This then deadlocks with the page already locked by readpage.

The solution is to always require two calls into readpage. The first unlocks the page, gets the glock and returns AOP_TRUNCATED_PAGE; the second does the actual readpage and unlocks the glock and page as required.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
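As an illustration of the retry protocol described above (not GFS2 code), here is a minimal, self-contained userspace sketch: pthread mutexes stand in for the glock and the page lock, a plain flag stands in for gfs2_glock_is_locked_by_me(), and AOP_RETRY stands in for AOP_TRUNCATED_PAGE. The first call into readpage() drops the page lock, takes the "glock", and asks the caller to retry; the second call then runs with both locks held in the correct order.

/* Userspace model of the two-call readpage protocol; the names
 * (glock, page_lock, AOP_RETRY) are illustrative, not kernel APIs. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define AOP_RETRY 1		/* stands in for AOP_TRUNCATED_PAGE */

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;	/* outer lock */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner lock */
static bool glock_held_by_me;	/* models gfs2_glock_is_locked_by_me() */

/* Called with page_lock held, mirroring ->readpage(). */
static int readpage(void)
{
	if (!glock_held_by_me) {
		/* First call: never take the glock under the page lock
		 * (wrong order). Drop the page lock, take the glock,
		 * and ask the caller to retry. */
		pthread_mutex_unlock(&page_lock);
		pthread_mutex_lock(&glock);
		glock_held_by_me = true;
		return AOP_RETRY;
	}
	/* Second call: glock already held, page lock held by the caller. */
	printf("reading page with glock and page lock held\n");
	pthread_mutex_unlock(&glock);
	glock_held_by_me = false;
	return 0;
}

int main(void)
{
	int ret;

	do {
		pthread_mutex_lock(&page_lock);	/* caller locks the page */
		ret = readpage();		/* may ask for a retry */
	} while (ret == AOP_RETRY);
	pthread_mutex_unlock(&page_lock);
	return ret;
}

The point is that the inner lock is never held while waiting for the outer one, which is exactly the inversion the old try-lock approach could no longer avoid once run_queue() started writing back dirty pages.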
commit 7afd88d916
parent 60b779cfc1

5 changed files with 27 additions and 25 deletions
@@ -32,24 +32,23 @@
 #define GLR_TRYFAILED 13
 #define GLR_CANCELED 14
 
-static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
+static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
-	int locked = 0;
 	struct pid *pid;
 
 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
-		if (gh->gh_owner_pid == pid) {
-			locked = 1;
-			break;
-		}
+		if (gh->gh_owner_pid == pid)
+			goto out;
 	}
+	gh = NULL;
+out:
 	spin_unlock(&gl->gl_spin);
 
-	return locked;
+	return gh;
 }
 
 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
@@ -493,7 +493,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 		return dir;
 	}
 
-	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 		if (error)
 			return ERR_PTR(error);
@@ -508,23 +508,26 @@ static int __gfs2_readpage(void *file, struct page *page)
 static int gfs2_readpage(struct file *file, struct page *page)
 {
 	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-	struct gfs2_holder gh;
+	struct gfs2_holder *gh;
 	int error;
 
-	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
-	error = gfs2_glock_nq_atime(&gh);
-	if (unlikely(error)) {
+	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
+	if (!gh) {
+		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
+		if (!gh)
+			return -ENOBUFS;
+		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
 		unlock_page(page);
-		goto out;
-	}
-	error = __gfs2_readpage(file, page);
-	gfs2_glock_dq(&gh);
-out:
-	gfs2_holder_uninit(&gh);
-	if (error == GLR_TRYFAILED) {
-		yield();
+		error = gfs2_glock_nq_atime(gh);
+		if (likely(error != 0))
+			goto out;
 		return AOP_TRUNCATED_PAGE;
 	}
+	error = __gfs2_readpage(file, page);
+	gfs2_glock_dq(gh);
+out:
+	gfs2_holder_uninit(gh);
+	kfree(gh);
 	return error;
 }
 
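For readability, here is the whole of gfs2_readpage() as it stands after this hunk, reassembled from the added and context lines above; the comments marking which branch each of the two calls takes are added here and are not part of the patch.

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder *gh;
	int error;

	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
	if (!gh) {
		/* First call: no holder yet. Allocate one, drop the page
		 * lock, take the glock, and return AOP_TRUNCATED_PAGE so
		 * the caller re-locks the page and calls readpage again. */
		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
		if (!gh)
			return -ENOBUFS;
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
		unlock_page(page);
		error = gfs2_glock_nq_atime(gh);
		if (likely(error != 0))
			goto out;
		return AOP_TRUNCATED_PAGE;
	}
	/* Second call: the holder queued above is found, so the read can
	 * proceed with the glock held, after which the holder is released
	 * and freed. */
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(gh);
out:
	gfs2_holder_uninit(gh);
	kfree(gh);
	return error;
}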
@@ -826,7 +829,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 	unsigned int to = from + len;
 	int ret;
 
-	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);
+	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
 
 	ret = gfs2_meta_inode_buffer(ip, &dibh);
 	if (unlikely(ret)) {
@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
 	struct gfs2_holder d_gh;
 	struct gfs2_inode *ip = NULL;
 	int error;
-	int had_lock=0;
+	int had_lock = 0;
 
 	if (inode) {
 		if (is_bad_inode(inode))
@@ -54,7 +54,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
 	if (sdp->sd_args.ar_localcaching)
 		goto valid;
 
-	had_lock = gfs2_glock_is_locked_by_me(dip->i_gl);
+	had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
 	if (!had_lock) {
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 		if (error)
@@ -898,7 +898,7 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
 	int error;
 	int unlock = 0;
 
-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 		if (error)
 			return error;
@@ -1065,7 +1065,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	int error;
 	int unlock = 0;
 
-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
 		if (error)
 			return error;