vfs: fix possible deadlock in ext2, ext3, ext4 when using xattrs
mb_cache_entry_alloc() was allocating cache entries with GFP_KERNEL. But filesystems call this function while holding xattr_sem, so possible recursion into the filesystem violates the locking order of xattr_sem and transaction start / i_mutex for ext2-4. Change mb_cache_entry_alloc() so that filesystems can specify the desired gfp mask, and use GFP_NOFS from all of them.

Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Dave Jones <davej@redhat.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 423bec4307
commit 335e92e8a5

5 changed files with 6 additions and 6 deletions
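The hazard the message describes, as a minimal sketch (the cache and function names below are hypothetical stand-ins for the ext2/3/4 callers, not part of the patch; GFP_NOFS is GFP_KERNEL without __GFP_FS, so direct reclaim cannot call back into filesystem code):

    #include <linux/slab.h>
    #include <linux/rwsem.h>
    #include <linux/mbcache.h>

    static struct mb_cache *example_xattr_cache;    /* hypothetical cache */

    static int example_xattr_cache_insert(struct rw_semaphore *xattr_sem)
    {
            struct mb_cache_entry *ce;

            down_write(xattr_sem);  /* the fs holds xattr_sem around the insert */
            /*
             * A GFP_KERNEL allocation here may enter direct reclaim, which
             * can recurse into the filesystem and take xattr_sem (or start
             * a transaction) again: the lock-order violation this patch
             * fixes. GFP_NOFS keeps reclaim out of filesystem code.
             */
            ce = mb_cache_entry_alloc(example_xattr_cache, GFP_NOFS);
            if (!ce) {
                    up_write(xattr_sem);
                    return -ENOMEM;
            }
            /* ... fill and mb_cache_entry_insert() as in the hunks below ... */
            mb_cache_entry_release(ce);
            up_write(xattr_sem);
            return 0;
    }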
fs/ext2/xattr.c
@@ -835,7 +835,7 @@ ext2_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext2_xattr_cache);
+	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
 	if (!ce)
 		return -ENOMEM;
 	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
fs/ext3/xattr.c
@@ -1126,7 +1126,7 @@ ext3_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext3_xattr_cache);
+	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;
fs/ext4/xattr.c
@@ -1386,7 +1386,7 @@ ext4_xattr_cache_insert(struct buffer_head *bh)
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_xattr_cache);
+	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;
fs/mbcache.c
@@ -399,11 +399,11 @@ mb_cache_destroy(struct mb_cache *cache)
  * if no more memory was available.
  */
 struct mb_cache_entry *
-mb_cache_entry_alloc(struct mb_cache *cache)
+mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
 	struct mb_cache_entry *ce;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
+	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
 	if (ce) {
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
include/linux/mbcache.h
@@ -34,7 +34,7 @@ void mb_cache_destroy(struct mb_cache *);
 
 /* Functions on cache entries */
 
-struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *);
+struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
 int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
 			  sector_t, unsigned int[]);
 void mb_cache_entry_release(struct mb_cache_entry *);
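A note on the signature change above: every mb_cache_entry_alloc() caller must now pass a gfp mask explicitly. As a sketch of the other direction (my_cache is a placeholder name), a caller that holds no filesystem locks keeps the pre-patch behaviour by passing GFP_KERNEL:

    /* Outside xattr_sem / any transaction, GFP_KERNEL remains the right mask. */
    struct mb_cache_entry *ce = mb_cache_entry_alloc(my_cache, GFP_KERNEL);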