ext4: Remove old legacy block allocator
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
commit c2ea3fde61
parent 240799cdf2
12 changed files with 40 additions and 1528 deletions
fs/ext4/balloc.c: 1355 changed lines (diff suppressed because it is too large)
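Taken together, the hunks below delete the pre-mballoc allocation path: the per-inode reservation windows (struct ext4_reserve_window, struct ext4_block_alloc_info and their ext4_init_block_alloc_info()/ext4_rsv_window_add() helpers), the ext4_old_new_blocks() and ext4_discard_reservation() entry points, the EXT4_IOC_GETRSVSZ/EXT4_IOC_SETRSVSZ ioctl handling, and the mballoc/nomballoc mount options, leaving ext4_mb_new_blocks() (and the renamed ext4_discard_preallocations()) as the only allocator interface. As a rough, illustrative sketch of what a caller is left with — the helper below is not part of the commit, and the ext4_allocation_request field names are assumptions based on the 2.6.27-era structure where they do not appear in these hunks:

/*
 * Illustrative sketch only, not from this commit: with the legacy allocator
 * gone, a caller fills a struct ext4_allocation_request and calls
 * ext4_mb_new_blocks() directly; there is no longer a test_opt(sb, MBALLOC)
 * fallback to ext4_old_new_blocks().
 */
static ext4_fsblk_t alloc_data_block_sketch(handle_t *handle,
                                            struct inode *inode,
                                            ext4_lblk_t logical,
                                            ext4_fsblk_t goal, int *errp)
{
        struct ext4_allocation_request ar;

        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;       /* file the block is for */
        ar.logical = logical;   /* logical (file-relative) block to map */
        ar.goal = goal;         /* preferred physical block */
        ar.len = 1;             /* ask for a single block */

        return ext4_mb_new_blocks(handle, &ar, errp);
}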

fs/ext4/ext4.h
@@ -539,7 +539,6 @@ do { \
 #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
 #define EXT4_MOUNT_I_VERSION		0x2000000 /* i_version support */
-#define EXT4_MOUNT_MBALLOC		0x4000000 /* Buddy allocation support */
 #define EXT4_MOUNT_DELALLOC		0x8000000 /* Delalloc support */
 /* Compatibility, for having both ext2_fs.h and ext4_fs.h included at once */
 #ifndef _LINUX_EXT2_FS_H
@@ -1002,8 +1001,6 @@ extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 extern ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
                                        ext4_lblk_t iblock, ext4_fsblk_t goal,
                                        unsigned long *count, int *errp);
-extern ext4_fsblk_t ext4_old_new_blocks(handle_t *handle, struct inode *inode,
-                       ext4_fsblk_t goal, unsigned long *count, int *errp);
 extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
 extern ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
                                                s64 nblocks);
@@ -1018,8 +1015,6 @@ extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
                                        ext4_group_t block_group,
                                        struct buffer_head ** bh);
 extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
-extern void ext4_init_block_alloc_info(struct inode *);
-extern void ext4_rsv_window_add(struct super_block *sb, struct ext4_reserve_window_node *rsv);

 /* dir.c */
 extern int ext4_check_dir_entry(const char *, struct inode *,
@@ -1054,7 +1049,7 @@ extern int ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
 extern int ext4_mb_reserve_blocks(struct super_block *, int);
-extern void ext4_mb_discard_inode_preallocations(struct inode *);
+extern void ext4_discard_preallocations(struct inode *);
 extern int __init init_ext4_mballoc(void);
 extern void exit_ext4_mballoc(void);
 extern void ext4_mb_free_blocks(handle_t *, struct inode *,
@@ -1084,7 +1079,6 @@ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
                        struct kstat *stat);
 extern void ext4_delete_inode(struct inode *);
 extern int ext4_sync_inode(handle_t *, struct inode *);
-extern void ext4_discard_reservation(struct inode *);
 extern void ext4_dirty_inode(struct inode *);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);

fs/ext4/ext4_i.h
@@ -33,38 +33,6 @@ typedef __u32 ext4_lblk_t;
 /* data type for block group number */
 typedef unsigned long ext4_group_t;

-struct ext4_reserve_window {
-        ext4_fsblk_t    _rsv_start;     /* First byte reserved */
-        ext4_fsblk_t    _rsv_end;       /* Last byte reserved or 0 */
-};
-
-struct ext4_reserve_window_node {
-        struct rb_node  rsv_node;
-        __u32           rsv_goal_size;
-        __u32           rsv_alloc_hit;
-        struct ext4_reserve_window      rsv_window;
-};
-
-struct ext4_block_alloc_info {
-        /* information about reservation window */
-        struct ext4_reserve_window_node rsv_window_node;
-        /*
-         * was i_next_alloc_block in ext4_inode_info
-         * is the logical (file-relative) number of the
-         * most-recently-allocated block in this file.
-         * We use this for detecting linearly ascending allocation requests.
-         */
-        ext4_lblk_t last_alloc_logical_block;
-        /*
-         * Was i_next_alloc_goal in ext4_inode_info
-         * is the *physical* companion to i_next_alloc_block.
-         * it the physical block number of the block which was most-recently
-         * allocated to this file. This give us the goal (target) for the next
-         * allocation when we detect linearly ascending requests.
-         */
-        ext4_fsblk_t last_alloc_physical_block;
-};
-
 #define rsv_start rsv_window._rsv_start
 #define rsv_end rsv_window._rsv_end

@@ -97,9 +65,6 @@ struct ext4_inode_info {
        ext4_group_t    i_block_group;
        __u32   i_state;                /* Dynamic state flags for ext4 */

-       /* block reservation info */
-       struct ext4_block_alloc_info *i_block_alloc_info;
-
        ext4_lblk_t             i_dir_start_lookup;
 #ifdef CONFIG_EXT4DEV_FS_XATTR
        /*

fs/ext4/ext4_sb.h
@@ -67,7 +67,6 @@ struct ext4_sb_info {
        /* root of the per fs reservation window tree */
        spinlock_t s_rsv_window_lock;
        struct rb_root s_rsv_window_root;
-       struct ext4_reserve_window_node s_rsv_window_head;

        /* Journaling */
        struct inode *s_journal_inode;

fs/ext4/extents.c
@@ -2697,11 +2697,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                goto out2;
        }
        /*
-        * Okay, we need to do block allocation. Lazily initialize the block
-        * allocation info here if necessary.
+        * Okay, we need to do block allocation.
         */
-       if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
-               ext4_init_block_alloc_info(inode);

        /* find neighbour allocated blocks */
        ar.lleft = iblock;
@@ -2761,7 +2758,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        /* free data blocks we just allocated */
        /* not a good idea to call discard here directly,
         * but otherwise we'd need to call it every free() */
-       ext4_mb_discard_inode_preallocations(inode);
+       ext4_discard_preallocations(inode);
        ext4_free_blocks(handle, inode, ext_pblock(&newex),
                        ext4_ext_get_actual_len(&newex), 0);
        goto out2;
@@ -2825,7 +2822,7 @@ void ext4_ext_truncate(struct inode *inode)
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_ext_invalidate_cache(inode);

-       ext4_discard_reservation(inode);
+       ext4_discard_preallocations(inode);

        /*
         * TODO: optimization is possible here.

fs/ext4/file.c
@@ -38,7 +38,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
                        (atomic_read(&inode->i_writecount) == 1))
        {
                down_write(&EXT4_I(inode)->i_data_sem);
-               ext4_discard_reservation(inode);
+               ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)

fs/ext4/ialloc.c
@@ -817,7 +817,6 @@ got:
        ei->i_flags &= ~EXT4_DIRSYNC_FL;
        ei->i_file_acl = 0;
        ei->i_dtime = 0;
-       ei->i_block_alloc_info = NULL;
        ei->i_block_group = group;

        ext4_set_inode_flags(inode);

fs/ext4/inode.c
@@ -486,18 +486,9 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                   Indirect *partial)
 {
-       struct ext4_block_alloc_info *block_i;
-
-       block_i = EXT4_I(inode)->i_block_alloc_info;
-
        /*
-        * try the heuristic for sequential allocation,
-        * failing that at least try to get decent locality.
+        * XXX need to get goal block from mballoc's data structures
         */
-       if (block_i && (block == block_i->last_alloc_logical_block + 1)
-               && (block_i->last_alloc_physical_block != 0)) {
-               return block_i->last_alloc_physical_block + 1;
-       }

        return ext4_find_near(inode, partial);
 }
@@ -757,10 +748,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 {
        int i;
        int err = 0;
-       struct ext4_block_alloc_info *block_i;
        ext4_fsblk_t current_block;

-       block_i = EXT4_I(inode)->i_block_alloc_info;
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
@@ -786,17 +775,6 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                *(where->p + i) = cpu_to_le32(current_block++);
        }

-       /*
-        * update the most recently allocated logical & physical block
-        * in i_block_alloc_info, to assist find the proper goal block for next
-        * allocation
-        */
-       if (block_i) {
-               block_i->last_alloc_logical_block = block + blks - 1;
-               block_i->last_alloc_physical_block =
-                               le32_to_cpu(where[num].key) + blks - 1;
-       }
-
        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = ext4_current_time(inode);
@@ -914,12 +892,8 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
                goto cleanup;

        /*
-        * Okay, we need to do block allocation. Lazily initialize the block
-        * allocation info here if necessary
+        * Okay, we need to do block allocation.
         */
-       if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
-               ext4_init_block_alloc_info(inode);
-
        goal = ext4_find_goal(inode, iblock, partial);

        /* the number of blocks need to allocate for [d,t]indirect blocks */
@@ -3738,7 +3712,7 @@ void ext4_truncate(struct inode *inode)
         */
        down_write(&ei->i_data_sem);

-       ext4_discard_reservation(inode);
+       ext4_discard_preallocations(inode);

        /*
         * The orphan list entry will now protect us from any crash which
@@ -4071,7 +4045,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        ei->i_acl = EXT4_ACL_NOT_CACHED;
        ei->i_default_acl = EXT4_ACL_NOT_CACHED;
 #endif
-       ei->i_block_alloc_info = NULL;

        ret = __ext4_get_inode_loc(inode, &iloc, 0);
        if (ret < 0)

fs/ext4/ioctl.c
@@ -23,7 +23,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        struct inode *inode = filp->f_dentry->d_inode;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int flags;
-       unsigned short rsv_window_size;

        ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

@@ -190,49 +189,6 @@ setversion_out:
                return ret;
        }
 #endif
-       case EXT4_IOC_GETRSVSZ:
-               if (test_opt(inode->i_sb, RESERVATION)
-                       && S_ISREG(inode->i_mode)
-                       && ei->i_block_alloc_info) {
-                       rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
-                       return put_user(rsv_window_size, (int __user *)arg);
-               }
-               return -ENOTTY;
-       case EXT4_IOC_SETRSVSZ: {
-               int err;
-
-               if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
-                       return -ENOTTY;
-
-               if (!is_owner_or_cap(inode))
-                       return -EACCES;
-
-               if (get_user(rsv_window_size, (int __user *)arg))
-                       return -EFAULT;
-
-               err = mnt_want_write(filp->f_path.mnt);
-               if (err)
-                       return err;
-
-               if (rsv_window_size > EXT4_MAX_RESERVE_BLOCKS)
-                       rsv_window_size = EXT4_MAX_RESERVE_BLOCKS;
-
-               /*
-                * need to allocate reservation structure for this inode
-                * before set the window size
-                */
-               down_write(&ei->i_data_sem);
-               if (!ei->i_block_alloc_info)
-                       ext4_init_block_alloc_info(inode);
-
-               if (ei->i_block_alloc_info){
-                       struct ext4_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
-                       rsv->rsv_goal_size = rsv_window_size;
-               }
-               up_write(&ei->i_data_sem);
-               mnt_drop_write(filp->f_path.mnt);
-               return 0;
-       }
        case EXT4_IOC_GROUP_EXTEND: {
                ext4_fsblk_t n_blocks_count;
                struct super_block *sb = inode->i_sb;
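The two hunks above drop the rsv_window_size local and the whole EXT4_IOC_GETRSVSZ / EXT4_IOC_SETRSVSZ handling from ext4_ioctl(), so the reservation-window ioctls disappear along with the allocator they configured. The switch's default branch is not part of this excerpt, so the exact error an application sees afterwards is an assumption; below is a hedged user-space sketch of probing the removed ioctl (the ENOTTY fallback and the historical _IOR('f', 5, long) value are assumptions, not shown in this diff):

/*
 * Hedged user-space sketch, not part of the commit: probe the removed
 * reservation-window ioctl.  Assumes unhandled commands fail with ENOTTY
 * and that the historical ioctl number was _IOR('f', 5, long); neither
 * fact is shown in this diff.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef EXT4_IOC_GETRSVSZ
#define EXT4_IOC_GETRSVSZ _IOR('f', 5, long)
#endif

int main(int argc, char **argv)
{
        long rsvsz = 0;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        if (ioctl(fd, EXT4_IOC_GETRSVSZ, &rsvsz) == 0)
                printf("reservation window goal: %ld blocks\n", rsvsz);
        else if (errno == ENOTTY)
                printf("reservation windows not supported by this kernel\n");
        else
                perror("EXT4_IOC_GETRSVSZ");
        close(fd);
        return 0;
}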

fs/ext4/mballoc.c
@@ -534,9 +534,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
        void *buddy;
        void *buddy2;

-       if (!test_opt(sb, MBALLOC))
-               return 0;
-
        {
                static int mb_check_counter;
                if (mb_check_counter++ % 100 != 0)
@@ -2487,19 +2484,14 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        unsigned max;
        int ret;

-       if (!test_opt(sb, MBALLOC))
-               return 0;
-
        i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);

        sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
        if (sbi->s_mb_offsets == NULL) {
-               clear_opt(sbi->s_mount_opt, MBALLOC);
                return -ENOMEM;
        }
        sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
        if (sbi->s_mb_maxs == NULL) {
-               clear_opt(sbi->s_mount_opt, MBALLOC);
                kfree(sbi->s_mb_maxs);
                return -ENOMEM;
        }
@@ -2522,7 +2514,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        /* init file for buddy data */
        ret = ext4_mb_init_backend(sb);
        if (ret != 0) {
-               clear_opt(sbi->s_mount_opt, MBALLOC);
                kfree(sbi->s_mb_offsets);
                kfree(sbi->s_mb_maxs);
                return ret;
@@ -2544,7 +2535,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)

        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
        if (sbi->s_locality_groups == NULL) {
-               clear_opt(sbi->s_mount_opt, MBALLOC);
                kfree(sbi->s_mb_offsets);
                kfree(sbi->s_mb_maxs);
                return -ENOMEM;
@@ -2590,9 +2580,6 @@ int ext4_mb_release(struct super_block *sb)
        struct ext4_group_info *grinfo;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

-       if (!test_opt(sb, MBALLOC))
-               return 0;
-
        /* release freed, non-committed blocks */
        spin_lock(&sbi->s_md_lock);
        list_splice_init(&sbi->s_closed_transaction,
@@ -3805,7 +3792,7 @@ out:
  *
  * FIXME!! Make sure it is valid at all the call sites
  */
-void ext4_mb_discard_inode_preallocations(struct inode *inode)
+void ext4_discard_preallocations(struct inode *inode)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct super_block *sb = inode->i_sb;
@@ -3817,7 +3804,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
        struct ext4_buddy e4b;
        int err;

-       if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
+       if (!S_ISREG(inode->i_mode)) {
                /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
                return;
        }
@@ -4300,11 +4287,6 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
        sb = ar->inode->i_sb;
        sbi = EXT4_SB(sb);

-       if (!test_opt(sb, MBALLOC)) {
-               block = ext4_old_new_blocks(handle, ar->inode, ar->goal,
-                                           &(ar->len), errp);
-               return block;
-       }
        if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
                /*
                 * With delalloc we already reserved the blocks

fs/ext4/resize.c
@@ -870,11 +870,10 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
         * We can allocate memory for mb_alloc based on the new group
         * descriptor
         */
-       if (test_opt(sb, MBALLOC)) {
-               err = ext4_mb_add_more_groupinfo(sb, input->group, gdp);
-               if (err)
-                       goto exit_journal;
-       }
+       err = ext4_mb_add_more_groupinfo(sb, input->group, gdp);
+       if (err)
+               goto exit_journal;
+
        /*
         * Make the new blocks and inodes valid next. We do this before
         * increasing the group count so that once the group is enabled,
@@ -1086,8 +1085,15 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        /*
         * Mark mballoc pages as not up to date so that they will be updated
         * next time they are loaded by ext4_mb_load_buddy.
+        *
+        * XXX Bad, Bad, BAD!!! We should not be overloading the
+        * Uptodate flag, particularly on thte bitmap bh, as way of
+        * hinting to ext4_mb_load_buddy() that it needs to be
+        * overloaded. A user could take a LVM snapshot, then do an
+        * on-line fsck, and clear the uptodate flag, and this would
+        * not be a bug in userspace, but a bug in the kernel. FIXME!!!
         */
-       if (test_opt(sb, MBALLOC)) {
+       {
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                struct inode *inode = sbi->s_buddy_cache;
                int blocks_per_page;

fs/ext4/super.c
@@ -574,7 +574,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_acl = EXT4_ACL_NOT_CACHED;
        ei->i_default_acl = EXT4_ACL_NOT_CACHED;
 #endif
-       ei->i_block_alloc_info = NULL;
        ei->vfs_inode.i_version = 1;
        ei->vfs_inode.i_data.writeback_index = 0;
        memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
@@ -633,7 +632,6 @@ static void destroy_inodecache(void)

 static void ext4_clear_inode(struct inode *inode)
 {
-       struct ext4_block_alloc_info *rsv = EXT4_I(inode)->i_block_alloc_info;
 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
        if (EXT4_I(inode)->i_acl &&
                        EXT4_I(inode)->i_acl != EXT4_ACL_NOT_CACHED) {
@@ -646,10 +644,7 @@ static void ext4_clear_inode(struct inode *inode)
                EXT4_I(inode)->i_default_acl = EXT4_ACL_NOT_CACHED;
        }
 #endif
-       ext4_discard_reservation(inode);
-       EXT4_I(inode)->i_block_alloc_info = NULL;
-       if (unlikely(rsv))
-               kfree(rsv);
+       ext4_discard_preallocations(inode);
        jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
                                       &EXT4_I(inode)->jinode);
 }
@@ -760,8 +755,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",nobh");
        if (!test_opt(sb, EXTENTS))
                seq_puts(seq, ",noextents");
-       if (!test_opt(sb, MBALLOC))
-               seq_puts(seq, ",nomballoc");
        if (test_opt(sb, I_VERSION))
                seq_puts(seq, ",i_version");
        if (!test_opt(sb, DELALLOC))
@@ -1373,12 +1366,6 @@ set_qf_format:
        case Opt_nodelalloc:
                clear_opt(sbi->s_mount_opt, DELALLOC);
                break;
-       case Opt_mballoc:
-               set_opt(sbi->s_mount_opt, MBALLOC);
-               break;
-       case Opt_nomballoc:
-               clear_opt(sbi->s_mount_opt, MBALLOC);
-               break;
        case Opt_stripe:
                if (match_int(&args[0], &option))
                        return 0;
@@ -2040,11 +2027,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ext4_warning(sb, __func__,
                        "extents feature not enabled on this filesystem, "
                        "use tune2fs.\n");
-       /*
-        * turn on mballoc code by default in ext4 filesystem
-        * Use -o nomballoc to turn it off
-        */
-       set_opt(sbi->s_mount_opt, MBALLOC);

        /*
         * enable delayed allocation by default
@@ -2301,19 +2283,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount3;
        }

-       /* per fileystem reservation list head & lock */
-       spin_lock_init(&sbi->s_rsv_window_lock);
-       sbi->s_rsv_window_root = RB_ROOT;
-       /* Add a single, static dummy reservation to the start of the
-        * reservation window list --- it gives us a placeholder for
-        * append-at-start-of-list which makes the allocation logic
-        * _much_ simpler. */
-       sbi->s_rsv_window_head.rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
-       sbi->s_rsv_window_head.rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
-       sbi->s_rsv_window_head.rsv_alloc_hit = 0;
-       sbi->s_rsv_window_head.rsv_goal_size = 0;
-       ext4_rsv_window_add(sb, &sbi->s_rsv_window_head);
-
        sbi->s_stripe = ext4_get_stripe_size(sbi);

        /*
@@ -2510,7 +2479,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                printk(KERN_INFO "EXT4-fs: delayed allocation enabled\n");

        ext4_ext_init(sb);
-       ext4_mb_init(sb, needs_recovery);
+       err = ext4_mb_init(sb, needs_recovery);
+       if (err) {
+               printk(KERN_ERR "EXT4-fs: failed to initalize mballoc (%d)\n",
+                      err);
+               goto failed_mount4;
+       }

        lock_kernel();
        return 0;