Btrfs: More throttle tuning
* Make walk_down_tree wake up throttled tasks more often
* Make walk_down_tree call cond_resched during long loops
* As the size of the ref cache grows, wait longer in throttle
* Get rid of the reada code in walk_down_tree; the leaves don't get read
  anymore, thanks to the ref cache.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
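For context, the throttling handshake that the first three bullets tune works roughly as follows. This is a minimal sketch, not the verbatim kernel code: the helper names throttle_kick() and throttle_wait() are made up for illustration, and the prepare_to_wait() step is assumed from the body of throttle_on_drops(), which is only partially visible in the diff below.

/*
 * Waker side: the three lines this patch adds to the long ref-drop
 * loops. Record one unit of progress, wake anyone parked on the
 * transaction throttle, and give up the CPU.
 */
static void throttle_kick(struct btrfs_fs_info *info)
{
	atomic_inc(&info->throttle_gen);
	wake_up(&info->transaction_throttle);
	cond_resched();
}

/*
 * Waiter side: a throttled task sleeps until throttle_gen advances,
 * i.e. until the dropping task has made at least one unit of progress.
 */
static void throttle_wait(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);
	int thr = atomic_read(&info->throttle_gen);

	do {
		prepare_to_wait(&info->transaction_throttle, &wait,
				TASK_UNINTERRUPTIBLE);
		if (thr == atomic_read(&info->throttle_gen))
			schedule();
		finish_wait(&info->transaction_throttle, &wait);
	} while (thr == atomic_read(&info->throttle_gen));
}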
commit 2dd3e67b1e
parent 65b51a009e
3 changed files with 29 additions and 58 deletions
fs/btrfs/disk-io.c

@@ -188,13 +188,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 	btrfs_csum_final(crc, result);
 
 	if (verify) {
-		int from_this_trans = 0;
-
-		if (root->fs_info->running_transaction &&
-		    btrfs_header_generation(buf) ==
-		    root->fs_info->running_transaction->transid)
-			from_this_trans = 1;
-
 		/* FIXME, this is not good */
 		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
 			u32 val;
@@ -203,11 +196,9 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 
 			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
 			printk("btrfs: %s checksum verify failed on %llu "
-			       "wanted %X found %X from_this_trans %d "
-			       "level %d\n",
+			       "wanted %X found %X level %d\n",
 			       root->fs_info->sb->s_id,
-			       buf->start, val, found, from_this_trans,
-			       btrfs_header_level(buf));
+			       buf->start, val, found, btrfs_header_level(buf));
 			return 1;
 		}
 	} else {
fs/btrfs/extent-tree.c

@@ -2369,6 +2369,11 @@ static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
 					leaf_owner, leaf_generation,
 					key.objectid, key.offset, 0);
 		mutex_unlock(&root->fs_info->alloc_mutex);
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+
 		BUG_ON(ret);
 	}
 	return 0;
@@ -2389,6 +2394,11 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
 					ref->owner, ref->generation,
 					info->objectid, info->offset, 0);
 		mutex_unlock(&root->fs_info->alloc_mutex);
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+
 		BUG_ON(ret);
 		info++;
 	}
@@ -2396,51 +2406,6 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static void noinline reada_walk_down(struct btrfs_root *root,
-				     struct extent_buffer *node,
-				     int slot)
-{
-	u64 bytenr;
-	u64 last = 0;
-	u32 nritems;
-	u32 refs;
-	u32 blocksize;
-	int ret;
-	int i;
-	int level;
-	int skipped = 0;
-
-	nritems = btrfs_header_nritems(node);
-	level = btrfs_header_level(node);
-	if (level)
-		return;
-
-	for (i = slot; i < nritems && skipped < 32; i++) {
-		bytenr = btrfs_node_blockptr(node, i);
-		if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
-			     (last > bytenr && last - bytenr > 32 * 1024))) {
-			skipped++;
-			continue;
-		}
-		blocksize = btrfs_level_size(root, level - 1);
-		if (i != slot) {
-			ret = lookup_extent_ref(NULL, root, bytenr,
-						blocksize, &refs);
-			BUG_ON(ret);
-			if (refs != 1) {
-				skipped++;
-				continue;
-			}
-		}
-		ret = readahead_tree_block(root, bytenr, blocksize,
-					   btrfs_node_ptr_generation(node, i));
-		last = bytenr + blocksize;
-		cond_resched();
-		if (ret)
-			break;
-	}
-}
-
 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
 			      u32 *refs)
 {
@@ -2549,6 +2514,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 
 			atomic_inc(&root->fs_info->throttle_gen);
 			wake_up(&root->fs_info->transaction_throttle);
+			cond_resched();
 
 			continue;
 		}
@@ -2578,8 +2544,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
 			free_extent_buffer(next);
 
-			if (path->slots[*level] == 0)
-				reada_walk_down(root, cur, path->slots[*level]);
 			next = read_tree_block(root, bytenr, blocksize,
 					       ptr_gen);
 			cond_resched();
@@ -2601,6 +2565,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 		path->nodes[*level-1] = next;
 		*level = btrfs_header_level(next);
 		path->slots[*level] = 0;
+		cond_resched();
 	}
 out:
 	WARN_ON(*level < 0);
fs/btrfs/transaction.c

@@ -210,7 +210,9 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 static void throttle_on_drops(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *info = root->fs_info;
+	int harder_count = 0;
 
+harder:
 	if (atomic_read(&info->throttles)) {
 		DEFINE_WAIT(wait);
 		int thr;
@@ -226,6 +228,19 @@ static void throttle_on_drops(struct btrfs_root *root)
 			schedule();
 			finish_wait(&info->transaction_throttle, &wait);
 		} while (thr == atomic_read(&info->throttle_gen));
+		harder_count++;
+
+		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
+		    harder_count < 2)
+			goto harder;
+
+		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
+		    harder_count < 10)
+			goto harder;
+
+		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
+		    harder_count < 20)
+			goto harder;
 	}
 }
 
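Read out of the hunk above: after being woken, a waiter now re-enters the wait while the ref cache is still large (up to 2 rounds above 1MB of cached refs, 10 rounds above 5MB, 20 rounds above 10MB), so callers stay throttled longer as the backlog grows. The same logic as a standalone sketch (illustrative only; wait_one_gen() is a hypothetical stand-in for the DEFINE_WAIT/schedule loop visible above):

/*
 * Escalating throttle: keep waiting for more throttle_gen ticks while
 * the total size of the ref cache stays large.
 */
static void throttle_harder(struct btrfs_fs_info *info)
{
	int harder_count = 0;

	while (atomic_read(&info->throttles)) {
		wait_one_gen(info);	/* sleep until throttle_gen advances */
		harder_count++;

		if ((info->total_ref_cache_size > 1 * 1024 * 1024 &&
		     harder_count < 2) ||
		    (info->total_ref_cache_size > 5 * 1024 * 1024 &&
		     harder_count < 10) ||
		    (info->total_ref_cache_size > 10 * 1024 * 1024 &&
		     harder_count < 20))
			continue;	/* still a big backlog: wait again */
		break;
	}
}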