Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 11:46:19 +00:00)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  jbd2: fix race between write_metadata_buffer and get_write_access
  ext4: Fix ext4_mb_initialize_context() to initialize all fields
  ext4: fix null handler of ioctls in no journal mode
  ext4: Fix buffer head reference leak in no-journal mode
  ext4: Move __ext4_journalled_writepage() to avoid forward declaration
  ext4: Fix mmap/truncate race when blocksize < pagesize && !nodellaoc
  ext4: Fix mmap/truncate race when blocksize < pagesize && delayed allocation
  ext4: Don't look at buffer_heads outside i_size.
  ext4: Fix goal inum check in the inode allocator
  ext4: fix no journal corruption with locale-gen
  ext4: Calculate required journal credits for inserting an extent properly
  ext4: Fix truncation of symlinks after failed write
  jbd2: Fix a race between checkpointing code and journal_get_write_access()
  ext4: Use rcu_barrier() on module unload.
  ext4: naturally align struct ext4_allocation_request
  ext4: mark several more functions in mballoc.c as noinline
  ext4: Fix potential reclaim deadlock when truncating partial block
  jbd2: Remove GFP_ATOMIC kmalloc from inside spinlock critical region
  ext4: Fix type warning on 64-bit platforms in tracing events header
commit 1cf29683f4
11 changed files with 240 additions and 398 deletions
fs/ext4/ext4.h
@@ -93,20 +93,20 @@ typedef unsigned int ext4_group_t;
 struct ext4_allocation_request {
         /* target inode for block we're allocating */
         struct inode *inode;
-        /* logical block in target inode */
-        ext4_lblk_t logical;
-        /* phys. target (a hint) */
-        ext4_fsblk_t goal;
-        /* the closest logical allocated block to the left */
-        ext4_lblk_t lleft;
-        /* phys. block for ^^^ */
-        ext4_fsblk_t pleft;
-        /* the closest logical allocated block to the right */
-        ext4_lblk_t lright;
-        /* phys. block for ^^^ */
-        ext4_fsblk_t pright;
         /* how many blocks we want to allocate */
         unsigned int len;
+        /* logical block in target inode */
+        ext4_lblk_t logical;
+        /* the closest logical allocated block to the left */
+        ext4_lblk_t lleft;
+        /* the closest logical allocated block to the right */
+        ext4_lblk_t lright;
+        /* phys. target (a hint) */
+        ext4_fsblk_t goal;
+        /* phys. block for the closest logical allocated block to the left */
+        ext4_fsblk_t pleft;
+        /* phys. block for the closest logical allocated block to the right */
+        ext4_fsblk_t pright;
         /* flags. see above EXT4_MB_HINT_* */
         unsigned int flags;
 };
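Why the field order matters: grouping the 32-bit members together keeps the
64-bit ext4_fsblk_t members naturally aligned, removing the padding the old
interleaved layout forced on LP64. A minimal userspace sketch (the typedefs
are stand-ins for the real ext4 types, widths assumed 32/64-bit):

#include <stdio.h>

typedef unsigned int u32_t;        /* stand-in for ext4_lblk_t / unsigned int */
typedef unsigned long long u64_t;  /* stand-in for ext4_fsblk_t */

struct before {                    /* old layout: 32/64-bit fields interleaved */
        void *inode;
        u32_t logical; u64_t goal;
        u32_t lleft;   u64_t pleft;
        u32_t lright;  u64_t pright;
        u32_t len;     u32_t flags;
};

struct after {                     /* new layout: 32-bit fields grouped */
        void *inode;
        u32_t len, logical, lleft, lright;
        u64_t goal, pleft, pright;
        u32_t flags;
};

int main(void)
{
        /* On a typical LP64 ABI this prints before=64 after=56: each u32
         * that preceded a u64 in the old layout cost 4 bytes of padding. */
        printf("before=%zu after=%zu\n", sizeof(struct before), sizeof(struct after));
        return 0;
}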
fs/ext4/ext4_jbd2.c
@@ -43,6 +43,8 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
                         ext4_journal_abort_handle(where, __func__, bh,
                                                   handle, err);
         }
+        else
+                brelse(bh);
         return err;
 }
 
@@ -57,6 +59,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
                         ext4_journal_abort_handle(where, __func__, bh,
                                                   handle, err);
         }
+        else
+                brelse(bh);
         return err;
 }
 
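The leak these hunks fix: in no-journal mode ext4_handle_valid() is false, so
jbd2_journal_forget()/jbd2_journal_revoke() never ran and nothing dropped the
buffer-head reference the caller handed in. A sketch of the fixed function
(the lines above the hunk context are reconstructed, not shown in the diff):

int __ext4_journal_forget(const char *where, handle_t *handle,
                          struct buffer_head *bh)
{
        int err = 0;

        if (ext4_handle_valid(handle)) {
                err = jbd2_journal_forget(handle, bh);
                if (err)
                        ext4_journal_abort_handle(where, __func__, bh,
                                                  handle, err);
        } else
                brelse(bh);     /* no journal: drop the caller's reference here */
        return err;
}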
fs/ext4/ext4_jbd2.h
@@ -131,9 +131,11 @@ int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
 int __ext4_journal_get_write_access(const char *where, handle_t *handle,
                                 struct buffer_head *bh);
 
+/* When called with an invalid handle, this will still do a put on the BH */
 int __ext4_journal_forget(const char *where, handle_t *handle,
                                 struct buffer_head *bh);
 
+/* When called with an invalid handle, this will still do a put on the BH */
 int __ext4_journal_revoke(const char *where, handle_t *handle,
                                 ext4_fsblk_t blocknr, struct buffer_head *bh);
 
@@ -281,10 +283,10 @@ static inline int ext4_should_order_data(struct inode *inode)
 
 static inline int ext4_should_writeback_data(struct inode *inode)
 {
-        if (EXT4_JOURNAL(inode) == NULL)
-                return 0;
         if (!S_ISREG(inode->i_mode))
                 return 0;
+        if (EXT4_JOURNAL(inode) == NULL)
+                return 1;
         if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
                 return 0;
         if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
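The behavioral change: a filesystem mounted without a journal now reports
data=writeback semantics instead of "none", which is what the no-journal code
paths expect. The resulting helper plausibly reads as follows (the last two
lines are inferred; the hunk ends at the test_opt line):

static inline int ext4_should_writeback_data(struct inode *inode)
{
        if (!S_ISREG(inode->i_mode))
                return 0;
        if (EXT4_JOURNAL(inode) == NULL)
                return 1;       /* no journal: behave like data=writeback */
        if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
                return 0;
        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
                return 1;
        return 0;
}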
fs/ext4/extents.c
@@ -1977,6 +1977,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
                  */
                 /* 1 bitmap, 1 block group descriptor */
                 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
+                return ret;
         }
 }
 
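Without the added return, the extent-mapped branch fell through and later code
could overwrite ret with a credit count computed for a different case, so
transactions for extent inserts were sized wrong. A hypothetical caller sketch
(the variable names and the third parameter are assumptions, not shown in the
hunk):

        int needed = ext4_ext_calc_credits_for_single_extent(inode,
                                                             nrblocks, chunk);
        handle_t *handle = ext4_journal_start(inode, needed);
        /* ... insert the extent under a correctly-sized transaction ... */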
fs/ext4/ialloc.c
@@ -833,7 +833,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
         if (!goal)
                 goal = sbi->s_inode_goal;
 
-        if (goal && goal < le32_to_cpu(sbi->s_es->s_inodes_count)) {
+        if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
                 group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
                 ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
                 ret2 = 0;
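This is a classic off-by-one: inode numbers are 1-based, so s_inodes_count
itself is a valid goal, and the old strict `<` rejected it. A standalone demo
with illustrative geometry:

#include <stdio.h>

int main(void)
{
        unsigned int inodes_count = 8192; /* s_inodes_count (illustrative) */
        unsigned int per_group    = 2048; /* EXT4_INODES_PER_GROUP() (illustrative) */
        unsigned int goal         = 8192; /* the last valid inode number, 1-based */

        if (goal && goal <= inodes_count) /* old code used '<' and rejected this */
                printf("group=%u ino=%u\n",
                       (goal - 1) / per_group, (goal - 1) % per_group);
        return 0;
}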
fs/ext4/inode.c (384 lines changed)
@@ -78,16 +78,14 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
  *
- * If the handle isn't valid we're not journaling so there's nothing to do.
+ * If the handle isn't valid we're not journaling, but we still need to
+ * call into ext4_journal_revoke() to put the buffer head.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                 struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
         int err;
 
-        if (!ext4_handle_valid(handle))
-                return 0;
-
         might_sleep();
 
         BUFFER_TRACE(bh, "enter");
@@ -1513,14 +1511,14 @@ retry:
                  * Add inode to orphan list in case we crash before
                  * truncate finishes
                  */
-                if (pos + len > inode->i_size)
+                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                         ext4_orphan_add(handle, inode);
 
                 ext4_journal_stop(handle);
                 if (pos + len > inode->i_size) {
-                        vmtruncate(inode, inode->i_size);
+                        ext4_truncate(inode);
                         /*
-                         * If vmtruncate failed early the inode might
+                         * If truncate failed early the inode might
                          * still be on the orphan list; we need to
                          * make sure the inode is removed from the
                          * orphan list in that case.
@@ -1614,7 +1612,7 @@ static int ext4_ordered_write_end(struct file *file,
                 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                         page, fsdata);
                 copied = ret2;
-                if (pos + len > inode->i_size)
+                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                         /* if we have allocated more blocks and copied
                          * less. We will have blocks allocated outside
                          * inode->i_size. So truncate them
@@ -1628,9 +1626,9 @@ static int ext4_ordered_write_end(struct file *file,
                 ret = ret2;
 
         if (pos + len > inode->i_size) {
-                vmtruncate(inode, inode->i_size);
+                ext4_truncate(inode);
                 /*
-                 * If vmtruncate failed early the inode might still be
+                 * If truncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
                  */
@@ -1655,7 +1653,7 @@ static int ext4_writeback_write_end(struct file *file,
         ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                         page, fsdata);
         copied = ret2;
-        if (pos + len > inode->i_size)
+        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                 /* if we have allocated more blocks and copied
                  * less. We will have blocks allocated outside
                  * inode->i_size. So truncate them
@@ -1670,9 +1668,9 @@ static int ext4_writeback_write_end(struct file *file,
                 ret = ret2;
 
         if (pos + len > inode->i_size) {
-                vmtruncate(inode, inode->i_size);
+                ext4_truncate(inode);
                 /*
-                 * If vmtruncate failed early the inode might still be
+                 * If truncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
                  */
@@ -1722,7 +1720,7 @@ static int ext4_journalled_write_end(struct file *file,
 
         unlock_page(page);
         page_cache_release(page);
-        if (pos + len > inode->i_size)
+        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                 /* if we have allocated more blocks and copied
                  * less. We will have blocks allocated outside
                  * inode->i_size. So truncate them
@@ -1733,9 +1731,9 @@ static int ext4_journalled_write_end(struct file *file,
         if (!ret)
                 ret = ret2;
         if (pos + len > inode->i_size) {
-                vmtruncate(inode, inode->i_size);
+                ext4_truncate(inode);
                 /*
-                 * If vmtruncate failed early the inode might still be
+                 * If truncate failed early the inode might still be
                  * on the orphan list; we need to make sure the inode
                  * is removed from the orphan list in that case.
                  */
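All four write paths converge on the same error-path pattern; a condensed
sketch (ext4_can_truncate()/ext4_truncate(), unlike vmtruncate(), also cover
symlinks, whose failed writes previously left stray blocks past i_size):

        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                ext4_orphan_add(handle, inode); /* survive a crash mid-truncate */
        ext4_journal_stop(handle);
        if (pos + len > inode->i_size) {
                ext4_truncate(inode);           /* trim blocks past i_size */
                /* if truncate bailed early, the orphan-list cleanup shown in
                 * the full functions removes the inode from the list */
        }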
@@ -2305,15 +2303,9 @@ flush_it:
         return;
 }
 
-static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
 {
-        /*
-         * unmapped buffer is possible for holes.
-         * delay buffer is possible with delayed allocation.
-         * We also need to consider unwritten buffer as unmapped.
-         */
-        return (!buffer_mapped(bh) || buffer_delay(bh) ||
-                                buffer_unwritten(bh)) && buffer_dirty(bh);
+        return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
 }
 
 /*
@@ -2398,9 +2390,9 @@ static int __mpage_da_writepage(struct page *page,
                          * We need to try to allocate
                          * unmapped blocks in the same page.
                          * Otherwise we won't make progress
-                         * with the page in ext4_da_writepage
+                         * with the page in ext4_writepage
                          */
-                        if (ext4_bh_unmapped_or_delay(NULL, bh)) {
+                        if (ext4_bh_delay_or_unwritten(NULL, bh)) {
                                 mpage_add_bh_to_extent(mpd, logical,
                                                        bh->b_size,
                                                        bh->b_state);
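The rename is also a narrowing of the predicate. Effect summary (d =
buffer_delay, u = buffer_unwritten, m = buffer_mapped, all assuming the buffer
is dirty):

/*
 *   state              old ext4_bh_unmapped_or_delay   new ext4_bh_delay_or_unwritten
 *   hole (!m, !d, !u)  1 -> forced a redirty           0 -> write what is mapped
 *   delayed (d)        1                               1
 *   unwritten (u)      1                               1
 *
 * Plain holes no longer block the no-allocation writepage path; only
 * delayed-allocation and unwritten buffers still require ->writepages
 * to do the allocation later.
 */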
@@ -2517,7 +2509,6 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
          * so call get_block_wrap with create = 0
          */
         ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
-        BUG_ON(create && ret == 0);
         if (ret > 0) {
                 bh_result->b_size = (ret << inode->i_blkbits);
                 ret = 0;
@@ -2525,15 +2516,102 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
         return ret;
 }
 
+static int bget_one(handle_t *handle, struct buffer_head *bh)
+{
+        get_bh(bh);
+        return 0;
+}
+
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+        put_bh(bh);
+        return 0;
+}
+
+static int __ext4_journalled_writepage(struct page *page,
+                                       struct writeback_control *wbc,
+                                       unsigned int len)
+{
+        struct address_space *mapping = page->mapping;
+        struct inode *inode = mapping->host;
+        struct buffer_head *page_bufs;
+        handle_t *handle = NULL;
+        int ret = 0;
+        int err;
+
+        page_bufs = page_buffers(page);
+        BUG_ON(!page_bufs);
+        walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+        /* As soon as we unlock the page, it can go away, but we have
+         * references to buffers so we are safe */
+        unlock_page(page);
+
+        handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+        if (IS_ERR(handle)) {
+                ret = PTR_ERR(handle);
+                goto out;
+        }
+
+        ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                do_journal_get_write_access);
+
+        err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                write_end_fn);
+        if (ret == 0)
+                ret = err;
+        err = ext4_journal_stop(handle);
+        if (!ret)
+                ret = err;
+
+        walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+        EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+out:
+        return ret;
+}
+
 /*
+ * Note that we don't need to start a transaction unless we're journaling data
+ * because we should have holes filled from ext4_page_mkwrite(). We even don't
+ * need to file the inode to the transaction's list in ordered mode because if
+ * we are writing back data added by write(), the inode is already there and if
+ * we are writing back data modified via mmap(), noone guarantees in which
+ * transaction the data will hit the disk. In case we are journaling data, we
+ * cannot start transaction directly because transaction start ranks above page
+ * lock so we have to do some magic.
+ *
  * This function can get called via...
  * - ext4_da_writepages after taking page lock (have journal handle)
  * - journal_submit_inode_data_buffers (no journal handle)
  * - shrink_page_list via pdflush (no journal handle)
  * - grab_page_cache when doing write_begin (have journal handle)
+ *
+ * We don't do any block allocation in this function. If we have page with
+ * multiple blocks we need to write those buffer_heads that are mapped. This
+ * is important for mmaped based write. So if we do with blocksize 1K
+ * truncate(f, 1024);
+ * a = mmap(f, 0, 4096);
+ * a[0] = 'a';
+ * truncate(f, 4096);
+ * we have in the page first buffer_head mapped via page_mkwrite call back
+ * but other bufer_heads would be unmapped but dirty(dirty done via the
+ * do_wp_page). So writepage should write the first block. If we modify
+ * the mmap area beyond 1024 we will again get a page_fault and the
+ * page_mkwrite callback will do the block allocation and mark the
+ * buffer_heads mapped.
+ *
+ * We redirty the page if we have any buffer_heads that is either delay or
+ * unwritten in the page.
+ *
+ * We can get recursively called as show below.
+ *
+ *        ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *                ext4_writepage()
+ *
+ * But since we don't do any block allocation we should not deadlock.
+ * Page also have the dirty flag cleared so we don't get recurive page_lock.
 */
-static int ext4_da_writepage(struct page *page,
-                                struct writeback_control *wbc)
+static int ext4_writepage(struct page *page,
+                          struct writeback_control *wbc)
 {
         int ret = 0;
         loff_t size;
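The refcount dance in the moved helper is the interesting part: once the page
lock is dropped (a must before ext4_journal_start(), which can sleep), the
page could be reclaimed, but pinned buffer_heads cannot. A condensed sketch of
the pattern, using the names from the hunk above:

        walk_page_buffers(NULL, page_bufs, 0, len, NULL, bget_one); /* get_bh() each */
        unlock_page(page);
        /* ... start a handle, journal the buffers via
         *     do_journal_get_write_access / write_end_fn ... */
        walk_page_buffers(NULL, page_bufs, 0, len, NULL, bput_one); /* put_bh() each */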
@@ -2541,7 +2619,7 @@ static int ext4_writepage(struct page *page,
         struct buffer_head *page_bufs;
         struct inode *inode = page->mapping->host;
 
-        trace_ext4_da_writepage(inode, page);
+        trace_ext4_writepage(inode, page);
         size = i_size_read(inode);
         if (page->index == size >> PAGE_CACHE_SHIFT)
                 len = size & ~PAGE_CACHE_MASK;
@@ -2551,7 +2629,7 @@ static int ext4_writepage(struct page *page,
         if (page_has_buffers(page)) {
                 page_bufs = page_buffers(page);
                 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                        ext4_bh_unmapped_or_delay)) {
+                                        ext4_bh_delay_or_unwritten)) {
                         /*
                          * We don't want to do block allocation
                          * So redirty the page and return
@@ -2578,13 +2656,13 @@ static int ext4_writepage(struct page *page,
                  * all are mapped and non delay. We don't want to
                  * do block allocation here.
                  */
-                ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+                ret = block_prepare_write(page, 0, len,
                                           noalloc_get_block_write);
                 if (!ret) {
                         page_bufs = page_buffers(page);
                         /* check whether all are mapped and non delay */
                         if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                                ext4_bh_unmapped_or_delay)) {
+                                                ext4_bh_delay_or_unwritten)) {
                                 redirty_page_for_writepage(wbc, page);
                                 unlock_page(page);
                                 return 0;
@@ -2600,7 +2678,16 @@ static int ext4_writepage(struct page *page,
                         return 0;
                 }
                 /* now mark the buffer_heads as dirty and uptodate */
-                block_commit_write(page, 0, PAGE_CACHE_SIZE);
+                block_commit_write(page, 0, len);
+        }
+
+        if (PageChecked(page) && ext4_should_journal_data(inode)) {
+                /*
+                 * It's mmapped pagecache.  Add buffers and journal it.  There
+                 * doesn't seem much point in redirtying the page here.
+                 */
+                ClearPageChecked(page);
+                return __ext4_journalled_writepage(page, wbc, len);
         }
 
         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
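Worked example for the PAGE_CACHE_SIZE -> len change, which is the
blocksize < pagesize mmap/truncate fix: with 1K blocks, 4K pages and
i_size = 1024, the dirty page has index 0 and 1024 >> PAGE_CACHE_SHIFT == 0,
so the existing length computation yields

        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;  /* 1024 & 0xfff = 1024: EOF page */
        else
                len = PAGE_CACHE_SIZE;          /* interior page: whole page */

and only the first buffer_head is prepared/committed. The old code passed
PAGE_CACHE_SIZE (4096) and touched the three buffer_heads beyond EOF, which a
concurrent truncate could be tearing down.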
@@ -2907,7 +2994,7 @@ retry:
                  * i_size_read because we hold i_mutex.
                  */
                 if (pos + len > inode->i_size)
-                        vmtruncate(inode, inode->i_size);
+                        ext4_truncate(inode);
         }
 
         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
|
||||||
return generic_block_bmap(mapping, block, ext4_get_block);
|
return generic_block_bmap(mapping, block, ext4_get_block);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bget_one(handle_t *handle, struct buffer_head *bh)
|
|
||||||
{
|
|
||||||
get_bh(bh);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int bput_one(handle_t *handle, struct buffer_head *bh)
|
|
||||||
{
|
|
||||||
put_bh(bh);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Note that we don't need to start a transaction unless we're journaling data
|
|
||||||
* because we should have holes filled from ext4_page_mkwrite(). We even don't
|
|
||||||
* need to file the inode to the transaction's list in ordered mode because if
|
|
||||||
* we are writing back data added by write(), the inode is already there and if
|
|
||||||
* we are writing back data modified via mmap(), noone guarantees in which
|
|
||||||
* transaction the data will hit the disk. In case we are journaling data, we
|
|
||||||
* cannot start transaction directly because transaction start ranks above page
|
|
||||||
* lock so we have to do some magic.
|
|
||||||
*
|
|
||||||
* In all journaling modes block_write_full_page() will start the I/O.
|
|
||||||
*
|
|
||||||
* Problem:
|
|
||||||
*
|
|
||||||
* ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
|
|
||||||
* ext4_writepage()
|
|
||||||
*
|
|
||||||
* Similar for:
|
|
||||||
*
|
|
||||||
* ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
|
|
||||||
*
|
|
||||||
* Same applies to ext4_get_block(). We will deadlock on various things like
|
|
||||||
* lock_journal and i_data_sem
|
|
||||||
*
|
|
||||||
* Setting PF_MEMALLOC here doesn't work - too many internal memory
|
|
||||||
* allocations fail.
|
|
||||||
*
|
|
||||||
* 16May01: If we're reentered then journal_current_handle() will be
|
|
||||||
* non-zero. We simply *return*.
|
|
||||||
*
|
|
||||||
* 1 July 2001: @@@ FIXME:
|
|
||||||
* In journalled data mode, a data buffer may be metadata against the
|
|
||||||
* current transaction. But the same file is part of a shared mapping
|
|
||||||
* and someone does a writepage() on it.
|
|
||||||
*
|
|
||||||
* We will move the buffer onto the async_data list, but *after* it has
|
|
||||||
* been dirtied. So there's a small window where we have dirty data on
|
|
||||||
* BJ_Metadata.
|
|
||||||
*
|
|
||||||
* Note that this only applies to the last partial page in the file. The
|
|
||||||
* bit which block_write_full_page() uses prepare/commit for. (That's
|
|
||||||
* broken code anyway: it's wrong for msync()).
|
|
||||||
*
|
|
||||||
* It's a rare case: affects the final partial page, for journalled data
|
|
||||||
* where the file is subject to bith write() and writepage() in the same
|
|
||||||
* transction. To fix it we'll need a custom block_write_full_page().
|
|
||||||
* We'll probably need that anyway for journalling writepage() output.
|
|
||||||
*
|
|
||||||
* We don't honour synchronous mounts for writepage(). That would be
|
|
||||||
* disastrous. Any write() or metadata operation will sync the fs for
|
|
||||||
* us.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
static int __ext4_normal_writepage(struct page *page,
|
|
||||||
struct writeback_control *wbc)
|
|
||||||
{
|
|
||||||
struct inode *inode = page->mapping->host;
|
|
||||||
|
|
||||||
if (test_opt(inode->i_sb, NOBH))
|
|
||||||
return nobh_writepage(page, noalloc_get_block_write, wbc);
|
|
||||||
else
|
|
||||||
return block_write_full_page(page, noalloc_get_block_write,
|
|
||||||
wbc);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ext4_normal_writepage(struct page *page,
|
|
||||||
struct writeback_control *wbc)
|
|
||||||
{
|
|
||||||
struct inode *inode = page->mapping->host;
|
|
||||||
loff_t size = i_size_read(inode);
|
|
||||||
loff_t len;
|
|
||||||
|
|
||||||
trace_ext4_normal_writepage(inode, page);
|
|
||||||
J_ASSERT(PageLocked(page));
|
|
||||||
if (page->index == size >> PAGE_CACHE_SHIFT)
|
|
||||||
len = size & ~PAGE_CACHE_MASK;
|
|
||||||
else
|
|
||||||
len = PAGE_CACHE_SIZE;
|
|
||||||
|
|
||||||
if (page_has_buffers(page)) {
|
|
||||||
/* if page has buffers it should all be mapped
|
|
||||||
* and allocated. If there are not buffers attached
|
|
||||||
* to the page we know the page is dirty but it lost
|
|
||||||
* buffers. That means that at some moment in time
|
|
||||||
* after write_begin() / write_end() has been called
|
|
||||||
* all buffers have been clean and thus they must have been
|
|
||||||
* written at least once. So they are all mapped and we can
|
|
||||||
* happily proceed with mapping them and writing the page.
|
|
||||||
*/
|
|
||||||
BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
|
|
||||||
ext4_bh_unmapped_or_delay));
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!ext4_journal_current_handle())
|
|
||||||
return __ext4_normal_writepage(page, wbc);
|
|
||||||
|
|
||||||
redirty_page_for_writepage(wbc, page);
|
|
||||||
unlock_page(page);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __ext4_journalled_writepage(struct page *page,
|
|
||||||
struct writeback_control *wbc)
|
|
||||||
{
|
|
||||||
struct address_space *mapping = page->mapping;
|
|
||||||
struct inode *inode = mapping->host;
|
|
||||||
struct buffer_head *page_bufs;
|
|
||||||
handle_t *handle = NULL;
|
|
||||||
int ret = 0;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
|
|
||||||
noalloc_get_block_write);
|
|
||||||
if (ret != 0)
|
|
||||||
goto out_unlock;
|
|
||||||
|
|
||||||
page_bufs = page_buffers(page);
|
|
||||||
walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
|
|
||||||
bget_one);
|
|
||||||
/* As soon as we unlock the page, it can go away, but we have
|
|
||||||
* references to buffers so we are safe */
|
|
||||||
unlock_page(page);
|
|
||||||
|
|
||||||
handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
|
|
||||||
if (IS_ERR(handle)) {
|
|
||||||
ret = PTR_ERR(handle);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = walk_page_buffers(handle, page_bufs, 0,
|
|
||||||
PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
|
|
||||||
|
|
||||||
err = walk_page_buffers(handle, page_bufs, 0,
|
|
||||||
PAGE_CACHE_SIZE, NULL, write_end_fn);
|
|
||||||
if (ret == 0)
|
|
||||||
ret = err;
|
|
||||||
err = ext4_journal_stop(handle);
|
|
||||||
if (!ret)
|
|
||||||
ret = err;
|
|
||||||
|
|
||||||
walk_page_buffers(handle, page_bufs, 0,
|
|
||||||
PAGE_CACHE_SIZE, NULL, bput_one);
|
|
||||||
EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
out_unlock:
|
|
||||||
unlock_page(page);
|
|
||||||
out:
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ext4_journalled_writepage(struct page *page,
|
|
||||||
struct writeback_control *wbc)
|
|
||||||
{
|
|
||||||
struct inode *inode = page->mapping->host;
|
|
||||||
loff_t size = i_size_read(inode);
|
|
||||||
loff_t len;
|
|
||||||
|
|
||||||
trace_ext4_journalled_writepage(inode, page);
|
|
||||||
J_ASSERT(PageLocked(page));
|
|
||||||
if (page->index == size >> PAGE_CACHE_SHIFT)
|
|
||||||
len = size & ~PAGE_CACHE_MASK;
|
|
||||||
else
|
|
||||||
len = PAGE_CACHE_SIZE;
|
|
||||||
|
|
||||||
if (page_has_buffers(page)) {
|
|
||||||
/* if page has buffers it should all be mapped
|
|
||||||
* and allocated. If there are not buffers attached
|
|
||||||
* to the page we know the page is dirty but it lost
|
|
||||||
* buffers. That means that at some moment in time
|
|
||||||
* after write_begin() / write_end() has been called
|
|
||||||
* all buffers have been clean and thus they must have been
|
|
||||||
* written at least once. So they are all mapped and we can
|
|
||||||
* happily proceed with mapping them and writing the page.
|
|
||||||
*/
|
|
||||||
BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
|
|
||||||
ext4_bh_unmapped_or_delay));
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ext4_journal_current_handle())
|
|
||||||
goto no_write;
|
|
||||||
|
|
||||||
if (PageChecked(page)) {
|
|
||||||
/*
|
|
||||||
* It's mmapped pagecache. Add buffers and journal it. There
|
|
||||||
* doesn't seem much point in redirtying the page here.
|
|
||||||
*/
|
|
||||||
ClearPageChecked(page);
|
|
||||||
return __ext4_journalled_writepage(page, wbc);
|
|
||||||
} else {
|
|
||||||
/*
|
|
||||||
* It may be a page full of checkpoint-mode buffers. We don't
|
|
||||||
* really know unless we go poke around in the buffer_heads.
|
|
||||||
* But block_write_full_page will do the right thing.
|
|
||||||
*/
|
|
||||||
return block_write_full_page(page, noalloc_get_block_write,
|
|
||||||
wbc);
|
|
||||||
}
|
|
||||||
no_write:
|
|
||||||
redirty_page_for_writepage(wbc, page);
|
|
||||||
unlock_page(page);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int ext4_readpage(struct file *file, struct page *page)
|
static int ext4_readpage(struct file *file, struct page *page)
|
||||||
{
|
{
|
||||||
return mpage_readpage(page, ext4_get_block);
|
return mpage_readpage(page, ext4_get_block);
|
||||||
|
@@ -3492,7 +3363,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 static const struct address_space_operations ext4_ordered_aops = {
         .readpage               = ext4_readpage,
         .readpages              = ext4_readpages,
-        .writepage              = ext4_normal_writepage,
+        .writepage              = ext4_writepage,
         .sync_page              = block_sync_page,
         .write_begin            = ext4_write_begin,
         .write_end              = ext4_ordered_write_end,
@@ -3507,7 +3378,7 @@ static const struct address_space_operations ext4_ordered_aops = {
 static const struct address_space_operations ext4_writeback_aops = {
         .readpage               = ext4_readpage,
         .readpages              = ext4_readpages,
-        .writepage              = ext4_normal_writepage,
+        .writepage              = ext4_writepage,
         .sync_page              = block_sync_page,
         .write_begin            = ext4_write_begin,
         .write_end              = ext4_writeback_write_end,
@@ -3522,7 +3393,7 @@ static const struct address_space_operations ext4_writeback_aops = {
 static const struct address_space_operations ext4_journalled_aops = {
         .readpage               = ext4_readpage,
         .readpages              = ext4_readpages,
-        .writepage              = ext4_journalled_writepage,
+        .writepage              = ext4_writepage,
         .sync_page              = block_sync_page,
         .write_begin            = ext4_write_begin,
         .write_end              = ext4_journalled_write_end,
@@ -3536,7 +3407,7 @@ static const struct address_space_operations ext4_journalled_aops = {
 static const struct address_space_operations ext4_da_aops = {
         .readpage               = ext4_readpage,
         .readpages              = ext4_readpages,
-        .writepage              = ext4_da_writepage,
+        .writepage              = ext4_writepage,
         .writepages             = ext4_da_writepages,
         .sync_page              = block_sync_page,
         .write_begin            = ext4_da_write_begin,
|
||||||
struct page *page;
|
struct page *page;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
|
page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
|
||||||
|
mapping_gfp_mask(mapping) & ~__GFP_FS);
|
||||||
if (!page)
|
if (!page)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
|
|
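The reclaim-deadlock fix in one line: grab_cache_page() allocates with the
mapping's full gfp mask, which normally includes __GFP_FS and so can recurse
into filesystem reclaim (and back into the journal) while this path already
holds journal state. Masking the bit keeps reclaim out of the fs; a sketch of
the equivalent spelling:

        gfp_t gfp = mapping_gfp_mask(mapping) & ~__GFP_FS; /* no fs re-entry */
        page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, gfp);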
fs/ext4/ioctl.c
@@ -191,7 +191,7 @@ setversion_out:
         case EXT4_IOC_GROUP_EXTEND: {
                 ext4_fsblk_t n_blocks_count;
                 struct super_block *sb = inode->i_sb;
-                int err, err2;
+                int err, err2 = 0;
 
                 if (!capable(CAP_SYS_RESOURCE))
                         return -EPERM;
@@ -204,9 +204,11 @@ setversion_out:
                         return err;
 
                 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
-                jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-                err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-                jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+                if (EXT4_SB(sb)->s_journal) {
+                        jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+                        err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+                        jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+                }
                 if (err == 0)
                         err = err2;
                 mnt_drop_write(filp->f_path.mnt);
@@ -251,7 +253,7 @@ setversion_out:
         case EXT4_IOC_GROUP_ADD: {
                 struct ext4_new_group_data input;
                 struct super_block *sb = inode->i_sb;
-                int err, err2;
+                int err, err2 = 0;
 
                 if (!capable(CAP_SYS_RESOURCE))
                         return -EPERM;
@@ -265,9 +267,11 @@ setversion_out:
                         return err;
 
                 err = ext4_group_add(sb, &input);
-                jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-                err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-                jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+                if (EXT4_SB(sb)->s_journal) {
+                        jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+                        err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+                        jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+                }
                 if (err == 0)
                         err = err2;
                 mnt_drop_write(filp->f_path.mnt);
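Both ioctls share the same null-handler pattern: in no-journal mode
EXT4_SB(sb)->s_journal is NULL and any jbd2_* call would oops. Distilled:

        int err2 = 0;                   /* pre-initialized: flush may be skipped */
        if (EXT4_SB(sb)->s_journal) {
                jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
                err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
                jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
        }
        if (err == 0)
                err = err2;             /* still correct when err2 stayed 0 */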
|
@ -657,7 +657,8 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ext4_mb_generate_buddy(struct super_block *sb,
|
static noinline_for_stack
|
||||||
|
void ext4_mb_generate_buddy(struct super_block *sb,
|
||||||
void *buddy, void *bitmap, ext4_group_t group)
|
void *buddy, void *bitmap, ext4_group_t group)
|
||||||
{
|
{
|
||||||
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
||||||
|
@ -1480,7 +1481,8 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
|
||||||
ext4_mb_check_limits(ac, e4b, 0);
|
ext4_mb_check_limits(ac, e4b, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
|
static noinline_for_stack
|
||||||
|
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
|
||||||
struct ext4_buddy *e4b)
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
struct ext4_free_extent ex = ac->ac_b_ex;
|
struct ext4_free_extent ex = ac->ac_b_ex;
|
||||||
|
@ -1507,7 +1509,8 @@ static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
|
static noinline_for_stack
|
||||||
|
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
|
||||||
struct ext4_buddy *e4b)
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
ext4_group_t group = ac->ac_g_ex.fe_group;
|
ext4_group_t group = ac->ac_g_ex.fe_group;
|
||||||
|
@ -1566,7 +1569,8 @@ static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
|
||||||
* The routine scans buddy structures (not bitmap!) from given order
|
* The routine scans buddy structures (not bitmap!) from given order
|
||||||
* to max order and tries to find big enough chunk to satisfy the req
|
* to max order and tries to find big enough chunk to satisfy the req
|
||||||
*/
|
*/
|
||||||
static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
|
static noinline_for_stack
|
||||||
|
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
|
||||||
struct ext4_buddy *e4b)
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ac->ac_sb;
|
struct super_block *sb = ac->ac_sb;
|
||||||
|
@ -1609,7 +1613,8 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
|
||||||
* In order to optimize scanning, caller must pass number of
|
* In order to optimize scanning, caller must pass number of
|
||||||
* free blocks in the group, so the routine can know upper limit.
|
* free blocks in the group, so the routine can know upper limit.
|
||||||
*/
|
*/
|
||||||
static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
static noinline_for_stack
|
||||||
|
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
||||||
struct ext4_buddy *e4b)
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ac->ac_sb;
|
struct super_block *sb = ac->ac_sb;
|
||||||
|
@ -1668,7 +1673,8 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
||||||
* we try to find stripe-aligned chunks for stripe-size requests
|
* we try to find stripe-aligned chunks for stripe-size requests
|
||||||
* XXX should do so at least for multiples of stripe size as well
|
* XXX should do so at least for multiples of stripe size as well
|
||||||
*/
|
*/
|
||||||
static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
|
static noinline_for_stack
|
||||||
|
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
|
||||||
struct ext4_buddy *e4b)
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ac->ac_sb;
|
struct super_block *sb = ac->ac_sb;
|
||||||
|
@ -1831,7 +1837,8 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
|
static noinline_for_stack
|
||||||
|
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
|
||||||
{
|
{
|
||||||
|
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -2902,7 +2909,11 @@ int __init init_ext4_mballoc(void)
|
||||||
|
|
||||||
void exit_ext4_mballoc(void)
|
void exit_ext4_mballoc(void)
|
||||||
{
|
{
|
||||||
/* XXX: synchronize_rcu(); */
|
/*
|
||||||
|
* Wait for completion of call_rcu()'s on ext4_pspace_cachep
|
||||||
|
* before destroying the slab cache.
|
||||||
|
*/
|
||||||
|
rcu_barrier();
|
||||||
kmem_cache_destroy(ext4_pspace_cachep);
|
kmem_cache_destroy(ext4_pspace_cachep);
|
||||||
kmem_cache_destroy(ext4_ac_cachep);
|
kmem_cache_destroy(ext4_ac_cachep);
|
||||||
kmem_cache_destroy(ext4_free_ext_cachep);
|
kmem_cache_destroy(ext4_free_ext_cachep);
|
||||||
|
@ -3457,7 +3468,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
|
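Why rcu_barrier() and not the synchronize_rcu() the old XXX comment mused
about: synchronize_rcu() only waits for readers; callbacks queued with
call_rcu() may still run afterwards and would touch the destroyed cache.
Ordering sketch, assuming objects are freed via call_rcu():

        /* somewhere earlier, per freed object: */
        call_rcu(&obj->rcu, free_obj);          /* callback frees into the cache */
        /* module exit: */
        rcu_barrier();                          /* waits until all queued callbacks ran */
        kmem_cache_destroy(obj_cachep);         /* now no callback can touch the cache */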
||||||
* used in in-core bitmap. buddy must be generated from this bitmap
|
* used in in-core bitmap. buddy must be generated from this bitmap
|
||||||
* Need to be called with ext4 group lock held
|
* Need to be called with ext4 group lock held
|
||||||
*/
|
*/
|
||||||
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
|
static noinline_for_stack
|
||||||
|
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
|
||||||
ext4_group_t group)
|
ext4_group_t group)
|
||||||
{
|
{
|
||||||
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
||||||
|
@ -4215,14 +4227,9 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
|
||||||
ext4_get_group_no_and_offset(sb, goal, &group, &block);
|
ext4_get_group_no_and_offset(sb, goal, &group, &block);
|
||||||
|
|
||||||
/* set up allocation goals */
|
/* set up allocation goals */
|
||||||
|
memset(ac, 0, sizeof(struct ext4_allocation_context));
|
||||||
ac->ac_b_ex.fe_logical = ar->logical;
|
ac->ac_b_ex.fe_logical = ar->logical;
|
||||||
ac->ac_b_ex.fe_group = 0;
|
|
||||||
ac->ac_b_ex.fe_start = 0;
|
|
||||||
ac->ac_b_ex.fe_len = 0;
|
|
||||||
ac->ac_status = AC_STATUS_CONTINUE;
|
ac->ac_status = AC_STATUS_CONTINUE;
|
||||||
ac->ac_groups_scanned = 0;
|
|
||||||
ac->ac_ex_scanned = 0;
|
|
||||||
ac->ac_found = 0;
|
|
||||||
ac->ac_sb = sb;
|
ac->ac_sb = sb;
|
||||||
ac->ac_inode = ar->inode;
|
ac->ac_inode = ar->inode;
|
||||||
ac->ac_o_ex.fe_logical = ar->logical;
|
ac->ac_o_ex.fe_logical = ar->logical;
|
||||||
|
@ -4233,15 +4240,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
|
||||||
ac->ac_g_ex.fe_group = group;
|
ac->ac_g_ex.fe_group = group;
|
||||||
ac->ac_g_ex.fe_start = block;
|
ac->ac_g_ex.fe_start = block;
|
||||||
ac->ac_g_ex.fe_len = len;
|
ac->ac_g_ex.fe_len = len;
|
||||||
ac->ac_f_ex.fe_len = 0;
|
|
||||||
ac->ac_flags = ar->flags;
|
ac->ac_flags = ar->flags;
|
||||||
ac->ac_2order = 0;
|
|
||||||
ac->ac_criteria = 0;
|
|
||||||
ac->ac_pa = NULL;
|
|
||||||
ac->ac_bitmap_page = NULL;
|
|
||||||
ac->ac_buddy_page = NULL;
|
|
||||||
ac->alloc_semp = NULL;
|
|
||||||
ac->ac_lg = NULL;
|
|
||||||
|
|
||||||
/* we have to define context: we'll we work with a file or
|
/* we have to define context: we'll we work with a file or
|
||||||
* locality group. this is a policy, actually */
|
* locality group. this is a policy, actually */
|
||||||
|
@ -4509,10 +4508,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
|
||||||
}
|
}
|
||||||
|
|
||||||
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
||||||
if (ac) {
|
if (!ac) {
|
||||||
ac->ac_sb = sb;
|
|
||||||
ac->ac_inode = ar->inode;
|
|
||||||
} else {
|
|
||||||
ar->len = 0;
|
ar->len = 0;
|
||||||
*errp = -ENOMEM;
|
*errp = -ENOMEM;
|
||||||
goto out1;
|
goto out1;
|
||||||
|
|
|
fs/jbd2/journal.c
@@ -297,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
         unsigned int new_offset;
         struct buffer_head *bh_in = jh2bh(jh_in);
         struct jbd2_buffer_trigger_type *triggers;
+        journal_t *journal = transaction->t_journal;
 
         /*
          * The buffer really shouldn't be locked: only the current committing
@@ -310,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
         J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
         new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+        /* keep subsequent assertions sane */
+        new_bh->b_state = 0;
+        init_buffer(new_bh, NULL, NULL);
+        atomic_set(&new_bh->b_count, 1);
+        new_jh = jbd2_journal_add_journal_head(new_bh);        /* This sleeps */
 
         /*
          * If a new transaction has already done a buffer copy-out, then
@@ -388,14 +394,6 @@ repeat:
                 kunmap_atomic(mapped_data, KM_USER0);
         }
 
-        /* keep subsequent assertions sane */
-        new_bh->b_state = 0;
-        init_buffer(new_bh, NULL, NULL);
-        atomic_set(&new_bh->b_count, 1);
-        jbd_unlock_bh_state(bh_in);
-
-        new_jh = jbd2_journal_add_journal_head(new_bh);        /* This sleeps */
-
         set_bh_page(new_bh, new_page, new_offset);
         new_jh->b_transaction = NULL;
         new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -412,7 +410,11 @@ repeat:
          * copying is moved to the transaction's shadow queue.
          */
         JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-        jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+        spin_lock(&journal->j_list_lock);
+        __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+        spin_unlock(&journal->j_list_lock);
+        jbd_unlock_bh_state(bh_in);
+
         JBUFFER_TRACE(new_jh, "file as BJ_IO");
         jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
 
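The race these hunks close: do_get_write_access() could observe jh_in between
the commit path dropping the bh_state lock and filing the buffer as
BJ_Shadow. The fix initializes new_bh immediately after allocation and holds
the bh_state lock across the filing. Locking-order sketch, using names from
the hunks above:

        /*
         * jbd_lock_bh_state(bh_in);           held by the commit path
         * ... copy-out / escape logic ...
         * spin_lock(&journal->j_list_lock);
         * __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
         * spin_unlock(&journal->j_list_lock);
         * jbd_unlock_bh_state(bh_in);         only now can get_write_access
         *                                     see a fully-filed BJ_Shadow jh
         */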
int i = hash_32(device, CACHE_SIZE_BITS);
|
int i = hash_32(device, CACHE_SIZE_BITS);
|
||||||
char *ret;
|
char *ret;
|
||||||
struct block_device *bd;
|
struct block_device *bd;
|
||||||
|
static struct devname_cache *new_dev;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
if (devcache[i] && devcache[i]->device == device) {
|
if (devcache[i] && devcache[i]->device == device) {
|
||||||
|
@ -2419,20 +2422,20 @@ const char *jbd2_dev_to_name(dev_t device)
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
|
new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
|
||||||
|
if (!new_dev)
|
||||||
|
return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
|
||||||
spin_lock(&devname_cache_lock);
|
spin_lock(&devname_cache_lock);
|
||||||
if (devcache[i]) {
|
if (devcache[i]) {
|
||||||
if (devcache[i]->device == device) {
|
if (devcache[i]->device == device) {
|
||||||
|
kfree(new_dev);
|
||||||
ret = devcache[i]->devname;
|
ret = devcache[i]->devname;
|
||||||
spin_unlock(&devname_cache_lock);
|
spin_unlock(&devname_cache_lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
call_rcu(&devcache[i]->rcu, free_devcache);
|
call_rcu(&devcache[i]->rcu, free_devcache);
|
||||||
}
|
}
|
||||||
devcache[i] = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
|
devcache[i] = new_dev;
|
||||||
if (!devcache[i]) {
|
|
||||||
spin_unlock(&devname_cache_lock);
|
|
||||||
return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
|
|
||||||
}
|
|
||||||
devcache[i]->device = device;
|
devcache[i]->device = device;
|
||||||
bd = bdget(device);
|
bd = bdget(device);
|
||||||
if (bd) {
|
if (bd) {
|
||||||
|
|
|
@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal)
|
||||||
wake_up(&journal->j_wait_transaction_locked);
|
wake_up(&journal->j_wait_transaction_locked);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
static void warn_dirty_buffer(struct buffer_head *bh)
|
||||||
* Report any unexpected dirty buffers which turn up. Normally those
|
|
||||||
* indicate an error, but they can occur if the user is running (say)
|
|
||||||
* tune2fs to modify the live filesystem, so we need the option of
|
|
||||||
* continuing as gracefully as possible. #
|
|
||||||
*
|
|
||||||
* The caller should already hold the journal lock and
|
|
||||||
* j_list_lock spinlock: most callers will need those anyway
|
|
||||||
* in order to probe the buffer's journaling state safely.
|
|
||||||
*/
|
|
||||||
static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
|
|
||||||
{
|
{
|
||||||
int jlist;
|
char b[BDEVNAME_SIZE];
|
||||||
|
|
||||||
/* If this buffer is one which might reasonably be dirty
|
printk(KERN_WARNING
|
||||||
* --- ie. data, or not part of this journal --- then
|
"JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
|
||||||
* we're OK to leave it alone, but otherwise we need to
|
"There's a risk of filesystem corruption in case of system "
|
||||||
* move the dirty bit to the journal's own internal
|
"crash.\n",
|
||||||
* JBDDirty bit. */
|
bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
|
||||||
jlist = jh->b_jlist;
|
|
||||||
|
|
||||||
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
|
|
||||||
jlist == BJ_Shadow || jlist == BJ_Forget) {
|
|
||||||
struct buffer_head *bh = jh2bh(jh);
|
|
||||||
|
|
||||||
if (test_clear_buffer_dirty(bh))
|
|
||||||
set_buffer_jbddirty(bh);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -593,14 +574,16 @@ repeat:
|
||||||
if (jh->b_next_transaction)
|
if (jh->b_next_transaction)
|
||||||
J_ASSERT_JH(jh, jh->b_next_transaction ==
|
J_ASSERT_JH(jh, jh->b_next_transaction ==
|
||||||
transaction);
|
transaction);
|
||||||
|
warn_dirty_buffer(bh);
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* In any case we need to clean the dirty flag and we must
|
* In any case we need to clean the dirty flag and we must
|
||||||
* do it under the buffer lock to be sure we don't race
|
* do it under the buffer lock to be sure we don't race
|
||||||
* with running write-out.
|
* with running write-out.
|
||||||
*/
|
*/
|
||||||
JBUFFER_TRACE(jh, "Unexpected dirty buffer");
|
JBUFFER_TRACE(jh, "Journalling dirty buffer");
|
||||||
jbd_unexpected_dirty_buffer(jh);
|
clear_buffer_dirty(bh);
|
||||||
|
set_buffer_jbddirty(bh);
|
||||||
}
|
}
|
||||||
|
|
||||||
unlock_buffer(bh);
|
unlock_buffer(bh);
|
||||||
|
@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
|
||||||
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
|
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
|
||||||
|
|
||||||
if (jh->b_transaction == NULL) {
|
if (jh->b_transaction == NULL) {
|
||||||
|
/*
|
||||||
|
* Previous jbd2_journal_forget() could have left the buffer
|
||||||
|
* with jbddirty bit set because it was being committed. When
|
||||||
|
* the commit finished, we've filed the buffer for
|
||||||
|
* checkpointing and marked it dirty. Now we are reallocating
|
||||||
|
* the buffer so the transaction freeing it must have
|
||||||
|
* committed and so it's safe to clear the dirty bit.
|
||||||
|
*/
|
||||||
|
clear_buffer_dirty(jh2bh(jh));
|
||||||
jh->b_transaction = transaction;
|
jh->b_transaction = transaction;
|
||||||
|
|
||||||
/* first access by this transaction */
|
/* first access by this transaction */
|
||||||
|
@ -1644,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
|
||||||
|
|
||||||
if (jh->b_cp_transaction) {
|
if (jh->b_cp_transaction) {
|
||||||
JBUFFER_TRACE(jh, "on running+cp transaction");
|
JBUFFER_TRACE(jh, "on running+cp transaction");
|
||||||
|
/*
|
||||||
|
* We don't want to write the buffer anymore, clear the
|
||||||
|
* bit so that we don't confuse checks in
|
||||||
|
* __journal_file_buffer
|
||||||
|
*/
|
||||||
|
clear_buffer_dirty(bh);
|
||||||
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
|
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
|
||||||
clear_buffer_jbddirty(bh);
|
|
||||||
may_free = 0;
|
may_free = 0;
|
||||||
} else {
|
} else {
|
||||||
JBUFFER_TRACE(jh, "on running transaction");
|
JBUFFER_TRACE(jh, "on running transaction");
|
||||||
|
@ -1896,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
|
||||||
if (jh->b_transaction && jh->b_jlist == jlist)
|
if (jh->b_transaction && jh->b_jlist == jlist)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* The following list of buffer states needs to be consistent
|
|
||||||
* with __jbd_unexpected_dirty_buffer()'s handling of dirty
|
|
||||||
* state. */
|
|
||||||
|
|
||||||
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
|
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
|
||||||
jlist == BJ_Shadow || jlist == BJ_Forget) {
|
jlist == BJ_Shadow || jlist == BJ_Forget) {
|
||||||
|
/*
|
||||||
|
* For metadata buffers, we track dirty bit in buffer_jbddirty
|
||||||
|
* instead of buffer_dirty. We should not see a dirty bit set
|
||||||
|
* here because we clear it in do_get_write_access but e.g.
|
||||||
|
* tune2fs can modify the sb and set the dirty bit at any time
|
||||||
|
* so we try to gracefully handle that.
|
||||||
|
*/
|
||||||
|
if (buffer_dirty(bh))
|
||||||
|
warn_dirty_buffer(bh);
|
||||||
if (test_clear_buffer_dirty(bh) ||
|
if (test_clear_buffer_dirty(bh) ||
|
||||||
test_clear_buffer_jbddirty(bh))
|
test_clear_buffer_jbddirty(bh))
|
||||||
was_dirty = 1;
|
was_dirty = 1;
|
||||||
|
|
|
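The invariant these hunks establish (per the comments in the diff; the
lock_buffer pairing is implied by "do it under the buffer lock"): for metadata
under jbd2, "needs writeout" is tracked by the JBDDirty flag, never by
buffer_dirty. Sketch:

        /*
         * lock_buffer(bh);
         * clear_buffer_dirty(bh);      stop the block layer writing it out
         * set_buffer_jbddirty(bh);     the journal owns writeout from here
         * unlock_buffer(bh);
         *
         * So checkpointing I/O and journal_get_write_access() can no longer
         * both treat the same buffer as theirs to write, which was the race.
         */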
include/trace/events/ext4.h
@@ -34,7 +34,8 @@ TRACE_EVENT(ext4_free_inode,
 
         TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
                   jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
-                  __entry->uid, __entry->gid, __entry->blocks)
+                  __entry->uid, __entry->gid,
+                  (unsigned long long) __entry->blocks)
 );
 
 TRACE_EVENT(ext4_request_inode,
@@ -189,7 +190,7 @@ TRACE_EVENT(ext4_journalled_write_end,
                   __entry->copied)
 );
 
-TRACE_EVENT(ext4_da_writepage,
+TRACE_EVENT(ext4_writepage,
         TP_PROTO(struct inode *inode, struct page *page),
 
         TP_ARGS(inode, page),
@@ -341,49 +342,6 @@ TRACE_EVENT(ext4_da_write_end,
                   __entry->copied)
 );
 
-TRACE_EVENT(ext4_normal_writepage,
-        TP_PROTO(struct inode *inode, struct page *page),
-
-        TP_ARGS(inode, page),
-
-        TP_STRUCT__entry(
-                __field(        dev_t,  dev                     )
-                __field(        ino_t,  ino                     )
-                __field(        pgoff_t, index                  )
-        ),
-
-        TP_fast_assign(
-                __entry->dev    = inode->i_sb->s_dev;
-                __entry->ino    = inode->i_ino;
-                __entry->index  = page->index;
-        ),
-
-        TP_printk("dev %s ino %lu page_index %lu",
-                  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
-TRACE_EVENT(ext4_journalled_writepage,
-        TP_PROTO(struct inode *inode, struct page *page),
-
-        TP_ARGS(inode, page),
-
-        TP_STRUCT__entry(
-                __field(        dev_t,  dev                     )
-                __field(        ino_t,  ino                     )
-                __field(        pgoff_t, index                  )
-
-        ),
-
-        TP_fast_assign(
-                __entry->dev    = inode->i_sb->s_dev;
-                __entry->ino    = inode->i_ino;
-                __entry->index  = page->index;
-        ),
-
-        TP_printk("dev %s ino %lu page_index %lu",
-                  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_discard_blocks,
         TP_PROTO(struct super_block *sb, unsigned long long blk,
                               unsigned long long count),
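The cast in the first hunk silences a real varargs hazard, not just a
warning: %llu reads an unsigned long long slot, so the argument must actually
be one. A standalone illustration (the variable stands in for
__entry->blocks, whose underlying width differs across architectures):

#include <stdio.h>

int main(void)
{
        unsigned long blocks = 42;      /* stand-in for __entry->blocks */

        /* Mismatched on platforms where unsigned long != unsigned long long;
         * the explicit cast makes the pushed argument match the %llu slot. */
        printf("blocks %llu\n", (unsigned long long) blocks);
        return 0;
}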