UBIFS: fix commentaries
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
parent fb1cd01a33
commit 7d4e9ccb43
8 changed files with 9 additions and 9 deletions
@@ -322,8 +322,8 @@ static int can_use_rp(struct ubifs_info *c)
  * be large, because UBIFS does not do any index consolidation as long as
  * there is free space. IOW, the index may take a lot of LEBs, but the LEBs
  * will contain a lot of dirt.
- * o @c->min_idx_lebs is the the index presumably takes. IOW, the index may be
- *   consolidated to take up to @c->min_idx_lebs LEBs.
+ * o @c->min_idx_lebs is the number of LEBS the index presumably takes. IOW,
+ *   the index may be consolidated to take up to @c->min_idx_lebs LEBs.
  *
  * This function returns zero in case of success, and %-ENOSPC in case of
  * failure.
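The idea behind this comment can be shown with a minimal model: even if the index currently occupies many LEBs, only @c->min_idx_lebs need to be reserved, since consolidation can shrink it to that bound. The sketch below is hypothetical (all names invented), not the actual fs/ubifs/budget.c code:

/*
 * Illustrative model, not UBIFS code: the budget reserves only
 * min_idx_lebs for the index, because even if the index currently
 * spreads over more LEBs, it can be consolidated down to that bound.
 */
#include <stdio.h>

struct model_fs {
	long long leb_size;      /* size of one logical eraseblock */
	long long empty_lebs;    /* completely empty LEBs */
	long long idx_lebs;      /* LEBs the index takes right now */
	long long min_idx_lebs;  /* LEBs the index needs after consolidation */
};

/* Space available to user data: everything except the reserved index LEBs. */
static long long model_available(const struct model_fs *c)
{
	long long lebs = c->empty_lebs + c->idx_lebs - c->min_idx_lebs;

	return lebs > 0 ? lebs * c->leb_size : 0;
}

int main(void)
{
	struct model_fs c = { 131072, 10, 8, 3 };

	/* The index takes 8 LEBs now, but only 3 stay reserved for it. */
	printf("available: %lld bytes\n", model_available(&c));
	return 0;
}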
@@ -1214,7 +1214,7 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
 			/*
 			 * Make sure the last key in our znode is less or
-			 * equivalent than the the key in zbranch which goes
+			 * equivalent than the key in the zbranch which goes
 			 * after our pointing zbranch.
 			 */
 			cmp = keys_cmp(c, max,
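The invariant this debugging check enforces is that the largest key inside a child node never exceeds the key of the zbranch that follows it in the parent. A self-contained sketch with integer keys and invented names (not the real dbg_check_znode):

/*
 * Illustrative sketch, hypothetical names: verify that the last key of
 * child slot n stays below the key of the branch that follows it.
 */
#include <assert.h>
#include <stdio.h>

static int keys_cmp(int a, int b)
{
	return (a > b) - (a < b);
}

/* Return 0 if ordering holds for child slot n, -1 if it is violated. */
static int check_child_order(const int *parent_keys, int nbranch,
			     int n, int child_max_key)
{
	if (n + 1 >= nbranch)
		return 0; /* no following zbranch to compare against */

	/* last key in the child must be <= key of the next zbranch */
	return keys_cmp(child_max_key, parent_keys[n + 1]) > 0 ? -1 : 0;
}

int main(void)
{
	int parent_keys[] = { 10, 20, 30 };

	assert(check_child_order(parent_keys, 3, 0, 15) == 0);  /* 15 <= 20 */
	assert(check_child_order(parent_keys, 3, 1, 35) == -1); /* 35 > 30  */
	printf("ordering checks passed\n");
	return 0;
}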
@@ -959,7 +959,7 @@ static int do_writepage(struct page *page, int len)
  * whole index and correct all inode sizes, which is long an unacceptable.
  *
  * To prevent situations like this, UBIFS writes pages back only if they are
- * within last synchronized inode size, i.e. the the size which has been
+ * within the last synchronized inode size, i.e. the size which has been
  * written to the flash media last time. Otherwise, UBIFS forces inode
  * write-back, thus making sure the on-flash inode contains current inode size,
  * and then keeps writing pages back.
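The rule described here, write a page directly only when it lies within the last synchronized inode size, otherwise sync the inode first, can be modeled in a few lines. Hypothetical names throughout, not fs/ubifs/file.c:

/*
 * Illustrative model: pages beyond the last synchronized inode size
 * force an inode write-back before the page itself is written.
 */
#include <stdio.h>

#define PAGE_SIZE 4096LL

struct model_inode {
	long long i_size;        /* current, in-memory size */
	long long synced_i_size; /* size last written to the media */
};

static void sync_inode(struct model_inode *ui)
{
	ui->synced_i_size = ui->i_size; /* pretend the inode hit the flash */
}

static void writepage(struct model_inode *ui, long long page_index)
{
	long long end = (page_index + 1) * PAGE_SIZE;

	if (end > ui->synced_i_size)
		sync_inode(ui); /* force inode write-back first */
	printf("page %lld written\n", page_index);
}

int main(void)
{
	struct model_inode ui = { 3 * PAGE_SIZE, PAGE_SIZE };

	writepage(&ui, 0); /* within synced size: written directly   */
	writepage(&ui, 2); /* beyond synced size: inode synced first */
	return 0;
}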
@@ -1365,7 +1365,7 @@ out_ro:
  * @host: host inode
  *
  * This function writes the updated version of an extended attribute inode and
- * the host inode tho the journal (to the base head). The host inode is written
+ * the host inode to the journal (to the base head). The host inode is written
  * after the extended attribute inode in order to guarantee that the extended
  * attribute will be flushed when the inode is synchronized by 'fsync()' and
  * consequently, the write-buffer is synchronized. This function returns zero
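The ordering guarantee reads naturally as a write-buffer property: syncing flushes everything up to the last record, so putting the xattr inode before the host inode means an fsync() of the host covers both. A toy model, not fs/ubifs/journal.c, with invented names:

/*
 * Illustrative sketch: records land in a write-buffer in order, and a
 * sync flushes everything written so far, so the earlier xattr record
 * always reaches the media whenever the later host record does.
 */
#include <stdio.h>
#include <string.h>

static char wbuf[256];
static size_t wbuf_used, wbuf_synced;

static void wbuf_write(const char *rec)
{
	size_t len = strlen(rec);

	memcpy(wbuf + wbuf_used, rec, len);
	wbuf_used += len;
}

static void wbuf_sync(void)
{
	wbuf_synced = wbuf_used; /* everything written so far is on media */
}

int main(void)
{
	wbuf_write("[xattr inode]"); /* written first                    */
	wbuf_write("[host inode]");  /* written after the xattr inode    */
	wbuf_sync();                 /* fsync() on the host syncs both   */
	printf("synced %zu of %zu bytes\n", wbuf_synced, wbuf_used);
	return 0;
}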
@@ -239,7 +239,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
 	}
 
 	/*
-	 * Make sure the the amount of space in buds will not exceed
+	 * Make sure the amount of space in buds will not exceed the
 	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
 	 * limits.
 	 *
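The check being described bounds journal replay work: a new bud is only admitted while total bud space stays under the limit. A minimal model of that admission test, with hypothetical names rather than the real log.c logic:

/*
 * Illustrative model: before admitting a new bud, make sure total bud
 * space cannot exceed max_bud_bytes, which bounds mount-time replay.
 */
#include <stdio.h>

struct model_log {
	long long leb_size;
	long long bud_bytes;     /* space currently in buds           */
	long long max_bud_bytes; /* hard limit that bounds mount time */
};

/* Return 1 if a new bud (one LEB) may be added, 0 if a commit is needed. */
static int may_add_bud(const struct model_log *c)
{
	return c->bud_bytes + c->leb_size <= c->max_bud_bytes;
}

int main(void)
{
	struct model_log c = { 131072, 6 * 131072, 8 * 131072 };

	printf("add bud: %s\n", may_add_bud(&c) ? "ok" : "commit first");
	return 0;
}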
@@ -1,4 +1,4 @@
-/*
+/*
  * This file is part of UBIFS.
  *
  * Copyright (C) 2006-2008 Nokia Corporation.
@@ -143,7 +143,7 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r)
 		dirty -= c->leb_size - lp->free;
 		/*
 		 * If the replay order was perfect the dirty space would now be
-		 * zero. The order is not perfect because the the journal heads
+		 * zero. The order is not perfect because the journal heads
 		 * race with each other. This is not a problem but is does mean
 		 * that the dirty space may temporarily exceed c->leb_size
 		 * during the replay.
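The accounting quirk this comment explains, a running dirty figure that can momentarily exceed one LEB before later entries settle it, can be seen in a toy loop. All the numbers below are hypothetical; only the update rule mirrors the `dirty -= c->leb_size - lp->free` line above:

/*
 * Illustrative sketch: replay entries for one LEB arrive out of order
 * (journal heads race), so the running dirty total can temporarily
 * exceed leb_size before the final entry brings it back into range.
 */
#include <stdio.h>

int main(void)
{
	const long long leb_size = 131072;
	/* free space reported by successive replay entries (hypothetical) */
	const long long free_vals[] = { 131072, 32768, 98304 };
	long long dirty = 200000; /* hypothetical running total */
	int i;

	for (i = 0; i < 3; i++) {
		dirty -= leb_size - free_vals[i];
		printf("step %d: dirty=%lld%s\n", i, dirty,
		       dirty > leb_size ? " (temporarily > leb_size)" : "");
	}
	return 0;
}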
@@ -1252,7 +1252,7 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
  * splitting in the middle of the colliding sequence. Also, when
  * removing the leftmost key, we would have to correct the key of the
  * parent node, which would introduce additional complications. Namely,
- * if we changed the the leftmost key of the parent znode, the garbage
+ * if we changed the leftmost key of the parent znode, the garbage
  * collector would be unable to find it (GC is doing this when GC'ing
  * indexing LEBs). Although we already have an additional RB-tree where
  * we save such changed znodes (see 'ins_clr_old_idx_znode()') until
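The failure mode described here, GC re-locating a node by its key and missing once the key has changed in memory, is easy to demonstrate in miniature. A hypothetical stand-in for the index lookup, not the TNC code:

/*
 * Illustrative sketch: GC searches with the key recorded on flash; if
 * the leftmost key of a node was changed in memory, that lookup misses,
 * which is why such changed znodes are remembered in a separate
 * structure (the comment above mentions an extra RB-tree).
 */
#include <stdio.h>

struct znode { int key; const char *name; };

/* Linear stand-in for an index lookup by key. */
static struct znode *lookup(struct znode *tbl, int n, int key)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].key == key)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	struct znode tbl[] = { { 10, "leftmost" }, { 20, "middle" } };

	tbl[0].key = 12; /* leftmost key changed in memory */

	/* GC still searches with the old on-flash key and misses. */
	printf("lookup(10): %s\n",
	       lookup(tbl, 2, 10) ? "found" : "not found");
	return 0;
}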