Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6
* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: pre-allocate bulk-read buffer
  UBIFS: do not allocate too much
  UBIFS: do not print scary memory allocation warnings
  UBIFS: allow for gaps when dirtying the LPT
  UBIFS: fix compilation warnings
  MAINTAINERS: change UBI/UBIFS git tree URLs
  UBIFS: endian handling fixes and annotations
  UBIFS: remove printk
commit 51eaaa6776
15 changed files with 223 additions and 111 deletions
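Most of the churn below comes from the bulk-read rework: instead of kmalloc()ing a fresh bu_info and buffer on every readpage, UBIFS now keeps one pre-allocated buffer in struct ubifs_info, guards it with c->bu_mutex, and falls back to a temporary allocation only when the mutex is contended. The following is a minimal userspace sketch of that pattern, not the kernel code itself; it assumes POSIX threads and plain malloc(), and the names (shared_buf, shared_lock, do_bulk_read) are invented for illustration.

/* Sketch only: a shared pre-allocated buffer with a trylock fast path
 * and a heap-allocated fallback, loosely mirroring ubifs_bulk_read(). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SHARED_BUF_LEN (128 * 1024)

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[SHARED_BUF_LEN];	/* the "pre-allocated" buffer */

/* Stand-in for the real bulk-read work. */
static int do_bulk_read(char *buf, size_t len)
{
	memset(buf, 0, len);
	return 0;
}

static int bulk_read(void)
{
	char *buf;
	int allocated = 0, err;

	/* Fast path: use the shared buffer if nobody else holds it. */
	if (pthread_mutex_trylock(&shared_lock) == 0) {
		buf = shared_buf;
	} else {
		/* Slow path: fall back to a private allocation. */
		buf = malloc(SHARED_BUF_LEN);
		if (!buf)
			return 0;	/* optimization only, just skip */
		allocated = 1;
	}

	err = do_bulk_read(buf, SHARED_BUF_LEN);

	if (allocated)
		free(buf);
	else
		pthread_mutex_unlock(&shared_lock);
	return err;
}

int main(void)
{
	printf("bulk_read() -> %d\n", bulk_read());
	return 0;
}
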
MAINTAINERS
@@ -4236,7 +4236,7 @@ M: dedekind@infradead.org
 P: Adrian Hunter
 M: ext-adrian.hunter@nokia.com
 L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/~dedekind/ubifs-2.6.git
+T: git git://git.infradead.org/ubifs-2.6.git
 W: http://www.linux-mtd.infradead.org/doc/ubifs.html
 S: Maintained

@@ -4290,7 +4290,7 @@ P: Artem Bityutskiy
 M: dedekind@infradead.org
 W: http://www.linux-mtd.infradead.org/
 L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/~dedekind/ubi-2.6.git
+T: git git://git.infradead.org/ubi-2.6.git
 S: Maintained

 USB ACM DRIVER

fs/ubifs/commit.c
@@ -234,8 +234,8 @@ int ubifs_bg_thread(void *info)
 	int err;
 	struct ubifs_info *c = info;

-	ubifs_msg("background thread \"%s\" started, PID %d",
-		  c->bgt_name, current->pid);
+	dbg_msg("background thread \"%s\" started, PID %d",
+		c->bgt_name, current->pid);
 	set_freezable();

 	while (1) {

fs/ubifs/debug.c
@@ -101,21 +101,24 @@ static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
 	if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
 		switch (type) {
 		case UBIFS_INO_KEY:
-			sprintf(p, "(%lu, %s)", key_inum(c, key),
+			sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
 				get_key_type(type));
 			break;
 		case UBIFS_DENT_KEY:
 		case UBIFS_XENT_KEY:
-			sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %#08x)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_hash(c, key));
 			break;
 		case UBIFS_DATA_KEY:
-			sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %u)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_block(c, key));
 			break;
 		case UBIFS_TRUN_KEY:
 			sprintf(p, "(%lu, %s)",
-				key_inum(c, key), get_key_type(type));
+				(unsigned long)key_inum(c, key),
+				get_key_type(type));
 			break;
 		default:
 			sprintf(p, "(bad key type: %#08x, %#08x)",
@@ -364,8 +367,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
 		       le32_to_cpu(mst->ihead_lnum));
 		printk(KERN_DEBUG "\tihead_offs %u\n",
 		       le32_to_cpu(mst->ihead_offs));
-		printk(KERN_DEBUG "\tindex_size %u\n",
-		       le32_to_cpu(mst->index_size));
+		printk(KERN_DEBUG "\tindex_size %llu\n",
+		       (unsigned long long)le64_to_cpu(mst->index_size));
 		printk(KERN_DEBUG "\tlpt_lnum %u\n",
 		       le32_to_cpu(mst->lpt_lnum));
 		printk(KERN_DEBUG "\tlpt_offs %u\n",
@@ -1589,7 +1592,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,

 	if (inum > c->highest_inum) {
 		ubifs_err("too high inode number, max. is %lu",
-			  c->highest_inum);
+			  (unsigned long)c->highest_inum);
 		return ERR_PTR(-EINVAL);
 	}

@@ -1668,16 +1671,18 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	ino_key_init(c, &key, inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", inum);
+		ubifs_err("inode %lu not found in index", (unsigned long)inum);
 		return ERR_PTR(-ENOENT);
 	} else if (err < 0) {
-		ubifs_err("error %d while looking up inode %lu", err, inum);
+		ubifs_err("error %d while looking up inode %lu",
+			  err, (unsigned long)inum);
 		return ERR_PTR(err);
 	}

 	zbr = &znode->zbranch[n];
 	if (zbr->len < UBIFS_INO_NODE_SZ) {
-		ubifs_err("bad node %lu node length %d", inum, zbr->len);
+		ubifs_err("bad node %lu node length %d",
+			  (unsigned long)inum, zbr->len);
 		return ERR_PTR(-EINVAL);
 	}

@@ -1697,7 +1702,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	kfree(ino);
 	if (IS_ERR(fscki)) {
 		ubifs_err("error %ld while adding inode %lu node",
-			  PTR_ERR(fscki), inum);
+			  PTR_ERR(fscki), (unsigned long)inum);
 		return fscki;
 	}

@@ -1786,7 +1791,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing data node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}

@@ -1819,7 +1825,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}

@@ -1832,7 +1839,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
 				  "trying to find parent inode node %lu",
-				  err, inum);
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}

@@ -1923,7 +1930,8 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 1) {
 				ubifs_err("directory inode %lu has %d "
 					  "direntries which refer it, but "
-					  "should be 1", fscki->inum,
+					  "should be 1",
+					  (unsigned long)fscki->inum,
 					  fscki->references);
 				goto out_dump;
 			}
@@ -1931,27 +1939,29 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 0) {
 				ubifs_err("root inode %lu has non-zero (%d) "
 					  "direntries which refer it",
-					  fscki->inum, fscki->references);
+					  (unsigned long)fscki->inum,
+					  fscki->references);
 				goto out_dump;
 			}
 			if (fscki->calc_sz != fscki->size) {
 				ubifs_err("directory inode %lu size is %lld, "
 					  "but calculated size is %lld",
-					  fscki->inum, fscki->size,
-					  fscki->calc_sz);
+					  (unsigned long)fscki->inum,
+					  fscki->size, fscki->calc_sz);
 				goto out_dump;
 			}
 			if (fscki->calc_cnt != fscki->nlink) {
 				ubifs_err("directory inode %lu nlink is %d, "
 					  "but calculated nlink is %d",
-					  fscki->inum, fscki->nlink,
-					  fscki->calc_cnt);
+					  (unsigned long)fscki->inum,
+					  fscki->nlink, fscki->calc_cnt);
 				goto out_dump;
 			}
 		} else {
 			if (fscki->references != fscki->nlink) {
 				ubifs_err("inode %lu nlink is %d, but "
-					  "calculated nlink is %d", fscki->inum,
+					  "calculated nlink is %d",
+					  (unsigned long)fscki->inum,
 					  fscki->nlink, fscki->references);
 				goto out_dump;
 			}
@@ -1959,20 +1969,21 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 		if (fscki->xattr_sz != fscki->calc_xsz) {
 			ubifs_err("inode %lu has xattr size %u, but "
 				  "calculated size is %lld",
-				  fscki->inum, fscki->xattr_sz,
+				  (unsigned long)fscki->inum, fscki->xattr_sz,
 				  fscki->calc_xsz);
 			goto out_dump;
 		}
 		if (fscki->xattr_cnt != fscki->calc_xcnt) {
 			ubifs_err("inode %lu has %u xattrs, but "
-				  "calculated count is %lld", fscki->inum,
+				  "calculated count is %lld",
+				  (unsigned long)fscki->inum,
 				  fscki->xattr_cnt, fscki->calc_xcnt);
 			goto out_dump;
 		}
 		if (fscki->xattr_nms != fscki->calc_xnms) {
 			ubifs_err("inode %lu has xattr names' size %u, but "
 				  "calculated names' size is %lld",
-				  fscki->inum, fscki->xattr_nms,
+				  (unsigned long)fscki->inum, fscki->xattr_nms,
 				  fscki->calc_xnms);
 			goto out_dump;
 		}
@@ -1985,11 +1996,12 @@ out_dump:
 	ino_key_init(c, &key, fscki->inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", fscki->inum);
+		ubifs_err("inode %lu not found in index",
+			  (unsigned long)fscki->inum);
 		return -ENOENT;
 	} else if (err < 0) {
 		ubifs_err("error %d while looking up inode %lu",
-			  err, fscki->inum);
+			  err, (unsigned long)fscki->inum);
 		return err;
 	}

@@ -2007,7 +2019,7 @@ out_dump:
 	}

 	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
-		  fscki->inum, zbr->lnum, zbr->offs);
+		  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
 	dbg_dump_node(c, ino);
 	kfree(ino);
 	return -EINVAL;

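Most of the debug.c hunks above belong to the "fix compilation warnings" commit: ino_t (and the ino_t-sized values returned by key_inum()) is not the same width on every architecture, so printing it with %lu needs an explicit cast. Below is a hedged, standalone illustration of the idiom; it is not kernel code, and the inode number is made up.

/* Why the (unsigned long) casts: ino_t's width is implementation-defined,
 * so the portable way to print it with %lu is to cast explicitly. */
#include <stdio.h>
#include <sys/types.h>

static void print_inode(ino_t inum)
{
	/* Without the cast this can warn (or misbehave) on targets where
	 * ino_t is not exactly unsigned long. */
	printf("inode %lu\n", (unsigned long)inum);
}

int main(void)
{
	print_inode((ino_t)12345);	/* arbitrary example inode number */
	return 0;
}
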
fs/ubifs/dir.c
@@ -161,7 +161,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
 			return ERR_PTR(-EINVAL);
 		}
 		ubifs_warn("running out of inode numbers (current %lu, max %d)",
-			   c->highest_inum, INUM_WATERMARK);
+			   (unsigned long)c->highest_inum, INUM_WATERMARK);
 	}

 	inode->i_ino = ++c->highest_inum;
@@ -428,7 +428,8 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 		dbg_gen("feed '%s', ino %llu, new f_pos %#x",
 			dent->name, (unsigned long long)le64_to_cpu(dent->inum),
 			key_hash_flash(c, &dent->key));
-		ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum);
+		ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
+			     ubifs_inode(dir)->creat_sqnum);

 		nm.len = le16_to_cpu(dent->nlen);
 		over = filldir(dirent, dent->name, nm.len, file->f_pos,

fs/ubifs/file.c
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		return err;
 	}

-	ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
+	ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);

 	len = le32_to_cpu(dn->size);
 	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,

 		dn = bu->buf + (bu->zbranch[nn].offs - offs);

-		ubifs_assert(dn->ch.sqnum >
+		ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
			     ubifs_inode(inode)->creat_sqnum);

 		len = le32_to_cpu(dn->size);
@@ -691,32 +691,22 @@ out_err:
 /**
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
  *
  * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
  */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+			      struct page *page1)
 {
 	pgoff_t offset = page1->index, end_index;
 	struct address_space *mapping = page1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	struct bu_info *bu;
 	int err, page_idx, page_cnt, ret = 0, n = 0;
+	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;

-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
-	if (!bu)
-		return 0;
-
-	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
-	if (!bu->buf)
-		goto out_free;
-
-	data_key_init(c, &bu->key, inode->i_ino,
-		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
 		goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
-		ui->read_in_a_row = 0;
-		ui->bulk_read = 0;
-		goto out_free;
+		goto out_bu_off;
 	}

 	if (bu->cnt) {
+		if (allocate) {
+			/*
+			 * Allocate bulk-read buffer depending on how many data
+			 * nodes we are going to read.
+			 */
+			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+				      bu->zbranch[bu->cnt - 1].len -
+				      bu->zbranch[0].offs;
+			ubifs_assert(bu->buf_len > 0);
+			ubifs_assert(bu->buf_len <= c->leb_size);
+			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+			if (!bu->buf)
+				goto out_bu_off;
+		}
+
 		err = ubifs_tnc_bulk_read(c, bu);
 		if (err)
 			goto out_warn;
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	ui->last_page_read = offset + page_idx - 1;

 out_free:
-	kfree(bu->buf);
-	kfree(bu);
+	if (allocate)
+		kfree(bu->buf);
 	return ret;

 out_warn:
 	ubifs_warn("ignoring error %d and skipping bulk-read", err);
 	goto out_free;
+
+out_bu_off:
+	ui->read_in_a_row = ui->bulk_read = 0;
+	goto out_free;
 }

 /**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
-	int ret = 0;
+	struct bu_info *bu;
+	int err = 0, allocated = 0;

 	ui->last_page_read = index;
-
 	if (!c->bulk_read)
 		return 0;
+
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
+
 	if (index != last_page_read + 1) {
 		/* Turn off bulk-read if we stop reading sequentially */
 		ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 0;
 		goto out_unlock;
 	}
+
 	if (!ui->bulk_read) {
 		ui->read_in_a_row += 1;
 		if (ui->read_in_a_row < 3)
@@ -829,10 +839,35 @@ static int ubifs_bulk_read(struct page *page)
 		/* Three reads in a row, so switch on bulk-read */
 		ui->bulk_read = 1;
 	}
-	ret = ubifs_do_bulk_read(c, page);
+
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
+
+	bu->buf_len = c->max_bu_buf_len;
+	data_key_init(c, &bu->key, inode->i_ino,
+		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+	err = ubifs_do_bulk_read(c, bu, page);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
+
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
-	return ret;
+	return err;
 }

 static int ubifs_readpage(struct file *file, struct page *page)

fs/ubifs/journal.c
@@ -690,8 +690,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 	int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR;
 	struct ubifs_inode *ui = ubifs_inode(inode);

-	dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key),
-		key_block(c, key), len, DBGKEY(key));
+	dbg_jnl("ino %lu, blk %u, len %d, key %s",
+		(unsigned long)key_inum(c, key), key_block(c, key), len,
+		DBGKEY(key));
 	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

 	data = kmalloc(dlen, GFP_NOFS);
@@ -1128,7 +1129,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 	ino_t inum = inode->i_ino;
 	unsigned int blk;

-	dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size);
+	dbg_jnl("ino %lu, size %lld -> %lld",
+		(unsigned long)inum, old_size, new_size);
 	ubifs_assert(!ui->data_len);
 	ubifs_assert(S_ISREG(inode->i_mode));
 	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

fs/ubifs/key.h
@@ -345,7 +345,7 @@ static inline int key_type_flash(const struct ubifs_info *c, const void *k)
 {
 	const union ubifs_key *key = k;

-	return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
+	return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
 }

 /**
@@ -416,7 +416,7 @@ static inline unsigned int key_block_flash(const struct ubifs_info *c,
 {
 	const union ubifs_key *key = k;

-	return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK;
+	return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
 }

 /**

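The key.h and ubifs_assert() hunks belong to the "endian handling fixes and annotations" commit: on-flash fields are little-endian (__le32/__le64), so they must go through le32_to_cpu()/le64_to_cpu() before being compared with native integers, and the union member used must be the one annotated as little-endian (j32 rather than u32) so sparse can check it. Here is a rough userspace analogue using glibc's <endian.h>; the struct and field names are invented for illustration, not UBIFS types.

/* Userspace analogue of the le64_to_cpu() fixes: convert on-disk
 * little-endian fields to host order before comparing them. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct on_disk_header {
	uint64_t sqnum_le;	/* stored little-endian on the medium */
};

static int newer_than(const struct on_disk_header *h, uint64_t last_sqnum)
{
	/* Comparing h->sqnum_le directly would be wrong on big-endian
	 * hosts; convert first, as the UBIFS assertions now do. */
	return le64toh(h->sqnum_le) > last_sqnum;
}

int main(void)
{
	struct on_disk_header h = { .sqnum_le = htole64(42) };

	printf("newer: %d\n", newer_than(&h, 7));
	return 0;
}
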
fs/ubifs/lpt.c
@@ -571,8 +571,6 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
		/* We assume here that LEB zero is never an LPT LEB */
		if (nnode->nbranch[iip].lnum)
			return ubifs_get_pnode(c, nnode, iip);
-		else
-			return NULL;
 	}

	/* Go up while can't go right */

fs/ubifs/orphan.c
@@ -105,7 +105,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
 	list_add_tail(&orphan->list, &c->orph_list);
 	list_add_tail(&orphan->new_list, &c->orph_new);
 	spin_unlock(&c->orphan_lock);
-	dbg_gen("ino %lu", inum);
+	dbg_gen("ino %lu", (unsigned long)inum);
 	return 0;
 }

@@ -132,14 +132,16 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
		else {
			if (o->dnext) {
				spin_unlock(&c->orphan_lock);
-				dbg_gen("deleted twice ino %lu", inum);
+				dbg_gen("deleted twice ino %lu",
+					(unsigned long)inum);
				return;
			}
			if (o->cnext) {
				o->dnext = c->orph_dnext;
				c->orph_dnext = o;
				spin_unlock(&c->orphan_lock);
-				dbg_gen("delete later ino %lu", inum);
+				dbg_gen("delete later ino %lu",
+					(unsigned long)inum);
				return;
			}
			rb_erase(p, &c->orph_tree);
@@ -151,12 +153,12 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
			}
			spin_unlock(&c->orphan_lock);
			kfree(o);
-			dbg_gen("inum %lu", inum);
+			dbg_gen("inum %lu", (unsigned long)inum);
			return;
		}
	}
	spin_unlock(&c->orphan_lock);
-	dbg_err("missing orphan ino %lu", inum);
+	dbg_err("missing orphan ino %lu", (unsigned long)inum);
	dbg_dump_stack();
 }

@@ -448,7 +450,7 @@ static void erase_deleted(struct ubifs_info *c)
		rb_erase(&orphan->rb, &c->orph_tree);
		list_del(&orphan->list);
		c->tot_orphans -= 1;
-		dbg_gen("deleting orphan ino %lu", orphan->inum);
+		dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum);
		kfree(orphan);
	}
	c->orph_dnext = NULL;
@@ -536,8 +538,8 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
	list_add_tail(&orphan->list, &c->orph_list);
	orphan->dnext = c->orph_dnext;
	c->orph_dnext = orphan;
-	dbg_mnt("ino %lu, new %d, tot %d",
-		inum, c->new_orphans, c->tot_orphans);
+	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+		c->new_orphans, c->tot_orphans);
	return 0;
 }

@@ -609,7 +611,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
		for (i = 0; i < n; i++) {
			inum = le64_to_cpu(orph->inos[i]);
-			dbg_rcvry("deleting orphaned inode %lu", inum);
+			dbg_rcvry("deleting orphaned inode %lu",
+				  (unsigned long)inum);
			err = ubifs_tnc_remove_ino(c, inum);
			if (err)
				return err;
@@ -840,8 +843,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
	if (inum != ci->last_ino) {
		/* Lowest node type is the inode node, so it comes first */
		if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
-			ubifs_err("found orphan node ino %lu, type %d", inum,
-				  key_type(c, &zbr->key));
+			ubifs_err("found orphan node ino %lu, type %d",
+				  (unsigned long)inum, key_type(c, &zbr->key));
		ci->last_ino = inum;
		ci->tot_inos += 1;
		err = ubifs_tnc_read_node(c, zbr, ci->node);
@@ -853,7 +856,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		/* Must be recorded as an orphan */
		if (!dbg_find_check_orphan(&ci->root, inum) &&
		    !dbg_find_orphan(c, inum)) {
-			ubifs_err("missing orphan, ino %lu", inum);
+			ubifs_err("missing orphan, ino %lu",
+				  (unsigned long)inum);
			ci->missing += 1;
		}
	}

fs/ubifs/recovery.c
@@ -168,12 +168,12 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
				struct ubifs_mst_node *mst)
 {
	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
-	uint32_t save_flags;
+	__le32 save_flags;

	dbg_rcvry("recovery");

	save_flags = mst->flags;
-	mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY);
+	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
	err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
@@ -1435,13 +1435,13 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
	err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
	if (err)
		goto out;
-	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs,
-		  i_size, e->d_size);
+	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ",
+		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
	return 0;

 out:
	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
-		   e->inum, e->i_size, e->d_size, err);
+		   (unsigned long)e->inum, e->i_size, e->d_size, err);
	return err;
 }

@@ -1472,7 +1472,8 @@ int ubifs_recover_size(struct ubifs_info *c)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
-				dbg_rcvry("removing ino %lu", e->inum);
+				dbg_rcvry("removing ino %lu",
+					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
@@ -1493,8 +1494,8 @@ int ubifs_recover_size(struct ubifs_info *c)
				return PTR_ERR(inode);
			if (inode->i_size < e->d_size) {
				dbg_rcvry("ino %lu size %lld -> %lld",
-					  e->inum, e->d_size,
-					  inode->i_size);
+					  (unsigned long)e->inum,
+					  e->d_size, inode->i_size);
				inode->i_size = e->d_size;
				ubifs_inode(inode)->ui_size = e->d_size;
				e->inode = inode;

fs/ubifs/replay.c
@@ -1065,7 +1065,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
	ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
	dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
		"highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
-		c->highest_inum);
+		(unsigned long)c->highest_inum);
 out:
	destroy_replay_tree(c);
	destroy_bud_list(c);

fs/ubifs/sb.c
@@ -81,6 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c)
	int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
	int min_leb_cnt = UBIFS_MIN_LEB_CNT;
	uint64_t tmp64, main_bytes;
+	__le64 tmp_le64;

	/* Some functions called from here depend on the @c->key_len filed */
	c->key_len = UBIFS_SK_LEN;
@@ -295,10 +296,10 @@ static int create_default_filesystem(struct ubifs_info *c)
	ino->ch.node_type = UBIFS_INO_NODE;
	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
	ino->nlink = cpu_to_le32(2);
-	tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
-	ino->atime_sec = tmp;
-	ino->ctime_sec = tmp;
-	ino->mtime_sec = tmp;
+	tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
+	ino->atime_sec = tmp_le64;
+	ino->ctime_sec = tmp_le64;
+	ino->mtime_sec = tmp_le64;
	ino->atime_nsec = 0;
	ino->ctime_nsec = 0;
	ino->mtime_nsec = 0;

fs/ubifs/super.c
@@ -36,6 +36,12 @@
 #include <linux/mount.h>
 #include "ubifs.h"

+/*
+ * Maximum amount of memory we may 'kmalloc()' without worrying that we are
+ * allocating too much.
+ */
+#define UBIFS_KMALLOC_OK (128*1024)
+
 /* Slab cache for UBIFS inodes */
 struct kmem_cache *ubifs_inode_slab;

@@ -561,18 +567,11 @@ static int init_constants_early(struct ubifs_info *c)
	 * calculations when reporting free space.
	 */
	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
-	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > 128 * 1024) {
-		/* Check if we can kmalloc more than 128KiB */
-		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
-
-		kfree(try);
-		if (!try)
-			c->bulk_read_buf_size = 128 * 1024;
-	}
+
+	/* Buffer size for bulk-reads */
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
	return 0;
 }

@@ -991,6 +990,34 @@ static void destroy_journal(struct ubifs_info *c)
	free_buds(c);
 }

+/**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+	ubifs_assert(c->bulk_read == 1);
+
+	if (c->bu.buf)
+		return; /* Already initialized */
+
+again:
+	c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!c->bu.buf) {
+		if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+			goto again;
+		}
+
+		/* Just disable bulk-read */
+		ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+			   "disabling it", c->max_bu_buf_len);
+		c->mount_opts.bulk_read = 1;
+		c->bulk_read = 0;
+		return;
+	}
+}
+
 /**
  * mount_ubifs - mount UBIFS file-system.
  * @c: UBIFS file-system description object
@@ -1059,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c)
		goto out_free;
	}

+	if (c->bulk_read == 1)
+		bu_init(c);
+
+	/*
+	 * We have to check all CRCs, even for data nodes, when we mount the FS
+	 * (specifically, when we are replaying).
+	 */
	c->always_chk_crc = 1;

	err = ubifs_read_superblock(c);
@@ -1289,6 +1323,7 @@ out_cbuf:
 out_dereg:
	dbg_failure_mode_deregistration(c);
 out_free:
+	kfree(c->bu.buf);
	vfree(c->ileb_buf);
	vfree(c->sbuf);
	kfree(c->bottom_up_buf);
@@ -1325,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c)
	kfree(c->cbuf);
	kfree(c->rcvrd_mst_node);
	kfree(c->mst_node);
+	kfree(c->bu.buf);
+	vfree(c->ileb_buf);
	vfree(c->sbuf);
	kfree(c->bottom_up_buf);
	UBIFS_DBG(vfree(c->dbg_buf));
-	vfree(c->ileb_buf);
	dbg_failure_mode_deregistration(c);
 }

@@ -1626,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
		ubifs_err("invalid or unknown remount parameter");
		return err;
	}
+
	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		err = ubifs_remount_rw(c);
		if (err)
@@ -1633,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
		ubifs_remount_ro(c);

+	if (c->bulk_read == 1)
+		bu_init(c);
+	else {
+		dbg_gen("disable bulk-read");
+		kfree(c->bu.buf);
+		c->bu.buf = NULL;
+	}
+
	return 0;
 }

@@ -1723,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
	mutex_init(&c->log_mutex);
	mutex_init(&c->mst_mutex);
	mutex_init(&c->umount_mutex);
+	mutex_init(&c->bu_mutex);
	init_waitqueue_head(&c->cmt_wq);
	c->buds = RB_ROOT;
	c->old_idx = RB_ROOT;

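bu_init() above implements the "do not allocate too much / do not print scary memory allocation warnings" policy: try the ideal buffer size with __GFP_NOWARN, retry once at the 128 KiB cap (UBIFS_KMALLOC_OK), and if even that fails simply disable bulk-read instead of failing the mount. A small userspace sketch of the same degradation policy follows; it assumes plain malloc() and uses invented names, so it is an illustration rather than the kernel implementation.

/* Sketch of bu_init()'s fallback policy: shrink the request once,
 * then disable the optional feature instead of failing hard. */
#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_OK (128 * 1024)		/* mirrors UBIFS_KMALLOC_OK */

struct bulk_read_state {
	void *buf;
	size_t buf_len;
	int enabled;
};

static void bu_init(struct bulk_read_state *bu, size_t wanted_len)
{
	bu->buf_len = wanted_len;
	bu->buf = malloc(bu->buf_len);
	if (!bu->buf && bu->buf_len > KMALLOC_OK) {
		/* Second, more modest attempt. */
		bu->buf_len = KMALLOC_OK;
		bu->buf = malloc(bu->buf_len);
	}
	if (!bu->buf) {
		/* Bulk-read is an optimization: disable it, don't fail. */
		fprintf(stderr, "disabling bulk-read\n");
		bu->enabled = 0;
		return;
	}
	bu->enabled = 1;
}

int main(void)
{
	struct bulk_read_state bu = { 0 };

	bu_init(&bu, 8 * 1024 * 1024);
	printf("bulk-read %s, buf_len %zu\n",
	       bu.enabled ? "enabled" : "disabled", bu.buf_len);
	free(bu.buf);
	return 0;
}
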
fs/ubifs/tnc.c
@@ -1501,7 +1501,12 @@ out:
  * @bu: bulk-read parameters and results
  *
  * Lookup consecutive data node keys for the same inode that reside
- * consecutively in the same LEB.
+ * consecutively in the same LEB. This function returns zero in case of success
+ * and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
+ * maxumum possible amount of nodes for bulk-read.
  */
 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
 {
@@ -2677,7 +2682,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

-	dbg_tnc("ino %lu", inum);
+	dbg_tnc("ino %lu", (unsigned long)inum);

	/*
	 * Walk all extended attribute entries and remove them together with
@@ -2697,7 +2702,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
		}

		xattr_inum = le64_to_cpu(xent->inum);
-		dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum);
+		dbg_tnc("xent '%s', ino %lu", xent->name,
+			(unsigned long)xattr_inum);

		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);

fs/ubifs/ubifs.h
@@ -753,7 +753,7 @@ struct ubifs_znode {
 };

 /**
- * struct bu_info - bulk-read information
+ * struct bu_info - bulk-read information.
  * @key: first data node key
  * @zbranch: zbranches of data nodes to bulk read
  * @buf: buffer to read into
@@ -969,7 +969,10 @@ struct ubifs_mount_opts {
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
- * @bulk_read_buf_size: buffer size for bulk-reads
  *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
+ *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -1217,7 +1220,10 @@ struct ubifs_info {
	struct ubifs_mst_node *mst_node;
	int mst_offs;
	struct mutex mst_mutex;
-	int bulk_read_buf_size;

+	int max_bu_buf_len;
+	struct mutex bu_mutex;
+	struct bu_info bu;
+
	int log_lebs;
	long long log_bytes;