Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6:
  jfs: needs crc32_le
  jfs: Fix error handling in metapage_writepage()
  jfs: return f_fsid for statfs(2)
  jfs: remove xtLookupList()
  jfs: clean up a dangling comment
commit ffd1428514
8 changed files with 46 additions and 344 deletions

fs/jfs/Kconfig
@@ -1,6 +1,7 @@
 config JFS_FS
 	tristate "JFS filesystem support"
 	select NLS
+	select CRC32
 	help
 	  This is a port of IBM's Journaled Filesystem .  More information is
 	  available in the file <file:Documentation/filesystems/jfs.txt>.

fs/jfs/jfs_extent.c
@@ -362,11 +362,12 @@ exit:
 int extHint(struct inode *ip, s64 offset, xad_t * xp)
 {
 	struct super_block *sb = ip->i_sb;
-	struct xadlist xadl;
-	struct lxdlist lxdl;
-	lxd_t lxd;
+	int nbperpage = JFS_SBI(sb)->nbperpage;
 	s64 prev;
-	int rc, nbperpage = JFS_SBI(sb)->nbperpage;
+	int rc = 0;
+	s64 xaddr;
+	int xlen;
+	int xflag;
 
 	/* init the hint as "no hint provided" */
 	XADaddress(xp, 0);
@@ -376,46 +377,30 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
 	 */
 	prev = ((offset & ~POFFSET) >> JFS_SBI(sb)->l2bsize) - nbperpage;
 
-	/* if the offsets in the first page of the file,
-	 * no hint provided.
+	/* if the offset is in the first page of the file, no hint provided.
 	 */
 	if (prev < 0)
-		return (0);
+		goto out;
 
-	/* prepare to lookup the previous page's extent info */
-	lxdl.maxnlxd = 1;
-	lxdl.nlxd = 1;
-	lxdl.lxd = &lxd;
-	LXDoffset(&lxd, prev)
-	LXDlength(&lxd, nbperpage);
+	rc = xtLookup(ip, prev, nbperpage, &xflag, &xaddr, &xlen, 0);
 
-	xadl.maxnxad = 1;
-	xadl.nxad = 0;
-	xadl.xad = xp;
+	if ((rc == 0) && xlen) {
+		if (xlen != nbperpage) {
+			jfs_error(ip->i_sb, "extHint: corrupt xtree");
+			rc = -EIO;
+		}
+		XADaddress(xp, xaddr);
+		XADlength(xp, xlen);
+		/*
+		 * only preserve the abnr flag within the xad flags
+		 * of the returned hint.
+		 */
+		xp->flag = xflag & XAD_NOTRECORDED;
+	} else
+		rc = 0;
 
-	/* perform the lookup */
-	if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
-		return (rc);
-
-	/* check if no extent exists for the previous page.
-	 * this is possible for sparse files.
-	 */
-	if (xadl.nxad == 0) {
-		// assert(ISSPARSE(ip));
-		return (0);
-	}
-
-	/* only preserve the abnr flag within the xad flags
-	 * of the returned hint.
-	 */
-	xp->flag &= XAD_NOTRECORDED;
-
-	if(xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
-		jfs_error(ip->i_sb, "extHint: corrupt xtree");
-		return -EIO;
-	}
-
-	return (0);
+out:
+	return (rc);
 }
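
To make the prev computation in the new extHint() concrete, here is a small
standalone sketch (not kernel code). It assumes 4 KiB pages and a 4 KiB
filesystem block size, so nbperpage is 1 and l2bsize is 12, and it treats
POFFSET as the in-page offset mask; the byte offset is an arbitrary example.

	/* hint arithmetic from extHint(), with assumed constants */
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const int l2bsize = 12;			/* log2(block size) */
		const int nbperpage = 1;		/* blocks per page */
		const int64_t POFFSET = (1 << 12) - 1;	/* offset within a page (assumed) */
		int64_t offset = 0x6123;		/* byte offset being allocated */

		/* block number of the page before the one containing 'offset' */
		int64_t prev = ((offset & ~POFFSET) >> l2bsize) - nbperpage;

		assert(prev == 5);	/* page at 0x6000 is block 6, previous page is block 5 */
		return 0;
	}

With those assumptions the hint for an allocation at offset 0x6123 is taken
from the extent backing block 5, i.e. the page just before the one being
extended; if prev is negative or the lookup finds a hole, no hint is set.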

fs/jfs/jfs_imap.c
@@ -56,12 +56,6 @@
 #include "jfs_superblock.h"
 #include "jfs_debug.h"
 
-/*
- * __mark_inode_dirty expects inodes to be hashed. Since we don't want
- * special inodes in the fileset inode space, we make them appear hashed,
- * but do not put on any lists.
- */
-
 /*
  * imap locks
  */
@@ -497,7 +491,9 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
 	release_metapage(mp);
 
 	/*
-	 * that will look hashed, but won't be on any list; hlist_del()
+	 * __mark_inode_dirty expects inodes to be hashed. Since we don't
+	 * want special inodes in the fileset inode space, we make them
+	 * appear hashed, but do not put on any lists. hlist_del()
 	 * will work fine and require no locking.
 	 */
 	ip->i_hash.pprev = &ip->i_hash.next;

fs/jfs/jfs_metapage.c
@@ -369,6 +369,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 	unsigned long bio_bytes = 0;
 	unsigned long bio_offset = 0;
 	int offset;
+	int bad_blocks = 0;
 
 	page_start = (sector_t)page->index <<
 		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
@@ -394,6 +395,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 		}
 
 		clear_bit(META_dirty, &mp->flag);
+		set_bit(META_io, &mp->flag);
 		block_offset = offset >> inode->i_blkbits;
 		lblock = page_start + block_offset;
 		if (bio) {
@@ -402,7 +404,6 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 				len = min(xlen, blocks_per_mp);
 				xlen -= len;
 				bio_bytes += len << inode->i_blkbits;
-				set_bit(META_io, &mp->flag);
 				continue;
 			}
 			/* Not contiguous */
@@ -424,12 +425,14 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
 		pblock = metapage_get_blocks(inode, lblock, &xlen);
 		if (!pblock) {
-			/* Need better error handling */
 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
-			dec_io(page, last_write_complete);
+			/*
+			 * We already called inc_io(), but can't cancel it
+			 * with dec_io() until we're done with the page
+			 */
+			bad_blocks++;
 			continue;
 		}
-		set_bit(META_io, &mp->flag);
 		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
 
 		bio = bio_alloc(GFP_NOFS, 1);
@@ -459,6 +462,9 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 
 	unlock_page(page);
 
+	if (bad_blocks)
+		goto err_out;
+
 	if (nr_underway == 0)
 		end_page_writeback(page);
 
@@ -474,7 +480,9 @@ skip:
 	bio_put(bio);
 	unlock_page(page);
 	dec_io(page, last_write_complete);
-
+err_out:
+	while (bad_blocks--)
+		dec_io(page, last_write_complete);
 	return -EIO;
 }
 

fs/jfs/jfs_types.h
@@ -57,35 +57,6 @@ struct timestruc_t {
 #define HIGHORDER	0x80000000u	/* high order bit on	*/
 #define	ONES		0xffffffffu	/* all bit on		*/
 
-/*
- *	logical xd (lxd)
- */
-typedef struct {
-	unsigned len:24;
-	unsigned off1:8;
-	u32 off2;
-} lxd_t;
-
-/* lxd_t field construction */
-#define	LXDlength(lxd, length32)	( (lxd)->len = length32 )
-#define	LXDoffset(lxd, offset64)\
-{\
-	(lxd)->off1 = ((s64)offset64) >> 32;\
-	(lxd)->off2 = (offset64) & 0xffffffff;\
-}
-
-/* lxd_t field extraction */
-#define	lengthLXD(lxd)	( (lxd)->len )
-#define	offsetLXD(lxd)\
-	( ((s64)((lxd)->off1)) << 32 | (lxd)->off2 )
-
-/* lxd list */
-struct lxdlist {
-	s16	maxnlxd;
-	s16	nlxd;
-	lxd_t	*lxd;
-};
-
 /*
  *	physical xd (pxd)
  */
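
The lxd_t removed above packs a 40-bit logical offset into an 8-bit high
field (off1) and a 32-bit low field (off2). A round trip of the
construction/extraction macros may be easier to follow as a standalone
userspace sketch, with int64_t/uint32_t standing in for the kernel's s64/u32
and an arbitrary test offset:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef struct {
		unsigned len:24;	/* extent length */
		unsigned off1:8;	/* bits 39..32 of the logical offset */
		uint32_t off2;		/* bits 31..0 of the logical offset */
	} lxd_t;

	#define LXDlength(lxd, length32)	( (lxd)->len = length32 )
	#define LXDoffset(lxd, offset64)\
	{\
		(lxd)->off1 = ((int64_t)offset64) >> 32;\
		(lxd)->off2 = (offset64) & 0xffffffff;\
	}
	#define lengthLXD(lxd)	( (lxd)->len )
	#define offsetLXD(lxd)\
		( ((int64_t)((lxd)->off1)) << 32 | (lxd)->off2 )

	int main(void)
	{
		lxd_t lxd;
		int64_t off = 0x12abcdef01LL;	/* arbitrary 40-bit offset */

		LXDoffset(&lxd, off)		/* the macro expands to a block, no ';' */
		LXDlength(&lxd, 8);

		assert(offsetLXD(&lxd) == off);
		printf("off1=%#x off2=%#x len=%u\n",
		       (unsigned)lxd.off1, (unsigned)lxd.off2, (unsigned)lengthLXD(&lxd));
		return 0;
	}

With xtLookupList() gone (see the jfs_xtree.c hunks below), nothing in this
diff builds an lxd_t any more, which is why the type, its macros and struct
lxdlist can be deleted here.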

fs/jfs/jfs_xtree.c
@@ -164,11 +164,8 @@ int xtLookup(struct inode *ip, s64 lstart,
 		/* is lookup offset beyond eof ? */
 		size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
 		       JFS_SBI(ip->i_sb)->l2bsize;
-		if (lstart >= size) {
-			jfs_err("xtLookup: lstart (0x%lx) >= size (0x%lx)",
-				(ulong) lstart, (ulong) size);
+		if (lstart >= size)
 			return 0;
-		}
 	}
 
 	/*
@@ -220,264 +217,6 @@ int xtLookup(struct inode *ip, s64 lstart,
 	return rc;
 }
 
-
-/*
- *	xtLookupList()
- *
- * function:	map a single logical extent into a list of physical extent;
- *
- * parameter:
- *	struct inode	*ip,
- *	struct lxdlist	*lxdlist,	lxd list (in)
- *	struct xadlist	*xadlist,	xad list (in/out)
- *	int		flag)
- *
- * coverage of lxd by xad under assumption of
- * . lxd's are ordered and disjoint.
- * . xad's are ordered and disjoint.
- *
- * return:
- *	0:	success
- *
- * note: a page being written (even a single byte) is backed fully,
- *	except the last page which is only backed with blocks
- *	required to cover the last byte;
- *	the extent backing a page is fully contained within an xad;
- */
-int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
-		 struct xadlist * xadlist, int flag)
-{
-	int rc = 0;
-	struct btstack btstack;
-	int cmp;
-	s64 bn;
-	struct metapage *mp;
-	xtpage_t *p;
-	int index;
-	lxd_t *lxd;
-	xad_t *xad, *pxd;
-	s64 size, lstart, lend, xstart, xend, pstart;
-	s64 llen, xlen, plen;
-	s64 xaddr, paddr;
-	int nlxd, npxd, maxnpxd;
-
-	npxd = xadlist->nxad = 0;
-	maxnpxd = xadlist->maxnxad;
-	pxd = xadlist->xad;
-
-	nlxd = lxdlist->nlxd;
-	lxd = lxdlist->lxd;
-
-	lstart = offsetLXD(lxd);
-	llen = lengthLXD(lxd);
-	lend = lstart + llen;
-
-	size = (ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
-	    JFS_SBI(ip->i_sb)->l2bsize;
-
-	/*
-	 * search for the xad entry covering the logical extent
-	 */
-search:
-	if (lstart >= size)
-		return 0;
-
-	if ((rc = xtSearch(ip, lstart, NULL, &cmp, &btstack, 0)))
-		return rc;
-
-	/*
-	 * compute the physical extent covering logical extent
-	 *
-	 * N.B. search may have failed (e.g., hole in sparse file),
-	 * and returned the index of the next entry.
-	 */
-//map:
-	/* retrieve search result */
-	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
-
-	/* is xad on the next sibling page ? */
-	if (index == le16_to_cpu(p->header.nextindex)) {
-		if (p->header.flag & BT_ROOT)
-			goto mapend;
-
-		if ((bn = le64_to_cpu(p->header.next)) == 0)
-			goto mapend;
-
-		XT_PUTPAGE(mp);
-
-		/* get next sibling page */
-		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
-		if (rc)
-			return rc;
-
-		index = XTENTRYSTART;
-	}
-
-	xad = &p->xad[index];
-
-	/*
-	 * is lxd covered by xad ?
-	 */
-compare:
-	xstart = offsetXAD(xad);
-	xlen = lengthXAD(xad);
-	xend = xstart + xlen;
-	xaddr = addressXAD(xad);
-
-compare1:
-	if (xstart < lstart)
-		goto compare2;
-
-	/* (lstart <= xstart) */
-
-	/* lxd is NOT covered by xad */
-	if (lend <= xstart) {
-		/*
-		 * get next lxd
-		 */
-		if (--nlxd == 0)
-			goto mapend;
-		lxd++;
-
-		lstart = offsetLXD(lxd);
-		llen = lengthLXD(lxd);
-		lend = lstart + llen;
-		if (lstart >= size)
-			goto mapend;
-
-		/* compare with the current xad */
-		goto compare1;
-	}
-	/* lxd is covered by xad */
-	else {			/* (xstart < lend) */
-
-		/* initialize new pxd */
-		pstart = xstart;
-		plen = min(lend - xstart, xlen);
-		paddr = xaddr;
-
-		goto cover;
-	}
-
-	/* (xstart < lstart) */
-compare2:
-	/* lxd is covered by xad */
-	if (lstart < xend) {
-		/* initialize new pxd */
-		pstart = lstart;
-		plen = min(xend - lstart, llen);
-		paddr = xaddr + (lstart - xstart);
-
-		goto cover;
-	}
-	/* lxd is NOT covered by xad */
-	else {			/* (xend <= lstart) */
-
-		/*
-		 * get next xad
-		 *
-		 * linear search next xad covering lxd on
-		 * the current xad page, and then tree search
-		 */
-		if (index == le16_to_cpu(p->header.nextindex) - 1) {
-			if (p->header.flag & BT_ROOT)
-				goto mapend;
-
-			XT_PUTPAGE(mp);
-			goto search;
-		} else {
-			index++;
-			xad++;
-
-			/* compare with new xad */
-			goto compare;
-		}
-	}
-
-	/*
-	 * lxd is covered by xad and a new pxd has been initialized
-	 * (lstart <= xstart < lend) or (xstart < lstart < xend)
-	 */
-cover:
-	/* finalize pxd corresponding to current xad */
-	XT_PUTENTRY(pxd, xad->flag, pstart, plen, paddr);
-
-	if (++npxd >= maxnpxd)
-		goto mapend;
-	pxd++;
-
-	/*
-	 * lxd is fully covered by xad
-	 */
-	if (lend <= xend) {
-		/*
-		 * get next lxd
-		 */
-		if (--nlxd == 0)
-			goto mapend;
-		lxd++;
-
-		lstart = offsetLXD(lxd);
-		llen = lengthLXD(lxd);
-		lend = lstart + llen;
-		if (lstart >= size)
-			goto mapend;
-
-		/*
-		 * test for old xad covering new lxd
-		 * (old xstart < new lstart)
-		 */
-		goto compare2;
-	}
-	/*
-	 * lxd is partially covered by xad
-	 */
-	else {			/* (xend < lend) */
-
-		/*
-		 * get next xad
-		 *
-		 * linear search next xad covering lxd on
-		 * the current xad page, and then next xad page search
-		 */
-		if (index == le16_to_cpu(p->header.nextindex) - 1) {
-			if (p->header.flag & BT_ROOT)
-				goto mapend;
-
-			if ((bn = le64_to_cpu(p->header.next)) == 0)
-				goto mapend;
-
-			XT_PUTPAGE(mp);
-
-			/* get next sibling page */
-			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
-			if (rc)
-				return rc;
-
-			index = XTENTRYSTART;
-			xad = &p->xad[index];
-		} else {
-			index++;
-			xad++;
-		}
-
-		/*
-		 * test for new xad covering old lxd
-		 * (old lstart < new xstart)
-		 */
-		goto compare;
-	}
-
-mapend:
-	xadlist->nxad = npxd;
-
-//out:
-	XT_PUTPAGE(mp);
-
-	return rc;
-}
-
-
 /*
  *	xtSearch()
  *

fs/jfs/jfs_xtree.h
@@ -110,8 +110,6 @@ typedef union {
  */
 extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
 		    int *pflag, s64 * paddr, int *plen, int flag);
-extern int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
-			struct xadlist * xadlist, int flag);
 extern void xtInitRoot(tid_t tid, struct inode *ip);
 extern int xtInsert(tid_t tid, struct inode *ip,
 		    int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);

fs/jfs/super.c
@@ -29,6 +29,7 @@
 #include <linux/posix_acl.h>
 #include <linux/buffer_head.h>
 #include <linux/exportfs.h>
+#include <linux/crc32.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 
@@ -168,6 +169,9 @@ static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_files = maxinodes;
 	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
 				    atomic_read(&imap->im_numfree));
+	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
+	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
+					   sizeof(sbi->uuid)/2);
 
 	buf->f_namelen = JFS_NAME_MAX;
 	return 0;
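
The new f_fsid is the crc32_le() of each half of the on-disk uuid, which is
what the select CRC32 added to fs/jfs/Kconfig above is for. A minimal
userspace sketch of how the result becomes visible (the mount point is a
placeholder, and __val is glibc's field name inside fsid_t):

	#include <stdio.h>
	#include <sys/statfs.h>

	int main(int argc, char **argv)
	{
		struct statfs st;
		const char *path = argc > 1 ? argv[1] : "/mnt/jfs";	/* placeholder */

		if (statfs(path, &st) != 0) {
			perror("statfs");
			return 1;
		}
		/* before this patch a JFS mount reported zeroes here */
		printf("f_fsid = %#x:%#x\n",
		       (unsigned)st.f_fsid.__val[0], (unsigned)st.f_fsid.__val[1]);
		return 0;
	}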