2005-04-16 22:20:36 +00:00
|
|
|
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * William Irwin, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>

#include <asm/uaccess.h>

/* some random number */
#define HUGETLBFS_MAGIC	0x958458f6
|
|
|
|
|
2007-02-12 08:55:41 +00:00
|
|
|
static const struct super_operations hugetlbfs_ops;
|
2006-06-28 11:26:44 +00:00
|
|
|
static const struct address_space_operations hugetlbfs_aops;
|
2006-03-28 09:56:42 +00:00
|
|
|
const struct file_operations hugetlbfs_file_operations;
|
2007-02-12 08:55:39 +00:00
|
|
|
static const struct inode_operations hugetlbfs_dir_inode_operations;
|
|
|
|
static const struct inode_operations hugetlbfs_inode_operations;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
static struct backing_dev_info hugetlbfs_backing_dev_info = {
|
|
|
|
.ra_pages = 0, /* No readahead */
|
2008-04-30 07:54:37 +00:00
|
|
|
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
int sysctl_hugetlb_shm_group;
|
|
|
|
|
2007-07-16 06:40:52 +00:00
|
|
|
enum {
|
|
|
|
Opt_size, Opt_nr_inodes,
|
|
|
|
Opt_mode, Opt_uid, Opt_gid,
|
2008-07-24 04:27:43 +00:00
|
|
|
Opt_pagesize,
|
2007-07-16 06:40:52 +00:00
|
|
|
Opt_err,
|
|
|
|
};
|
|
|
|
|
|
|
|
static match_table_t tokens = {
|
|
|
|
{Opt_size, "size=%s"},
|
|
|
|
{Opt_nr_inodes, "nr_inodes=%s"},
|
|
|
|
{Opt_mode, "mode=%o"},
|
|
|
|
{Opt_uid, "uid=%u"},
|
|
|
|
{Opt_gid, "gid=%u"},
|
2008-07-24 04:27:43 +00:00
|
|
|
{Opt_pagesize, "pagesize=%s"},
|
2007-07-16 06:40:52 +00:00
|
|
|
{Opt_err, NULL},
|
|
|
|
};
|
|
|
|
|
2005-10-30 01:16:47 +00:00
|
|
|
static void huge_pagevec_release(struct pagevec *pvec)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < pagevec_count(pvec); ++i)
|
|
|
|
put_page(pvec->pages[i]);
|
|
|
|
|
|
|
|
pagevec_reinit(pvec);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
{
|
2006-12-08 10:37:07 +00:00
|
|
|
struct inode *inode = file->f_path.dentry->d_inode;
|
2005-04-16 22:20:36 +00:00
|
|
|
loff_t len, vma_len;
|
|
|
|
int ret;
|
2008-07-24 04:27:41 +00:00
|
|
|
struct hstate *h = hstate_file(file);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[PATCH] hugetlb: prepare_hugepage_range check offset too
(David:)
If hugetlbfs_file_mmap() returns a failure to do_mmap_pgoff() - for example,
because the given file offset is not hugepage aligned - then do_mmap_pgoff
will go to the unmap_and_free_vma backout path.
But at this stage the vma hasn't been marked as hugepage, and the backout path
will call unmap_region() on it. That will eventually call down to the
non-hugepage version of unmap_page_range(). On ppc64, at least, that will
cause serious problems if there are any existing hugepage pagetable entries in
the vicinity - for example if there are any other hugepage mappings under the
same PUD. unmap_page_range() will trigger a bad_pud() on the hugepage pud
entries. I suspect this will also cause bad problems on ia64, though I don't
have a machine to test it on.
(Hugh:)
prepare_hugepage_range() should check file offset alignment when it checks
virtual address and length, to stop MAP_FIXED with a bad huge offset from
unmapping before it fails further down. PowerPC should apply the same
prepare_hugepage_range alignment checks as ia64 and all the others do.
Then none of the alignment checks in hugetlbfs_file_mmap are required (nor
is the check for too small a mapping); but even so, move up setting of
VM_HUGETLB and add a comment to warn of what David Gibson discovered - if
hugetlbfs_file_mmap fails before setting it, do_mmap_pgoff's unmap_region
when unwinding from error will go the non-huge way, which may cause bad
behaviour on architectures (powerpc and ia64) which segregate their huge
mappings into a separate region of the address space.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-11-14 10:03:32 +00:00
|
|
|
/*
|
2007-08-31 06:56:40 +00:00
|
|
|
* vma address alignment (but not the pgoff alignment) has
|
|
|
|
* already been checked by prepare_hugepage_range. If you add
|
|
|
|
* any error returns here, do so after setting VM_HUGETLB, so
|
|
|
|
* is_vm_hugetlb_page tests below unmap_region go the right
|
|
|
|
* way when do_mmap_pgoff unwinds (may be important on powerpc
|
|
|
|
* and ia64).
|
[PATCH] hugetlb: prepare_hugepage_range check offset too
(David:)
If hugetlbfs_file_mmap() returns a failure to do_mmap_pgoff() - for example,
because the given file offset is not hugepage aligned - then do_mmap_pgoff
will go to the unmap_and_free_vma backout path.
But at this stage the vma hasn't been marked as hugepage, and the backout path
will call unmap_region() on it. That will eventually call down to the
non-hugepage version of unmap_page_range(). On ppc64, at least, that will
cause serious problems if there are any existing hugepage pagetable entries in
the vicinity - for example if there are any other hugepage mappings under the
same PUD. unmap_page_range() will trigger a bad_pud() on the hugepage pud
entries. I suspect this will also cause bad problems on ia64, though I don't
have a machine to test it on.
(Hugh:)
prepare_hugepage_range() should check file offset alignment when it checks
virtual address and length, to stop MAP_FIXED with a bad huge offset from
unmapping before it fails further down. PowerPC should apply the same
prepare_hugepage_range alignment checks as ia64 and all the others do.
Then none of the alignment checks in hugetlbfs_file_mmap are required (nor
is the check for too small a mapping); but even so, move up setting of
VM_HUGETLB and add a comment to warn of what David Gibson discovered - if
hugetlbfs_file_mmap fails before setting it, do_mmap_pgoff's unmap_region
when unwinding from error will go the non-huge way, which may cause bad
behaviour on architectures (powerpc and ia64) which segregate their huge
mappings into a separate region of the address space.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-11-14 10:03:32 +00:00
|
|
|
*/
|
|
|
|
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
|
|
|
|
vma->vm_ops = &hugetlb_vm_ops;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-24 04:27:41 +00:00
|
|
|
if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT))
|
2007-08-31 06:56:40 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
|
|
|
|
|
2006-01-09 23:59:24 +00:00
|
|
|
mutex_lock(&inode->i_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
file_accessed(file);
|
|
|
|
|
|
|
|
ret = -ENOMEM;
|
|
|
|
len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
|
|
|
|
|
2008-07-24 04:27:23 +00:00
|
|
|
if (hugetlb_reserve_pages(inode,
|
2008-07-24 04:27:41 +00:00
|
|
|
vma->vm_pgoff >> huge_page_order(h),
|
|
|
|
len >> huge_page_shift(h), vma))
|
2006-06-23 09:03:15 +00:00
|
|
|
goto out;
|
[PATCH] hugepage: Strict page reservation for hugepage inodes
These days, hugepages are demand-allocated at first fault time. There's a
somewhat dubious (and racy) heuristic when making a new mmap() to check if
there are enough available hugepages to fully satisfy that mapping.
A particularly obvious case where the heuristic breaks down is where a
process maps its hugepages not as a single chunk, but as a bunch of
individually mmap()ed (or shmat()ed) blocks without touching and
instantiating the pages in between allocations. In this case the size of
each block is compared against the total number of available hugepages.
It's thus easy for the process to become overcommitted, because each block
mapping will succeed, although the total number of hugepages required by
all blocks exceeds the number available. In particular, this defeats such
a program which will detect a mapping failure and adjust its hugepage usage
downward accordingly.
The patch below addresses this problem, by strictly reserving a number of
physical hugepages for hugepage inodes which have been mapped, but not
instatiated. MAP_SHARED mappings are thus "safe" - they will fail on
mmap(), not later with an OOM SIGKILL. MAP_PRIVATE mappings can still
trigger an OOM. (Actually SHARED mappings can technically still OOM, but
only if the sysadmin explicitly reduces the hugepage pool between mapping
and instantiation)
This patch appears to address the problem at hand - it allows DB2 to start
correctly, for instance, which previously suffered the failure described
above.
This patch causes no regressions on the libhugetblfs testsuite, and makes a
test (designed to catch this problem) pass which previously failed (ppc64,
POWER5).
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 08:08:55 +00:00
|
|
|
|
2005-10-30 01:16:46 +00:00
|
|
|
ret = 0;
|
|
|
|
hugetlb_prefault_arch_hook(vma->vm_mm);
|
[PATCH] mmap zero-length hugetlb file with PROT_NONE to protect a hugetlb virtual area
Sometimes, applications need below call to be successful although
"/mnt/hugepages/file1" doesn't exist.
fd = open("/mnt/hugepages/file1", O_CREAT|O_RDWR, 0755);
*addr = mmap(NULL, 0x1024*1024*256, PROT_NONE, 0, fd, 0);
As for regular pages (or files), above call does work, but as for huge
pages, above call would fail because hugetlbfs_file_mmap would fail if
(!(vma->vm_flags & VM_WRITE) && len > inode->i_size).
This capability on huge page is useful on ia64 when the process wants to
protect one area on region 4, so other threads couldn't read/write this
area. A famous JVM (Java Virtual Machine) implementation on IA64 needs the
capability.
Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hugh@veritas.com>
[ Expand-on-mmap semantics again... this time matching normal fs's. wli ]
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-10 11:44:49 +00:00
|
|
|
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
|
2005-04-16 22:20:36 +00:00
|
|
|
inode->i_size = len;
|
|
|
|
out:
|
2006-01-09 23:59:24 +00:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-10-30 01:16:30 +00:00
|
|
|
* Called under down_write(mmap_sem).
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
2007-05-06 21:49:00 +00:00
|
|
|
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
|
2005-04-16 22:20:36 +00:00
|
|
|
static unsigned long
|
|
|
|
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|
|
|
unsigned long len, unsigned long pgoff, unsigned long flags)
|
|
|
|
{
|
|
|
|
struct mm_struct *mm = current->mm;
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
unsigned long start_addr;
|
2008-07-24 04:27:41 +00:00
|
|
|
struct hstate *h = hstate_file(file);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-24 04:27:41 +00:00
|
|
|
if (len & ~huge_page_mask(h))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EINVAL;
|
|
|
|
if (len > TASK_SIZE)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2007-05-06 21:50:12 +00:00
|
|
|
if (flags & MAP_FIXED) {
|
2008-07-24 04:27:41 +00:00
|
|
|
if (prepare_hugepage_range(file, addr, len))
|
2007-05-06 21:50:12 +00:00
|
|
|
return -EINVAL;
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (addr) {
|
2008-07-24 04:27:41 +00:00
|
|
|
addr = ALIGN(addr, huge_page_size(h));
|
2005-04-16 22:20:36 +00:00
|
|
|
vma = find_vma(mm, addr);
|
|
|
|
if (TASK_SIZE - len >= addr &&
|
|
|
|
(!vma || addr + len <= vma->vm_start))
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
start_addr = mm->free_area_cache;
|
|
|
|
|
2005-06-22 00:14:49 +00:00
|
|
|
if (len <= mm->cached_hole_size)
|
|
|
|
start_addr = TASK_UNMAPPED_BASE;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
full_search:
|
2008-07-24 04:27:41 +00:00
|
|
|
addr = ALIGN(start_addr, huge_page_size(h));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
|
|
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
|
|
if (TASK_SIZE - len < addr) {
|
|
|
|
/*
|
|
|
|
* Start a new search - just in case we missed
|
|
|
|
* some holes.
|
|
|
|
*/
|
|
|
|
if (start_addr != TASK_UNMAPPED_BASE) {
|
|
|
|
start_addr = TASK_UNMAPPED_BASE;
|
|
|
|
goto full_search;
|
|
|
|
}
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vma || addr + len <= vma->vm_start)
|
|
|
|
return addr;
|
2008-07-24 04:27:41 +00:00
|
|
|
addr = ALIGN(vma->vm_end, huge_page_size(h));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2007-10-16 08:26:22 +00:00
|
|
|
static int
|
|
|
|
hugetlbfs_read_actor(struct page *page, unsigned long offset,
|
|
|
|
char __user *buf, unsigned long count,
|
|
|
|
unsigned long size)
|
|
|
|
{
|
|
|
|
char *kaddr;
|
|
|
|
unsigned long left, copied = 0;
|
|
|
|
int i, chunksize;
|
|
|
|
|
|
|
|
if (size > count)
|
|
|
|
size = count;
|
|
|
|
|
|
|
|
/* Find which 4k chunk and offset with in that chunk */
|
|
|
|
i = offset >> PAGE_CACHE_SHIFT;
|
|
|
|
offset = offset & ~PAGE_CACHE_MASK;
|
|
|
|
|
|
|
|
while (size) {
|
|
|
|
chunksize = PAGE_CACHE_SIZE;
|
|
|
|
if (offset)
|
|
|
|
chunksize -= offset;
|
|
|
|
if (chunksize > size)
|
|
|
|
chunksize = size;
|
|
|
|
kaddr = kmap(&page[i]);
|
|
|
|
left = __copy_to_user(buf, kaddr + offset, chunksize);
|
|
|
|
kunmap(&page[i]);
|
|
|
|
if (left) {
|
|
|
|
copied += (chunksize - left);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
offset = 0;
|
|
|
|
size -= chunksize;
|
|
|
|
buf += chunksize;
|
|
|
|
copied += chunksize;
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
return copied ? copied : -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Support for read() - Find the page attached to f_mapping and copy out the
|
|
|
|
* data. Its *very* similar to do_generic_mapping_read(), we can't use that
|
|
|
|
* since it has PAGE_CACHE_SIZE assumptions.
|
|
|
|
*/
|
|
|
|
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
|
|
|
|
size_t len, loff_t *ppos)
|
|
|
|
{
|
2008-07-24 04:27:41 +00:00
|
|
|
struct hstate *h = hstate_file(filp);
|
2007-10-16 08:26:22 +00:00
|
|
|
struct address_space *mapping = filp->f_mapping;
|
|
|
|
struct inode *inode = mapping->host;
|
2008-07-24 04:27:41 +00:00
|
|
|
unsigned long index = *ppos >> huge_page_shift(h);
|
|
|
|
unsigned long offset = *ppos & ~huge_page_mask(h);
|
2007-10-16 08:26:22 +00:00
|
|
|
unsigned long end_index;
|
|
|
|
loff_t isize;
|
|
|
|
ssize_t retval = 0;
|
|
|
|
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
|
|
|
|
|
|
/* validate length */
|
|
|
|
if (len == 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
isize = i_size_read(inode);
|
|
|
|
if (!isize)
|
|
|
|
goto out;
|
|
|
|
|
2008-07-24 04:27:41 +00:00
|
|
|
end_index = (isize - 1) >> huge_page_shift(h);
|
2007-10-16 08:26:22 +00:00
|
|
|
for (;;) {
|
|
|
|
struct page *page;
|
2008-07-24 04:27:41 +00:00
|
|
|
unsigned long nr, ret;
|
2007-10-16 08:26:22 +00:00
|
|
|
|
|
|
|
/* nr is the maximum number of bytes to copy from this page */
|
2008-07-24 04:27:41 +00:00
|
|
|
nr = huge_page_size(h);
|
2007-10-16 08:26:22 +00:00
|
|
|
if (index >= end_index) {
|
|
|
|
if (index > end_index)
|
|
|
|
goto out;
|
2008-07-24 04:27:41 +00:00
|
|
|
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
|
2007-10-16 08:26:22 +00:00
|
|
|
if (nr <= offset) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nr = nr - offset;
|
|
|
|
|
|
|
|
/* Find the page */
|
|
|
|
page = find_get_page(mapping, index);
|
|
|
|
if (unlikely(page == NULL)) {
|
|
|
|
/*
|
|
|
|
* We have a HOLE, zero out the user-buffer for the
|
|
|
|
* length of the hole or request.
|
|
|
|
*/
|
|
|
|
ret = len < nr ? len : nr;
|
|
|
|
if (clear_user(buf, ret))
|
|
|
|
ret = -EFAULT;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We have the page, copy it to user space buffer.
|
|
|
|
*/
|
|
|
|
ret = hugetlbfs_read_actor(page, offset, buf, len, nr);
|
|
|
|
}
|
|
|
|
if (ret < 0) {
|
|
|
|
if (retval == 0)
|
|
|
|
retval = ret;
|
|
|
|
if (page)
|
|
|
|
page_cache_release(page);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
offset += ret;
|
|
|
|
retval += ret;
|
|
|
|
len -= ret;
|
2008-07-24 04:27:41 +00:00
|
|
|
index += offset >> huge_page_shift(h);
|
|
|
|
offset &= ~huge_page_mask(h);
|
2007-10-16 08:26:22 +00:00
|
|
|
|
|
|
|
if (page)
|
|
|
|
page_cache_release(page);
|
|
|
|
|
|
|
|
/* short read or no more work */
|
|
|
|
if ((ret != nr) || (len == 0))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
out:
|
2008-07-24 04:27:41 +00:00
|
|
|
*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
|
2007-10-16 08:26:22 +00:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Read a page. Again trivial. If it didn't already exist
|
|
|
|
* in the page cache, it is zero-filled.
|
|
|
|
*/
|
|
|
|
static int hugetlbfs_readpage(struct file *file, struct page * page)
|
|
|
|
{
|
|
|
|
unlock_page(page);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:25:03 +00:00
|
|
|
static int hugetlbfs_write_begin(struct file *file,
|
|
|
|
struct address_space *mapping,
|
|
|
|
loff_t pos, unsigned len, unsigned flags,
|
|
|
|
struct page **pagep, void **fsdata)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:25:03 +00:00
|
|
|
static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
|
|
|
|
loff_t pos, unsigned len, unsigned copied,
|
|
|
|
struct page *page, void *fsdata)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-10-16 08:25:03 +00:00
|
|
|
BUG();
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Remove one huge page from the page cache during truncation: cancel any
 * dirty state, drop the uptodate flag, unhook it from the mapping and
 * release the page-cache reference.  Caller holds the page lock.
 */
static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	remove_from_page_cache(page);
	put_page(page);
}
|
|
|
|
|
[PATCH] hugepage: Strict page reservation for hugepage inodes
These days, hugepages are demand-allocated at first fault time. There's a
somewhat dubious (and racy) heuristic when making a new mmap() to check if
there are enough available hugepages to fully satisfy that mapping.
A particularly obvious case where the heuristic breaks down is where a
process maps its hugepages not as a single chunk, but as a bunch of
individually mmap()ed (or shmat()ed) blocks without touching and
instantiating the pages in between allocations. In this case the size of
each block is compared against the total number of available hugepages.
It's thus easy for the process to become overcommitted, because each block
mapping will succeed, although the total number of hugepages required by
all blocks exceeds the number available. In particular, this defeats such
a program which will detect a mapping failure and adjust its hugepage usage
downward accordingly.
The patch below addresses this problem, by strictly reserving a number of
physical hugepages for hugepage inodes which have been mapped, but not
instatiated. MAP_SHARED mappings are thus "safe" - they will fail on
mmap(), not later with an OOM SIGKILL. MAP_PRIVATE mappings can still
trigger an OOM. (Actually SHARED mappings can technically still OOM, but
only if the sysadmin explicitly reduces the hugepage pool between mapping
and instantiation)
This patch appears to address the problem at hand - it allows DB2 to start
correctly, for instance, which previously suffered the failure described
above.
This patch causes no regressions on the libhugetblfs testsuite, and makes a
test (designed to catch this problem) pass which previously failed (ppc64,
POWER5).
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 08:08:55 +00:00
|
|
|
/*
 * Remove all pages of @inode from offset @lstart to EOF and give the
 * freed huge pages back to the reservation pool.  @lstart is assumed to
 * be huge-page aligned by the callers.
 */
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			/*
			 * Empty lookup: done, unless pages appeared behind
			 * us — rescan from the start once to catch them.
			 */
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	/* A full truncate must leave the mapping empty. */
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}
|
|
|
|
|
|
|
|
/*
 * Final-unlink path: free every huge page backing the inode, then clear it.
 */
static void hugetlbfs_delete_inode(struct inode *inode)
{
	truncate_hugepages(inode, 0);
	clear_inode(inode);
}
|
|
|
|
|
2006-09-29 08:59:27 +00:00
|
|
|
/*
 * Drop an inode that still has links when its last reference goes away.
 * Entered with inode_lock held (from hugetlbfs_drop_inode); releases it.
 *
 * If the superblock is still active the inode is simply parked on the
 * unused list.  Otherwise (umount in progress) the inode is torn down
 * here: unhashed, its huge pages freed and the inode destroyed.
 */
static void hugetlbfs_forget_inode(struct inode *inode) __releases(inode_lock)
{
	struct super_block *sb = inode->i_sb;

	if (!hlist_unhashed(&inode->i_hash)) {
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			list_move(&inode->i_list, &inode_unused);
		inodes_stat.nr_unused++;
		if (!sb || (sb->s_flags & MS_ACTIVE)) {
			/* Filesystem still mounted: keep the inode cached. */
			spin_unlock(&inode_lock);
			return;
		}
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode_lock);
		/*
		 * write_inode_now is a noop as we set BDI_CAP_NO_WRITEBACK
		 * in our backing_dev_info.
		 */
		write_inode_now(inode, 1);
		spin_lock(&inode_lock);
		inode->i_state &= ~I_WILL_FREE;
		inodes_stat.nr_unused--;
		hlist_del_init(&inode->i_hash);
	}
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);
	truncate_hugepages(inode, 0);
	clear_inode(inode);
	destroy_inode(inode);
}
|
|
|
|
|
|
|
|
static void hugetlbfs_drop_inode(struct inode *inode)
|
|
|
|
{
|
|
|
|
if (!inode->i_nlink)
|
2005-10-30 01:16:44 +00:00
|
|
|
generic_delete_inode(inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
else
|
|
|
|
hugetlbfs_forget_inode(inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
2006-10-28 17:38:43 +00:00
|
|
|
hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
struct prio_tree_iter iter;
|
|
|
|
|
2006-10-28 17:38:43 +00:00
|
|
|
vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long v_offset;
|
|
|
|
|
|
|
|
/*
|
2006-10-28 17:38:43 +00:00
|
|
|
* Can the expression below overflow on 32-bit arches?
|
|
|
|
* No, because the prio_tree returns us only those vmas
|
|
|
|
* which overlap the truncated area starting at pgoff,
|
|
|
|
* and no vma on a 32-bit arch can span beyond the 4GB.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2006-10-28 17:38:43 +00:00
|
|
|
if (vma->vm_pgoff < pgoff)
|
|
|
|
v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
|
|
|
|
else
|
2005-04-16 22:20:36 +00:00
|
|
|
v_offset = 0;
|
|
|
|
|
2006-10-11 08:20:46 +00:00
|
|
|
__unmap_hugepage_range(vma,
|
hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed
After patch 2 in this series, a process that successfully calls mmap() for
a MAP_PRIVATE mapping will be guaranteed to successfully fault until a
process calls fork(). At that point, the next write fault from the parent
could fail due to COW if the child still has a reference.
We only reserve pages for the parent but a copy must be made to avoid
leaking data from the parent to the child after fork(). Reserves could be
taken for both parent and child at fork time to guarantee faults but if
the mapping is large it is highly likely we will not have sufficient pages
for the reservation, and it is common to fork only to exec() immediatly
after. A failure here would be very undesirable.
Note that the current behaviour of mainline with MAP_PRIVATE pages is
pretty bad. The following situation is allowed to occur today.
1. Process calls mmap(MAP_PRIVATE)
2. Process calls mlock() to fault all pages and makes sure it succeeds
3. Process forks()
4. Process writes to MAP_PRIVATE mapping while child still exists
5. If the COW fails at this point, the process gets SIGKILLed even though it
had taken care to ensure the pages existed
This patch improves the situation by guaranteeing the reliability of the
process that successfully calls mmap(). When the parent performs COW, it
will try to satisfy the allocation without using reserves. If that fails
the parent will steal the page leaving any children without a page.
Faults from the child after that point will result in failure. If the
child COW happens first, an attempt will be made to allocate the page
without reserves and the child will get SIGKILLed on failure.
To summarise the new behaviour:
1. If the original mapper performs COW on a private mapping with multiple
references, it will attempt to allocate a hugepage from the pool or
the buddy allocator without using the existing reserves. On fail, VMAs
mapping the same area are traversed and the page being COW'd is unmapped
where found. It will then steal the original page as the last mapper in
the normal way.
2. The VMAs the pages were unmapped from are flagged to note that pages
with data no longer exist. Future no-page faults on those VMAs will
terminate the process as otherwise it would appear that data was corrupted.
A warning is printed to the console that this situation occured.
2. If the child performs COW first, it will attempt to satisfy the COW
from the pool if there are enough pages or via the buddy allocator if
overcommit is allowed and the buddy allocator can satisfy the request. If
it fails, the child will be killed.
If the pool is large enough, existing applications will not notice that
the reserves were a factor. Existing applications depending on the
no-reserves been set are unlikely to exist as for much of the history of
hugetlbfs, pages were prefaulted at mmap(), allocating the pages at that
point or failing the mmap().
[npiggin@suse.de: fix CONFIG_HUGETLB=n build]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-24 04:27:25 +00:00
|
|
|
vma->vm_start + v_offset, vma->vm_end, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Truncate a hugetlbfs inode to @offset (which must be hugepage
 * aligned): update i_size, unmap every mapping beyond the new end,
 * then free the now-unused huge pages.
 */
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	spin_lock(&mapping->i_mmap_lock);
	if (!prio_tree_empty(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	spin_unlock(&mapping->i_mmap_lock);
	truncate_hugepages(inode, offset);
	return 0;
}
|
|
|
|
|
|
|
|
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2008-07-24 04:27:41 +00:00
|
|
|
struct hstate *h = hstate_inode(inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
int error;
|
|
|
|
unsigned int ia_valid = attr->ia_valid;
|
|
|
|
|
|
|
|
BUG_ON(!inode);
|
|
|
|
|
|
|
|
error = inode_change_ok(inode, attr);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (ia_valid & ATTR_SIZE) {
|
|
|
|
error = -EINVAL;
|
2008-07-24 04:27:41 +00:00
|
|
|
if (!(attr->ia_size & ~huge_page_mask(h)))
|
2005-04-16 22:20:36 +00:00
|
|
|
error = hugetlb_vmtruncate(inode, attr->ia_size);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
attr->ia_valid &= ~ATTR_SIZE;
|
|
|
|
}
|
|
|
|
error = inode_setattr(inode, attr);
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
|
|
|
|
gid_t gid, int mode, dev_t dev)
|
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
inode = new_inode(sb);
|
|
|
|
if (inode) {
|
|
|
|
struct hugetlbfs_inode_info *info;
|
|
|
|
inode->i_mode = mode;
|
|
|
|
inode->i_uid = uid;
|
|
|
|
inode->i_gid = gid;
|
|
|
|
inode->i_blocks = 0;
|
|
|
|
inode->i_mapping->a_ops = &hugetlbfs_aops;
|
|
|
|
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
|
|
|
|
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
2006-06-23 09:03:15 +00:00
|
|
|
INIT_LIST_HEAD(&inode->i_mapping->private_list);
|
2005-04-16 22:20:36 +00:00
|
|
|
info = HUGETLBFS_I(inode);
|
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 09:13:26 +00:00
|
|
|
mpol_shared_policy_init(&info->policy, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
switch (mode & S_IFMT) {
|
|
|
|
default:
|
|
|
|
init_special_inode(inode, mode, dev);
|
|
|
|
break;
|
|
|
|
case S_IFREG:
|
|
|
|
inode->i_op = &hugetlbfs_inode_operations;
|
|
|
|
inode->i_fop = &hugetlbfs_file_operations;
|
|
|
|
break;
|
|
|
|
case S_IFDIR:
|
|
|
|
inode->i_op = &hugetlbfs_dir_inode_operations;
|
|
|
|
inode->i_fop = &simple_dir_operations;
|
|
|
|
|
|
|
|
/* directory inodes start off with i_nlink == 2 (for "." entry) */
|
2006-10-01 06:29:04 +00:00
|
|
|
inc_nlink(inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case S_IFLNK:
|
|
|
|
inode->i_op = &page_symlink_inode_operations;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return inode;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File creation. Allocate an inode, and we're done..
|
|
|
|
*/
|
|
|
|
/*
 * File creation.  Allocate an inode, honouring the directory's
 * setgid bit for group inheritance, and bind it to the dentry.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;
	gid_t gid;

	if (dir->i_mode & S_ISGID) {
		/* Inherit group from the parent directory */
		gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		gid = current->fsgid;
	}
	inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, gid, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
|
|
|
|
|
|
|
|
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
|
|
|
|
{
|
|
|
|
int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
|
|
|
|
if (!retval)
|
2006-10-01 06:29:04 +00:00
|
|
|
inc_nlink(dir);
|
2005-04-16 22:20:36 +00:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hugetlbfs_symlink(struct inode *dir,
|
|
|
|
struct dentry *dentry, const char *symname)
|
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
int error = -ENOSPC;
|
|
|
|
gid_t gid;
|
|
|
|
|
|
|
|
if (dir->i_mode & S_ISGID)
|
|
|
|
gid = dir->i_gid;
|
|
|
|
else
|
|
|
|
gid = current->fsgid;
|
|
|
|
|
|
|
|
inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
|
|
|
|
gid, S_IFLNK|S_IRWXUGO, 0);
|
|
|
|
if (inode) {
|
|
|
|
int l = strlen(symname)+1;
|
|
|
|
error = page_symlink(inode, symname, l);
|
|
|
|
if (!error) {
|
|
|
|
d_instantiate(dentry, inode);
|
|
|
|
dget(dentry);
|
|
|
|
} else
|
|
|
|
iput(inode);
|
|
|
|
}
|
|
|
|
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	/* Dirtiness is tracked on the compound head only */
	SetPageDirty(head);
	return 0;
}
|
|
|
|
|
2006-06-23 09:02:58 +00:00
|
|
|
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-06-23 09:02:58 +00:00
|
|
|
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
|
2008-07-24 04:27:41 +00:00
|
|
|
struct hstate *h = hstate_inode(dentry->d_inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
buf->f_type = HUGETLBFS_MAGIC;
|
2008-07-24 04:27:41 +00:00
|
|
|
buf->f_bsize = huge_page_size(h);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (sbinfo) {
|
|
|
|
spin_lock(&sbinfo->stat_lock);
|
2005-11-22 05:32:24 +00:00
|
|
|
/* If no limits set, just report 0 for max/free/used
|
|
|
|
* blocks, like simple_statfs() */
|
|
|
|
if (sbinfo->max_blocks >= 0) {
|
|
|
|
buf->f_blocks = sbinfo->max_blocks;
|
|
|
|
buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
|
|
|
|
buf->f_files = sbinfo->max_inodes;
|
|
|
|
buf->f_ffree = sbinfo->free_inodes;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
}
|
|
|
|
buf->f_namelen = NAME_MAX;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hugetlbfs_put_super(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
|
|
|
|
|
|
|
|
if (sbi) {
|
|
|
|
sb->s_fs_info = NULL;
|
|
|
|
kfree(sbi);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-10-30 01:16:42 +00:00
|
|
|
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
|
|
|
|
{
|
|
|
|
if (sbinfo->free_inodes >= 0) {
|
|
|
|
spin_lock(&sbinfo->stat_lock);
|
|
|
|
if (unlikely(!sbinfo->free_inodes)) {
|
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
sbinfo->free_inodes--;
|
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
|
|
|
|
{
|
|
|
|
if (sbinfo->free_inodes >= 0) {
|
|
|
|
spin_lock(&sbinfo->stat_lock);
|
|
|
|
sbinfo->free_inodes++;
|
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-12-07 04:33:20 +00:00
|
|
|
static struct kmem_cache *hugetlbfs_inode_cachep;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
|
|
|
|
{
|
2005-10-30 01:16:42 +00:00
|
|
|
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
|
2005-04-16 22:20:36 +00:00
|
|
|
struct hugetlbfs_inode_info *p;
|
|
|
|
|
2005-10-30 01:16:42 +00:00
|
|
|
if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
|
|
|
|
return NULL;
|
2006-12-07 04:33:17 +00:00
|
|
|
p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
|
2005-10-30 01:16:42 +00:00
|
|
|
if (unlikely(!p)) {
|
|
|
|
hugetlbfs_inc_free_inodes(sbinfo);
|
2005-04-16 22:20:36 +00:00
|
|
|
return NULL;
|
2005-10-30 01:16:42 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
return &p->vfs_inode;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hugetlbfs_destroy_inode(struct inode *inode)
|
|
|
|
{
|
2005-10-30 01:16:42 +00:00
|
|
|
hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
|
2005-04-16 22:20:36 +00:00
|
|
|
mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
|
|
|
|
kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
|
|
|
|
}
|
|
|
|
|
2006-06-28 11:26:44 +00:00
|
|
|
static const struct address_space_operations hugetlbfs_aops = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.readpage = hugetlbfs_readpage,
|
2007-10-16 08:25:03 +00:00
|
|
|
.write_begin = hugetlbfs_write_begin,
|
|
|
|
.write_end = hugetlbfs_write_end,
|
2005-04-16 22:20:36 +00:00
|
|
|
.set_page_dirty = hugetlbfs_set_page_dirty,
|
|
|
|
};
|
|
|
|
|
2005-10-30 01:16:42 +00:00
|
|
|
|
2008-07-26 02:45:34 +00:00
|
|
|
static void init_once(void *foo)
|
2005-10-30 01:16:42 +00:00
|
|
|
{
|
|
|
|
struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
|
|
|
|
|
2007-05-17 05:10:57 +00:00
|
|
|
inode_init_once(&ei->vfs_inode);
|
2005-10-30 01:16:42 +00:00
|
|
|
}
|
|
|
|
|
2006-03-28 09:56:42 +00:00
|
|
|
const struct file_operations hugetlbfs_file_operations = {
|
2007-10-16 08:26:22 +00:00
|
|
|
.read = hugetlbfs_read,
|
2005-04-16 22:20:36 +00:00
|
|
|
.mmap = hugetlbfs_file_mmap,
|
|
|
|
.fsync = simple_sync_file,
|
|
|
|
.get_unmapped_area = hugetlb_get_unmapped_area,
|
|
|
|
};
|
|
|
|
|
2007-02-12 08:55:39 +00:00
|
|
|
static const struct inode_operations hugetlbfs_dir_inode_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.create = hugetlbfs_create,
|
|
|
|
.lookup = simple_lookup,
|
|
|
|
.link = simple_link,
|
|
|
|
.unlink = simple_unlink,
|
|
|
|
.symlink = hugetlbfs_symlink,
|
|
|
|
.mkdir = hugetlbfs_mkdir,
|
|
|
|
.rmdir = simple_rmdir,
|
|
|
|
.mknod = hugetlbfs_mknod,
|
|
|
|
.rename = simple_rename,
|
|
|
|
.setattr = hugetlbfs_setattr,
|
|
|
|
};
|
|
|
|
|
2007-02-12 08:55:39 +00:00
|
|
|
static const struct inode_operations hugetlbfs_inode_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.setattr = hugetlbfs_setattr,
|
|
|
|
};
|
|
|
|
|
2007-02-12 08:55:41 +00:00
|
|
|
static const struct super_operations hugetlbfs_ops = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.alloc_inode = hugetlbfs_alloc_inode,
|
|
|
|
.destroy_inode = hugetlbfs_destroy_inode,
|
|
|
|
.statfs = hugetlbfs_statfs,
|
2005-10-30 01:16:43 +00:00
|
|
|
.delete_inode = hugetlbfs_delete_inode,
|
2005-04-16 22:20:36 +00:00
|
|
|
.drop_inode = hugetlbfs_drop_inode,
|
|
|
|
.put_super = hugetlbfs_put_super,
|
2008-02-08 12:21:45 +00:00
|
|
|
.show_options = generic_show_options,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
|
|
|
|
{
|
2007-07-16 06:40:52 +00:00
|
|
|
char *p, *rest;
|
|
|
|
substring_t args[MAX_OPT_ARGS];
|
|
|
|
int option;
|
2008-07-24 04:27:43 +00:00
|
|
|
unsigned long long size = 0;
|
|
|
|
enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (!options)
|
|
|
|
return 0;
|
|
|
|
|
2007-07-16 06:40:52 +00:00
|
|
|
while ((p = strsep(&options, ",")) != NULL) {
|
|
|
|
int token;
|
2007-07-16 06:40:54 +00:00
|
|
|
if (!*p)
|
|
|
|
continue;
|
2007-07-16 06:40:52 +00:00
|
|
|
|
|
|
|
token = match_token(p, tokens, args);
|
|
|
|
switch (token) {
|
|
|
|
case Opt_uid:
|
|
|
|
if (match_int(&args[0], &option))
|
|
|
|
goto bad_val;
|
|
|
|
pconfig->uid = option;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Opt_gid:
|
|
|
|
if (match_int(&args[0], &option))
|
|
|
|
goto bad_val;
|
|
|
|
pconfig->gid = option;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Opt_mode:
|
|
|
|
if (match_octal(&args[0], &option))
|
|
|
|
goto bad_val;
|
2008-02-05 06:28:36 +00:00
|
|
|
pconfig->mode = option & 01777U;
|
2007-07-16 06:40:52 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case Opt_size: {
|
|
|
|
/* memparse() will accept a K/M/G without a digit */
|
|
|
|
if (!isdigit(*args[0].from))
|
|
|
|
goto bad_val;
|
|
|
|
size = memparse(args[0].from, &rest);
|
2008-07-24 04:27:43 +00:00
|
|
|
setsize = SIZE_STD;
|
|
|
|
if (*rest == '%')
|
|
|
|
setsize = SIZE_PERCENT;
|
2007-07-16 06:40:52 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-07-16 06:40:52 +00:00
|
|
|
case Opt_nr_inodes:
|
|
|
|
/* memparse() will accept a K/M/G without a digit */
|
|
|
|
if (!isdigit(*args[0].from))
|
|
|
|
goto bad_val;
|
|
|
|
pconfig->nr_inodes = memparse(args[0].from, &rest);
|
|
|
|
break;
|
|
|
|
|
2008-07-24 04:27:43 +00:00
|
|
|
case Opt_pagesize: {
|
|
|
|
unsigned long ps;
|
|
|
|
ps = memparse(args[0].from, &rest);
|
|
|
|
pconfig->hstate = size_to_hstate(ps);
|
|
|
|
if (!pconfig->hstate) {
|
|
|
|
printk(KERN_ERR
|
|
|
|
"hugetlbfs: Unsupported page size %lu MB\n",
|
|
|
|
ps >> 20);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-07-16 06:40:52 +00:00
|
|
|
default:
|
2007-07-16 06:40:54 +00:00
|
|
|
printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
|
|
|
|
p);
|
|
|
|
return -EINVAL;
|
2007-07-16 06:40:52 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2008-07-24 04:27:43 +00:00
|
|
|
|
|
|
|
/* Do size after hstate is set up */
|
|
|
|
if (setsize > NO_SIZE) {
|
|
|
|
struct hstate *h = pconfig->hstate;
|
|
|
|
if (setsize == SIZE_PERCENT) {
|
|
|
|
size <<= huge_page_shift(h);
|
|
|
|
size *= h->max_huge_pages;
|
|
|
|
do_div(size, 100);
|
|
|
|
}
|
|
|
|
pconfig->nr_blocks = (size >> huge_page_shift(h));
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
2007-07-16 06:40:52 +00:00
|
|
|
|
|
|
|
bad_val:
|
|
|
|
printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
|
|
|
|
args[0].from, p);
|
|
|
|
return 1;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
|
|
|
|
{
|
|
|
|
struct inode * inode;
|
|
|
|
struct dentry * root;
|
|
|
|
int ret;
|
|
|
|
struct hugetlbfs_config config;
|
|
|
|
struct hugetlbfs_sb_info *sbinfo;
|
|
|
|
|
2008-02-08 12:21:45 +00:00
|
|
|
save_mount_options(sb, data);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
config.nr_blocks = -1; /* No limit on size by default */
|
|
|
|
config.nr_inodes = -1; /* No limit on number of inodes by default */
|
|
|
|
config.uid = current->fsuid;
|
|
|
|
config.gid = current->fsgid;
|
|
|
|
config.mode = 0755;
|
2008-07-24 04:27:43 +00:00
|
|
|
config.hstate = &default_hstate;
|
2005-04-16 22:20:36 +00:00
|
|
|
ret = hugetlbfs_parse_options(data, &config);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
|
|
|
|
if (!sbinfo)
|
|
|
|
return -ENOMEM;
|
|
|
|
sb->s_fs_info = sbinfo;
|
2008-07-24 04:27:43 +00:00
|
|
|
sbinfo->hstate = config.hstate;
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_lock_init(&sbinfo->stat_lock);
|
|
|
|
sbinfo->max_blocks = config.nr_blocks;
|
|
|
|
sbinfo->free_blocks = config.nr_blocks;
|
|
|
|
sbinfo->max_inodes = config.nr_inodes;
|
|
|
|
sbinfo->free_inodes = config.nr_inodes;
|
|
|
|
sb->s_maxbytes = MAX_LFS_FILESIZE;
|
2008-07-24 04:27:43 +00:00
|
|
|
sb->s_blocksize = huge_page_size(config.hstate);
|
|
|
|
sb->s_blocksize_bits = huge_page_shift(config.hstate);
|
2005-04-16 22:20:36 +00:00
|
|
|
sb->s_magic = HUGETLBFS_MAGIC;
|
|
|
|
sb->s_op = &hugetlbfs_ops;
|
|
|
|
sb->s_time_gran = 1;
|
|
|
|
inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
|
|
|
|
S_IFDIR | config.mode, 0);
|
|
|
|
if (!inode)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
root = d_alloc_root(inode);
|
|
|
|
if (!root) {
|
|
|
|
iput(inode);
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
sb->s_root = root;
|
|
|
|
return 0;
|
|
|
|
out_free:
|
|
|
|
kfree(sbinfo);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2007-11-15 00:59:41 +00:00
|
|
|
int hugetlb_get_quota(struct address_space *mapping, long delta)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
|
|
|
|
|
|
|
|
if (sbinfo->free_blocks > -1) {
|
|
|
|
spin_lock(&sbinfo->stat_lock);
|
2007-11-15 00:59:41 +00:00
|
|
|
if (sbinfo->free_blocks - delta >= 0)
|
|
|
|
sbinfo->free_blocks -= delta;
|
2005-04-16 22:20:36 +00:00
|
|
|
else
|
|
|
|
ret = -ENOMEM;
|
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-11-15 00:59:41 +00:00
|
|
|
void hugetlb_put_quota(struct address_space *mapping, long delta)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
|
|
|
|
|
|
|
|
if (sbinfo->free_blocks > -1) {
|
|
|
|
spin_lock(&sbinfo->stat_lock);
|
2007-11-15 00:59:41 +00:00
|
|
|
sbinfo->free_blocks += delta;
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_unlock(&sbinfo->stat_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 09:02:57 +00:00
|
|
|
static int hugetlbfs_get_sb(struct file_system_type *fs_type,
|
|
|
|
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 09:02:57 +00:00
|
|
|
return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super, mnt);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Filesystem type descriptor registered with the VFS in init_hugetlbfs_fs().
 * kill_litter_super is used because the in-kernel mount keeps pinned dentries
 * (files created via hugetlb_file_setup()) that must be shaken out on unmount.
 */
static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.get_sb		= hugetlbfs_get_sb,
	.kill_sb	= kill_litter_super,
};
/*
 * Internal mount created by kern_mount() in init_hugetlbfs_fs();
 * hugetlb_file_setup() allocates its files on this mount.  NULL until
 * initialization succeeds (hugetlb_file_setup() checks for that).
 */
static struct vfsmount *hugetlbfs_vfsmount;
static int can_do_hugetlb_shm(void)
|
|
|
|
{
|
|
|
|
return likely(capable(CAP_IPC_LOCK) ||
|
|
|
|
in_group_p(sysctl_hugetlb_shm_group) ||
|
|
|
|
can_do_mlock());
|
|
|
|
}
|
|
|
|
|
2007-06-16 17:16:16 +00:00
|
|
|
/*
 * Create an unlinked hugetlbfs-backed file of the given size on the
 * internal mount.  Used by in-kernel callers (e.g. SysV SHM) that need a
 * struct file backed by hugepages without a visible filesystem entry.
 *
 * Returns the new file, or an ERR_PTR:
 *   -ENOENT  internal mount was never set up
 *   -EPERM   caller may not use hugetlb SHM (see can_do_hugetlb_shm())
 *   -ENOMEM  SHM locking limit exceeded, or allocation failure
 *   -ENOSPC  no hugepages available to reserve
 *   -ENFILE  file allocation failed
 */
struct file *hugetlb_file_setup(const char *name, size_t size)
{
	int error = -ENOMEM;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr quick_string;

	if (!hugetlbfs_vfsmount)
		return ERR_PTR(-ENOENT);

	if (!can_do_hugetlb_shm())
		return ERR_PTR(-EPERM);

	/* Charge the whole size against the caller's locked-shm limit. */
	if (!user_shm_lock(size, current->user))
		return ERR_PTR(-ENOMEM);

	root = hugetlbfs_vfsmount->mnt_root;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;	/* d_alloc() computes the hash itself */
	dentry = d_alloc(root, &quick_string);
	if (!dentry)
		goto out_shm_unlock;

	error = -ENOSPC;
	inode = hugetlbfs_get_inode(root->d_sb, current->fsuid,
				current->fsgid, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	error = -ENOMEM;
	/* Strictly reserve hugepages up front so mmap() cannot overcommit. */
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL))
		goto out_inode;

	d_instantiate(dentry, inode);
	inode->i_size = size;
	/* Unlinked from birth: the file disappears when the last ref drops. */
	inode->i_nlink = 0;

	error = -ENFILE;
	file = alloc_file(hugetlbfs_vfsmount, dentry,
			FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (!file)
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	/* After d_instantiate(), dput() alone also releases the inode. */
	dput(dentry);
out_shm_unlock:
	user_shm_unlock(size, current->user);
	return ERR_PTR(error);
}
|
|
|
|
|
|
|
|
/*
 * Module/boot-time initialization: set up the backing_dev_info, the inode
 * slab cache, register the filesystem type and create the internal mount
 * used by hugetlb_file_setup().
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is torn down again.
 *
 * Fixes over the previous version:
 *  - kmem_cache_create() failure used to fall through with error still 0
 *    (bdi_init()'s success value) and report success; now returns -ENOMEM.
 *  - kern_mount() failure used to destroy the inode cache while leaving
 *    the filesystem type registered, so a later mount would allocate from
 *    a destroyed cache; now unregister before tearing down.
 */
static int __init init_hugetlbfs_fs(void)
{
	int error;
	struct vfsmount *vfsmount;

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out_bdi;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_cache;

	vfsmount = kern_mount(&hugetlbfs_fs_type);
	if (!IS_ERR(vfsmount)) {
		hugetlbfs_vfsmount = vfsmount;
		return 0;
	}

	error = PTR_ERR(vfsmount);
	unregister_filesystem(&hugetlbfs_fs_type);
out_cache:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out_bdi:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}
|
|
|
|
|
|
|
|
/*
 * Module unload: tear down in the reverse order of init_hugetlbfs_fs().
 *
 * Fix: the filesystem type must be unregistered BEFORE the inode cache is
 * destroyed — the previous order left a window where a concurrent mount
 * could allocate inodes from an already-destroyed slab cache.
 */
static void __exit exit_hugetlbfs_fs(void)
{
	unregister_filesystem(&hugetlbfs_fs_type);
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}
|
|
|
|
|
|
|
|
/* Wire up the init/exit hooks for both built-in and modular builds. */
module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");
|