mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 19:26:25 +00:00
/dev/mem: introduce size_inside_page()
Introduce size_inside_page() to replace duplicate /dev/mem code. Also apply it to /dev/kmem, whose alignment logic was buggy.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
4ea2f43f28
commit
f222318e9c
1 changed file with 19 additions and 41 deletions
|
@ -34,6 +34,19 @@
|
|||
# include <linux/efi.h>
|
||||
#endif
|
||||
|
||||
static inline unsigned long size_inside_page(unsigned long start,
|
||||
unsigned long size)
|
||||
{
|
||||
unsigned long sz;
|
||||
|
||||
if (-start & (PAGE_SIZE - 1))
|
||||
sz = -start & (PAGE_SIZE - 1);
|
||||
else
|
||||
sz = PAGE_SIZE;
|
||||
|
||||
return min_t(unsigned long, sz, size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Architectures vary in how they handle caching for addresses
|
||||
* outside of main memory.
|
||||
|
@ -141,15 +154,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
|
|||
#endif
|
||||
|
||||
while (count > 0) {
|
||||
/*
|
||||
* Handle first page in case it's not aligned
|
||||
*/
|
||||
if (-p & (PAGE_SIZE - 1))
|
||||
sz = -p & (PAGE_SIZE - 1);
|
||||
else
|
||||
sz = PAGE_SIZE;
|
||||
|
||||
sz = min_t(unsigned long, sz, count);
|
||||
sz = size_inside_page(p, count);
|
||||
|
||||
if (!range_is_allowed(p >> PAGE_SHIFT, count))
|
||||
return -EPERM;
|
||||
|
@ -208,15 +213,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
|
|||
#endif
|
||||
|
||||
while (count > 0) {
|
||||
/*
|
||||
* Handle first page in case it's not aligned
|
||||
*/
|
||||
if (-p & (PAGE_SIZE - 1))
|
||||
sz = -p & (PAGE_SIZE - 1);
|
||||
else
|
||||
sz = PAGE_SIZE;
|
||||
|
||||
sz = min_t(unsigned long, sz, count);
|
||||
sz = size_inside_page(p, count);
|
||||
|
||||
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
|
||||
return -EPERM;
|
||||
|
@ -429,15 +426,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
|
|||
}
|
||||
#endif
|
||||
while (low_count > 0) {
|
||||
/*
|
||||
* Handle first page in case it's not aligned
|
||||
*/
|
||||
if (-p & (PAGE_SIZE - 1))
|
||||
sz = -p & (PAGE_SIZE - 1);
|
||||
else
|
||||
sz = PAGE_SIZE;
|
||||
|
||||
sz = min_t(unsigned long, sz, low_count);
|
||||
sz = size_inside_page(p, low_count);
|
||||
|
||||
/*
|
||||
* On ia64 if a page has been mapped somewhere as
|
||||
|
@ -461,10 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
|
|||
if (!kbuf)
|
||||
return -ENOMEM;
|
||||
while (count > 0) {
|
||||
int len = count;
|
||||
int len = size_inside_page(p, count);
|
||||
|
||||
if (len > PAGE_SIZE)
|
||||
len = PAGE_SIZE;
|
||||
len = vread(kbuf, (char *)p, len);
|
||||
if (!len)
|
||||
break;
|
||||
|
@ -509,15 +496,8 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
|
|||
|
||||
while (count > 0) {
|
||||
char *ptr;
|
||||
/*
|
||||
* Handle first page in case it's not aligned
|
||||
*/
|
||||
if (-realp & (PAGE_SIZE - 1))
|
||||
sz = -realp & (PAGE_SIZE - 1);
|
||||
else
|
||||
sz = PAGE_SIZE;
|
||||
|
||||
sz = min_t(unsigned long, sz, count);
|
||||
sz = size_inside_page(realp, count);
|
||||
|
||||
/*
|
||||
* On ia64 if a page has been mapped somewhere as
|
||||
|
@ -577,10 +557,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
|
|||
if (!kbuf)
|
||||
return wrote ? wrote : -ENOMEM;
|
||||
while (count > 0) {
|
||||
int len = count;
|
||||
int len = size_inside_page(p, count);
|
||||
|
||||
if (len > PAGE_SIZE)
|
||||
len = PAGE_SIZE;
|
||||
written = copy_from_user(kbuf, buf, len);
|
||||
if (written) {
|
||||
if (wrote + virtr)
|
||||
|
|
Loading…
Reference in a new issue