Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest:
  lguest: tidy up documentation
  kernel/futex.c: make 3 functions static
  unexport access_process_vm
  lguest: make async_hcall() static
commit 221d46841b
5 changed files with 38 additions and 50 deletions
@@ -93,38 +93,7 @@ struct lguest_data lguest_data = {
 };
 static cycle_t clock_base;
 
-/*G:035 Notice the lazy_hcall() above, rather than hcall().  This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
- * are reasonably expensive, batching them up makes sense.  For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hypercall() to store the call for
- * future processing.  When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
-	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-static void lazy_hcall(unsigned long call,
-		       unsigned long arg1,
-		       unsigned long arg2,
-		       unsigned long arg3)
-{
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		hcall(call, arg1, arg2, arg3);
-	else
-		async_hcall(call, arg1, arg2, arg3);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really.  We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really.  We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall.  Each entry in the ring has 4 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -134,8 +103,8 @@ static void lazy_hcall(unsigned long call,
  * full and we just make the hypercall directly.  This has the nice side
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void async_hcall(unsigned long call, unsigned long arg1,
+			unsigned long arg2, unsigned long arg3)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
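[Editor's note: for readers following the G:037 comment above, the ring the Host consumes looks roughly like the sketch below. Only the name "hcall_status" and the "4 slots / status word" layout come from the comment; the entry struct, its field names, and LHCALL_RING_SIZE's value are illustrative assumptions, not taken from this diff.]

/* Hypothetical sketch of the stored-hypercall ring described in G:037.
 * Everything except "hcall_status" is an assumption for illustration. */
#define LHCALL_RING_SIZE 64	/* assumed ring size */

struct hcall_entry {
	/* The 4 slots for the hypercall: number plus three arguments. */
	unsigned long call, arg1, arg2, arg3;
};

struct hcall_ring {
	/* 0 means "ready for the Host to run"; the Host marks the slot
	 * non-zero once the stored call has been consumed. */
	unsigned char hcall_status[LHCALL_RING_SIZE];
	struct hcall_entry entries[LHCALL_RING_SIZE];
};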
@@ -161,7 +130,37 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
 /*:*/
+/*G:035 Notice the lazy_hcall() above, rather than hcall().  This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
+ * are reasonably expensive, batching them up makes sense.  For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
 
 /*G:033
  * After that diversion we return to our first native-instruction
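[Editor's note: the payoff of the G:035 trick is on the caller's side: while lazy mode is on, every page-table hypercall is queued rather than issued. A minimal sketch of the pattern the comment describes, assuming the generic paravirt lazy-MMU hooks; the function and loop below are illustrative, not part of this commit.]

#include <linux/mm.h>

/* Sketch: batching PTE updates under lazy MMU mode.  With lguest's
 * paravirt ops installed, each set_pte_at() funnels into lazy_hcall(),
 * which queues via async_hcall(); leaving lazy mode ends up in
 * lguest_leave_lazy_mode(), and one LHCALL_FLUSH_ASYNC flushes the lot. */
static void zap_range(struct mm_struct *mm, unsigned long addr,
		      pte_t *pte, int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* hypercalls now deferred */
	for (i = 0; i < nr; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, pte + i, __pte(0));
	arch_leave_lazy_mmu_mode();	/* single flush hypercall */
}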
@@ -54,9 +54,6 @@ hcall(unsigned long call,
 }
 /*:*/
 
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 
@@ -149,10 +149,6 @@ union futex_key {
 		int offset;
 	} both;
 };
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
-		  union futex_key *key);
-void get_futex_key_refs(union futex_key *key);
-void drop_futex_key_refs(union futex_key *key);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
@@ -181,8 +181,8 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-		  union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
+			 union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -268,14 +268,13 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(get_futex_key);
 
 /*
  * Take a reference to the resource addressed by a key.
  * Can be called while holding spinlocks.
  *
  */
-inline void get_futex_key_refs(union futex_key *key)
+static void get_futex_key_refs(union futex_key *key)
 {
 	if (key->both.ptr == 0)
 		return;
@@ -288,13 +287,12 @@ inline void get_futex_key_refs(union futex_key *key)
 		break;
 	}
 }
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
 
 /*
  * Drop a reference to the resource addressed by a key.
  * The hash bucket spinlock must not be held.
  */
-void drop_futex_key_refs(union futex_key *key)
+static void drop_futex_key_refs(union futex_key *key)
 {
 	if (!key->both.ptr)
 		return;
@@ -307,7 +305,6 @@ void drop_futex_key_refs(union futex_key *key)
 		break;
 	}
 }
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
 {
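[Editor's note: the three futex hunks above apply one symbol-visibility pattern: a function used only within kernel/futex.c loses its extern declaration (see the futex.h hunk), becomes static, and drops its EXPORT_SYMBOL_GPL() line, removing it from the module symbol table. A minimal illustration of the two visibility levels; the function names below are hypothetical, not from this commit.]

#include <linux/module.h>

/* File-local after this commit's treatment: no header declaration,
 * no export, invisible outside this translation unit. */
static int helper(int x)
{
	return x * 2;
}

/* Still module-facing: EXPORT_SYMBOL_GPL() places the symbol in the
 * GPL-only module symbol table, so loadable modules can link to it. */
int public_api(int x)
{
	return helper(x);
}
EXPORT_SYMBOL_GPL(public_api);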
@@ -2748,4 +2748,3 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 
 	return buf - old_buf;
 }
-EXPORT_SYMBOL_GPL(access_process_vm);
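[Editor's note: unexporting access_process_vm() only affects loadable modules; built-in users such as ptrace still call it directly. As the "return buf - old_buf;" above shows, it returns the number of bytes actually transferred. A usage sketch under that assumption; the wrapper function and values below are illustrative.]

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: reading 8 bytes of another task's memory, ptrace-style.
 * "tsk" and "addr" are assumed to come from the caller's context. */
static int peek_word(struct task_struct *tsk, unsigned long addr)
{
	u64 val;
	int copied;

	copied = access_process_vm(tsk, addr, &val, sizeof(val), 0);
	if (copied != sizeof(val))
		return -EIO;	/* partial copy: unmapped or bad address */
	printk(KERN_INFO "word at %#lx = %#llx\n",
	       addr, (unsigned long long)val);
	return 0;
}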