kexec: use a mutex for locking rather than xchg()
Functionally the same, but more conventional.

Cc: Huang Ying <ying.huang@intel.com>
Tested-by: Vivek Goyal <vgoyal@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8c5a1cf0ad
parent 3122c33119

1 changed file with 10 additions and 24 deletions
kernel/kexec.c

@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                                 struct kexec_segment __user *segments,
                                 unsigned long flags)
 {
         struct kimage **dest_image, *image;
-        int locked;
         int result;
 
         /* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
          *
          * KISS: always take the mutex.
          */
-        locked = xchg(&kexec_lock, 1);
-        if (locked)
+        if (!mutex_trylock(&kexec_mutex))
                 return -EBUSY;
 
         dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                 image = xchg(dest_image, image);
 
 out:
-        locked = xchg(&kexec_lock, 0); /* Release the mutex */
-        BUG_ON(!locked);
+        mutex_unlock(&kexec_mutex);
         kimage_free(image);
 
         return result;
@@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-        int locked;
-
-
-        /* Take the kexec_lock here to prevent sys_kexec_load
+        /* Take the kexec_mutex here to prevent sys_kexec_load
          * running on one cpu from replacing the crash kernel
          * we are using after a panic on a different cpu.
          *
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
          * of memory the xchg(&kexec_crash_image) would be
          * sufficient.  But since I reuse the memory...
          */
-        locked = xchg(&kexec_lock, 1);
-        if (!locked) {
+        if (mutex_trylock(&kexec_mutex)) {
                 if (kexec_crash_image) {
                         struct pt_regs fixed_regs;
                         crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
                         machine_crash_shutdown(&fixed_regs);
                         machine_kexec(kexec_crash_image);
                 }
-                locked = xchg(&kexec_lock, 0);
-                BUG_ON(!locked);
+                mutex_unlock(&kexec_mutex);
         }
 }
 
@@ -1434,7 +1422,7 @@ int kernel_kexec(void)
 {
         int error = 0;
 
-        if (xchg(&kexec_lock, 1))
+        if (!mutex_trylock(&kexec_mutex))
                 return -EBUSY;
         if (!kexec_image) {
                 error = -EINVAL;
@@ -1498,8 +1486,6 @@ int kernel_kexec(void)
 #endif
 
  Unlock:
-        if (!xchg(&kexec_lock, 0))
-                BUG();
-
+        mutex_unlock(&kexec_mutex);
         return error;
 }
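For readers less familiar with the two idioms, the sketch below contrasts the hand-rolled xchg()-style binary lock with the trylock/unlock protocol this patch switches to. It is a standalone userspace illustration only: it uses C11 atomics and a pthread mutex in place of the kernel's xchg() and mutex_trylock()/mutex_unlock(), and all names (demo_lock, demo_mutex, the *_try_enter/*_exit helpers) are invented for the example.

/*
 * Hypothetical userspace demo, not kernel code: the same trylock-or-fail
 * protocol expressed first with an xchg()-style binary lock, then with a
 * conventional mutex, mirroring the kexec change.
 * Build with: cc -std=c11 -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Old style: a home-grown binary lock built on atomic exchange. */
static atomic_int demo_lock = 0;

static int old_style_try_enter(void)
{
        /* Like xchg(&kexec_lock, 1): a non-zero previous value means
         * someone else already holds the lock. */
        if (atomic_exchange(&demo_lock, 1))
                return -1;              /* -EBUSY in the kernel */
        return 0;
}

static void old_style_exit(void)
{
        /* Like xchg(&kexec_lock, 0): the lock must have been held,
         * otherwise something is badly wrong (BUG_ON() in the kernel). */
        if (!atomic_exchange(&demo_lock, 0))
                fprintf(stderr, "demo_lock was not held!\n");
}

/* New style: the identical protocol on a conventional mutex. */
static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

static int new_style_try_enter(void)
{
        /* Stand-in for mutex_trylock(&kexec_mutex).  Note the polarity:
         * the kernel's mutex_trylock() returns 1 on success, whereas
         * pthread_mutex_trylock() returns 0 on success. */
        if (pthread_mutex_trylock(&demo_mutex))
                return -1;              /* -EBUSY */
        return 0;
}

static void new_style_exit(void)
{
        pthread_mutex_unlock(&demo_mutex);      /* mutex_unlock() */
}

int main(void)
{
        if (old_style_try_enter() == 0) {
                puts("old style: lock acquired");
                old_style_exit();
        }
        if (new_style_try_enter() == 0) {
                puts("new style: lock acquired");
                new_style_exit();
        }
        return 0;
}

The design point is visible in the demo: with the hand-rolled lock, releasing takes a second atomic exchange plus a manual sanity check that the lock really was held, while the conventional mutex gets equivalent checking from the kernel's mutex debugging infrastructure without any open-coded bookkeeping.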