diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index bb94a51e53d..08975ee0a10 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 5e9d3eadd16..9622e126a8d 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index 03b377abf5c..c8a3928a58c 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -17,7 +17,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 447ccc6ca7a..6a11b65fa66 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int read_counter : 31;
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 17d15bd6322..5873a870110 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 typedef struct {
 	volatile int lock;
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index 2e1060892d3..b4c5efaadb9 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -14,7 +14,7 @@ typedef struct {
 	unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 735caafb81f..396d2746ca5 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,10 +4,10 @@
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
 	volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
 #endif
 } arch_spinlock_t;
 
@@ -16,6 +16,6 @@ typedef struct {
 	volatile int counter;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
 
 #endif
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index fdd7f583de5..353963d4205 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_SMP
 arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 4312e5baaf8..f5f39d82711 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile signed int lock;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 579069c1215..57dfa414cfb 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
 #include
 
 struct rtas_t rtas = {
-	.lock = __RAW_SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index a93638eee3f..e25c0370f6c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int owner_cpu;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index 37712c32ba9..a3be2db960e 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 41d9a8fec13..c145e63a5d6 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned char lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 2ae7637ed52..696f8364a4f 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
 	unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	unsigned int lock;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 0862d9d89c9..5b75afac8a3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9f908b9d1ab..f1714697a09 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index d4af2d7a86e..7dadce303eb 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -43,14 +43,14 @@ typedef struct {
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 		.magic = SPINLOCK_MAGIC, \
 		.owner = SPINLOCK_OWNER_INIT, \
 		.owner_cpu = -1, \
 		SPIN_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 		SPIN_DEP_MAP_INIT(lockname) }
 #endif
 
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 34d36691c4e..10db021f487 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -18,13 +18,13 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
 typedef struct { } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
 
 #endif
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7cc50c62af5..2389e3f85cf 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5ac8ee0a9e3..fb7a0fa508b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7d56cecc2c6..63bc1cc3821 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * CONFIG_TRACER_MAX_TRACE.
  */
 static arch_spinlock_t ftrace_max_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)
 static void __ftrace_dump(bool disable_tracing)
 {
 	static arch_spinlock_t ftrace_dump_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 206ec3d4b3c..433e2eda2d0 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -74,7 +74,7 @@ static struct {
 	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4cf7e83ec23..e347853564e 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
 static arch_spinlock_t wakeup_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 9a82d568fde..728c3522148 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {
 static unsigned long max_stack_size;
 
 static arch_spinlock_t max_stack_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 2acd501b382..f7300413714 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
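
Note on the pattern above: every hunk is a mechanical rename of the arch-level "unlocked" initializer from __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED; no initializer values or locking behavior change. As a rough illustration of the two initialization styles the patch touches (static brace initialization vs. compound-literal assignment), the standalone C sketch below mimics the pattern. It is illustrative only, not kernel code: the struct layout matches the simple arches above, and my_lock / my_lock_init() are invented names for the example.

/*
 * Sketch of the arch-level spinlock type and its "unlocked" initializer,
 * modeled on the spinlock_types.h hunks above. Everything here is a
 * stand-in for the real definitions in <asm/spinlock_types.h>.
 */
typedef struct {
	volatile unsigned int lock;	/* 0 == unlocked on most arches above;
					 * sh, m32r, and parisc use 1 instead */
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

/* Static initialization, as in die_lock, trace_cmdline_lock, etc.: */
static arch_spinlock_t my_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/*
 * Runtime (re)initialization needs the compound-literal cast, as in
 * __spin_lock_init() and rb_allocate_cpu_buffer() above, because a bare
 * brace initializer is only valid in a declaration:
 */
static void my_lock_init(arch_spinlock_t *l)
{
	*l = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
}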