locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED
Further name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent
445c89514b
commit
edc35bd72e
25 changed files with 33 additions and 33 deletions
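Every hunk below follows the same mechanical pattern: only the spelling of the arch-level initializer changes, not its value. As a minimal sketch (the name demo_lock is hypothetical, not taken from this commit), a declaration changes like this:

	/* before this commit */
	static arch_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;

	/* after this commit */
	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;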
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -17,7 +17,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int read_counter : 31;
@@ -9,7 +9,7 @@ typedef struct {
 	volatile int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 typedef struct {
 	volatile int lock;
@@ -14,7 +14,7 @@ typedef struct {
 	unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -4,10 +4,10 @@
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
 	volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
 #endif
 } arch_spinlock_t;
 
@@ -16,6 +16,6 @@ typedef struct {
 	volatile int counter;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
 
 #endif
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_SMP
 arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
 
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile signed int lock;
@@ -42,7 +42,7 @@
 #include <asm/mmu.h>
 
 struct rtas_t rtas = {
-	.lock = __RAW_SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int owner_cpu;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned char lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
 	unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	unsigned int lock;
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
@@ -43,14 +43,14 @@ typedef struct {
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 		.magic = SPINLOCK_MAGIC, \
 		.owner = SPINLOCK_OWNER_INIT, \
 		.owner_cpu = -1, \
 		SPIN_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 		SPIN_DEP_MAP_INIT(lockname) }
 #endif
 
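The hunk above is the generic layer: spinlock_t embeds the arch-level lock as its .raw_lock field, so the generic __SPIN_LOCK_UNLOCKED() helper keeps its spelling and only its expansion changes. A minimal sketch of how the two levels relate after this commit (the names low_level_lock and demo_lock are hypothetical):

	/* arch-level lock, initialized with the renamed macro */
	static arch_spinlock_t low_level_lock = __ARCH_SPIN_LOCK_UNLOCKED;

	/* generic spinlock_t: same spelling as before, but the macro now
	 * expands to .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED internally */
	static spinlock_t demo_lock = __SPIN_LOCK_UNLOCKED(demo_lock);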
@@ -18,13 +18,13 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
 typedef struct { } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
 
 #endif
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * CONFIG_TRACER_MAX_TRACE.
  */
 static arch_spinlock_t ftrace_max_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)
 static void __ftrace_dump(bool disable_tracing)
 {
 	static arch_spinlock_t ftrace_dump_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -74,7 +74,7 @@ static struct {
 	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
 static arch_spinlock_t wakeup_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {
 
 static unsigned long max_stack_size;
 static arch_spinlock_t max_stack_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;