arch/frv/include/asm/system.h
David Howells 00460f41ff FRV: Implement atomic64_t
Implement atomic64_t and its ops for FRV.  Tested with the following patch:

	diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
	index 55e4fab..086d50d 100644
	--- a/arch/frv/kernel/setup.c
	+++ b/arch/frv/kernel/setup.c
	@@ -746,6 +746,52 @@ static void __init parse_cmdline_early(char *cmdline)

	 } /* end parse_cmdline_early() */

	+static atomic64_t xxx;
	+
	+static void test_atomic64(void)
	+{
	+	atomic64_set(&xxx, 0x12300000023LL);
	+
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000023LL);
	+	mb();
	+	if (atomic64_inc_return(&xxx) != 0x12300000024LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000024LL);
	+	mb();
	+	if (atomic64_sub_return(0x36900000050LL, &xxx) != -0x2460000002cLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002cLL);
	+	mb();
	+	if (atomic64_dec_return(&xxx) != -0x2460000002dLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002dLL);
	+	mb();
	+	if (atomic64_add_return(0x36800000001LL, &xxx) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x123456789abcdefLL, 0x121ffffffd4LL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x121ffffffd4LL, 0x123456789abcdefLL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x123456789abcdefLL);
	+	mb();
	+	if (atomic64_xchg(&xxx, 0xabcdef123456789LL) != 0x123456789abcdefLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0xabcdef123456789LL);
	+	mb();
	+}
	+
	 /*****************************************************************************/
	 /*
	  *
	@@ -845,6 +891,8 @@ void __init setup_arch(char **cmdline_p)
	 //	asm volatile("movgs %0,timerd" :: "r"(10000000));
	 //	__set_HSR(0, __get_HSR(0) | HSR0_ETMD);

	+	test_atomic64();
	+
	 } /* end setup_arch() */

	 #if 0

Note that this doesn't cover all the trivial wrappers, but does cover all the
substantial implementations.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-07-01 19:38:09 -07:00

/* system.h: FR-V CPU control definitions
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/kernel.h>

struct thread_struct;
/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 * The `mb' is to tell GCC not to cache `current' across this call.
 */
extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev_thread,
				struct thread_struct *next_thread,
				struct task_struct *prev);

#define switch_to(prev, next, last)					\
do {									\
	(prev)->thread.sched_lr =					\
		(unsigned long) __builtin_return_address(0);		\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev));\
	mb();								\
} while(0)
/*
 * interrupt flag manipulation
 * - use virtual interrupt management since touching the PSR is slow
 *   - ICC2.Z: T if interrupts virtually disabled
 *   - ICC2.C: F if interrupts really disabled
 *   - if Z==1 upon interrupt:
 *     - C is set to 0
 *     - interrupts are really disabled
 *     - entry.S returns immediately
 *   - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
 *     - if taken, the trap:
 *       - sets ICC2.C
 *       - enables interrupts
 */
#define local_irq_disable()					\
do {								\
	/* set Z flag, but don't change the C flag */		\
	asm volatile("	andcc	gr0,gr0,gr0,icc2	\n"	\
		     :						\
		     :						\
		     : "memory", "icc2"				\
		     );						\
} while(0)

#define local_irq_enable()					\
do {								\
	/* clear Z flag and then test the C flag */		\
	asm volatile("	oricc	gr0,#1,gr0,icc2		\n"	\
		     "	tihi	icc2,gr0,#2		\n"	\
		     :						\
		     :						\
		     : "memory", "icc2"				\
		     );						\
} while(0)

#define local_save_flags(flags)					\
do {								\
	typecheck(unsigned long, flags);			\
	asm volatile("movsg ccr,%0"				\
		     : "=r"(flags)				\
		     :						\
		     : "memory");				\
								\
	/* shift ICC2.Z to bit 0 */				\
	flags >>= 26;						\
								\
	/* make flags 1 if interrupts disabled, 0 otherwise */	\
	flags &= 1UL;						\
} while(0)

#define irqs_disabled() \
	({unsigned long flags; local_save_flags(flags); !!flags; })

#define local_irq_save(flags)			\
do {						\
	typecheck(unsigned long, flags);	\
	local_save_flags(flags);		\
	local_irq_disable();			\
} while(0)

#define local_irq_restore(flags)					\
do {									\
	typecheck(unsigned long, flags);				\
									\
	/* load the Z flag by turning 1 if disabled into 0 if disabled	\
	 * and thus setting the Z flag but not the C flag */		\
	asm volatile("	xoricc	%0,#1,gr0,icc2		\n"		\
		     /* then test Z=0 and C=0 */			\
		     "	tihi	icc2,gr0,#2		\n"		\
		     :							\
		     : "r"(flags)					\
		     : "memory", "icc2"					\
		     );							\
									\
} while(0)
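
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * critical section under the virtual interrupt scheme described above.
 * local_irq_save() only sets ICC2.Z, so the fast path never touches the
 * (slow) PSR; the TIHI trap in local_irq_restore() fires only if an
 * interrupt was really disabled in the meantime.  The function and variable
 * names here are hypothetical.
 */
#if 0
static unsigned long example_counter;

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* virtually mask IRQs, remember state */
	example_counter++;		/* state now safe from local IRQs */
	local_irq_restore(flags);	/* re-enable; trap #2 fires only if an
					 * IRQ really disabled interrupts while
					 * we were masked */
}
#endif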
/*
 * real interrupt flag manipulation
 */
#define __local_irq_disable()				\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%2,%0	\n"	\
		     "	ori	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while(0)

#define __local_irq_enable()				\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (~PSR_PIL)			\
		     : "memory");			\
} while(0)

#define __local_save_flags(flags)		\
do {						\
	typecheck(unsigned long, flags);	\
	asm("movsg psr,%0"			\
	    : "=r"(flags)			\
	    :					\
	    : "memory");			\
} while(0)

#define __local_irq_save(flags)				\
do {							\
	unsigned long npsr;				\
	typecheck(unsigned long, flags);		\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%3,%1	\n"	\
		     "	ori	%1,%2,%1	\n"	\
		     "	movgs	%1,psr		\n"	\
		     : "=r"(flags), "=r"(npsr)		\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while(0)

#define __local_irq_restore(flags)			\
do {							\
	typecheck(unsigned long, flags);		\
	asm volatile("	movgs	%0,psr		\n"	\
		     :					\
		     : "r" (flags)			\
		     : "memory");			\
} while(0)

#define __irqs_disabled() \
	((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
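
/*
 * Illustrative contrast with the virtual scheme (not in the original
 * header): __local_irq_save() really masks interrupts by raising the PIL
 * field in the PSR, which the comments above note is slow, so it is the
 * fallback for code that cannot rely on virtual masking.  A hypothetical
 * sketch:
 */
#if 0
static void example_hard_masked_region(void)
{
	unsigned long psr_flags;

	__local_irq_save(psr_flags);	/* raise PIL to 14 in the real PSR */
	/* ... code that must run with interrupts physically masked ... */
	__local_irq_restore(psr_flags);	/* write the saved PSR back */
}
#endif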
/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop"::)
#define mb()			asm volatile ("membar" : : :"memory")
#define rmb()			asm volatile ("membar" : : :"memory")
#define wmb()			asm volatile ("membar" : : :"memory")
#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) \
	do { xchg(&var, (value)); } while (0)
#else
#define smp_mb()			barrier()
#define smp_rmb()			rmb()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do {} while(0)
#define set_mb(var, value) \
	do { var = (value); barrier(); } while (0)
#endif
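
/*
 * Illustrative producer/consumer pairing (not in the original header): on
 * FR-V every barrier flavour above is the same "membar" instruction, while
 * the smp_*() forms collapse to a plain compiler barrier() on UP kernels.
 * The variables and functions here are hypothetical.
 */
#if 0
static int example_payload;
static int example_ready;

static void example_producer(void)
{
	example_payload = 42;
	smp_wmb();		/* publish the payload before the flag */
	example_ready = 1;
}

static void example_consumer(void)
{
	if (example_ready) {
		smp_rmb();	/* order the flag read before the payload read */
		BUG_ON(example_payload != 42);
	}
}
#endif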
extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void);
#define arch_align_stack(x) (x)
/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */
extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new)							\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig, __xg_tmp;					\
	__typeof__(*(ptr)) __xg_test = (test);					\
	__typeof__(*(ptr)) __xg_new = (new);					\
										\
	switch (sizeof(__xg_orig)) {						\
	case 4:									\
		asm volatile(							\
			"0:						\n"	\
			"	orcc		gr0,gr0,gr0,icc3	\n"	\
			"	ckeq		icc3,cc7		\n"	\
			"	ld.p		%M0,%1			\n"	\
			"	orcr		cc7,cc7,cc3		\n"	\
			"	sub%I4cc	%1,%4,%2,icc0		\n"	\
			"	bne		icc0,#0,1f		\n"	\
			"	cst.p		%3,%M0		,cc3,#1	\n"	\
			"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
			"	beq		icc3,#0,0b		\n"	\
			"1:						\n"	\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)	\
			: "r"(__xg_new), "NPr"(__xg_test)			\
			: "memory", "cc7", "cc3", "icc3", "icc0"		\
			);							\
		break;								\
										\
	default:								\
		__xg_orig = (__typeof__(__xg_orig))0;				\
		asm volatile("break");						\
		break;								\
	}									\
										\
	__xg_orig;								\
})

#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new)							\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig;						\
	__typeof__(*(ptr)) __xg_test = (test);					\
	__typeof__(*(ptr)) __xg_new = (new);					\
										\
	switch (sizeof(__xg_orig)) {						\
	case 4: __xg_orig = (__force __typeof__(*ptr))				\
			__cmpxchg_32((__force uint32_t *)__xg_ptr,		\
				     (__force uint32_t)__xg_test,		\
				     (__force uint32_t)__xg_new); break;	\
	default:								\
		__xg_orig = (__typeof__(__xg_orig))0;				\
		asm volatile("break");						\
		break;								\
	}									\
										\
	__xg_orig;								\
})

#endif
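
/*
 * Illustrative cmpxchg() loop (not in the original header): the classic
 * compare-and-swap retry pattern for a lock-free read-modify-write on a
 * 32-bit word, the only size this implementation handles.  The function and
 * variable names are hypothetical.
 */
#if 0
static unsigned long example_value;

static void example_atomic_add(unsigned long n)
{
	unsigned long old, new;

	do {
		old = example_value;	/* sample the current value */
		new = old + n;		/* compute the desired update */
		/* retry if another CPU changed the word in between */
	} while (cmpxchg(&example_value, old, new) != old);
}
#endif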
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return cmpxchg((unsigned long *)ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
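
/*
 * Illustrative use of cmpxchg_local() (not in the original header): it is
 * only guaranteed atomic with respect to the current CPU, so it suits data
 * that only the local CPU (including its interrupt handlers) may touch,
 * without paying for cross-CPU atomicity.  The names are hypothetical.
 */
#if 0
static unsigned long example_local_stat;

static void example_bump_local_stat(void)
{
	unsigned long old;

	do {
		old = example_local_stat;
	} while (cmpxchg_local(&example_local_stat, old, old + 1) != old);
}
#endif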
#endif /* _ASM_SYSTEM_H */