arch/frv/lib/atomic-ops.S
David Howells 00460f41ff FRV: Implement atomic64_t
Implement atomic64_t and its ops for FRV.  Tested with the following patch:

	diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
	index 55e4fab..086d50d 100644
	--- a/arch/frv/kernel/setup.c
	+++ b/arch/frv/kernel/setup.c
	@@ -746,6 +746,52 @@ static void __init parse_cmdline_early(char *cmdline)

	 } /* end parse_cmdline_early() */

	+static atomic64_t xxx;
	+
	+static void test_atomic64(void)
	+{
	+	atomic64_set(&xxx, 0x12300000023LL);
	+
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000023LL);
	+	mb();
	+	if (atomic64_inc_return(&xxx) != 0x12300000024LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000024LL);
	+	mb();
	+	if (atomic64_sub_return(0x36900000050LL, &xxx) != -0x2460000002cLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002cLL);
	+	mb();
	+	if (atomic64_dec_return(&xxx) != -0x2460000002dLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002dLL);
	+	mb();
	+	if (atomic64_add_return(0x36800000001LL, &xxx) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x123456789abcdefLL, 0x121ffffffd4LL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x121ffffffd4LL, 0x123456789abcdefLL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x123456789abcdefLL);
	+	mb();
	+	if (atomic64_xchg(&xxx, 0xabcdef123456789LL) != 0x123456789abcdefLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0xabcdef123456789LL);
	+	mb();
	+}
	+
	 /*****************************************************************************/
	 /*
	  *
	@@ -845,6 +891,8 @@ void __init setup_arch(char **cmdline_p)
	 //	asm volatile("movgs %0,timerd" :: "r"(10000000));
	 //	__set_HSR(0, __get_HSR(0) | HSR0_ETMD);

	+	test_atomic64();
	+
	 } /* end setup_arch() */

	 #if 0

Note that this doesn't cover all the trivial wrappers, but does cover all the
substantial implementations.
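
For reference, the trivial wrappers are one-line definitions layered over the
routines exercised above, so they add nothing substantial to test. A minimal
sketch of the sort of thing meant (illustrative only, not necessarily the
exact arch/frv header text):

	/* illustrative sketch: inc/dec expressed via the tested
	 * add/sub primitives */
	#define atomic64_inc_return(v)	atomic64_add_return(1, (v))
	#define atomic64_dec_return(v)	atomic64_sub_return(1, (v))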

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-07-01 19:38:09 -07:00


/* atomic-ops.S: kernel atomic operations
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
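
/* A rough summary of the retry protocol used by every loop below (a
 * paraphrase; see Documentation/frv/atomic-ops.txt, referenced above, for
 * the authoritative description):
 *
 *  - ORCC gr0,gr0,gr0,icc3 sets ICC3.Z; CKEQ icc3,cc7 then sets CC7 true.
 *  - LD.P and ORCR sit in the same VLIW bundle, so the load and the setting
 *    of CC3 to true happen together.
 *  - Interrupt and exception entry clears CC3, so if anything intervenes
 *    between the load and the store, the conditional store (CST) is
 *    suppressed and the conditional CORCC does not run; ICC3.Z stays set
 *    and BEQ restarts the sequence from the reload.
 *  - If the store does go ahead, CORCC ORs the non-zero gr29 with itself,
 *    clearing ICC3.Z, and BEQ falls through to BRALR (return).
 *
 * This relies on interrupts and exceptions being the only sources of
 * concurrency on this (non-SMP) arch.
 */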
#include <asm/spr-regs.h>

	.text
	.balign 4
###############################################################################
#
# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
	.globl		atomic_test_and_ANDNOT_mask
	.type		atomic_test_and_ANDNOT_mask,@function
atomic_test_and_ANDNOT_mask:
	not.p		gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	and		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
###############################################################################
#
# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
	.globl		atomic_test_and_OR_mask
	.type		atomic_test_and_OR_mask,@function
atomic_test_and_OR_mask:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	or		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
###############################################################################
#
# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
	.globl		atomic_test_and_XOR_mask
	.type		atomic_test_and_XOR_mask,@function
atomic_test_and_XOR_mask:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	xor		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
###############################################################################
#
# int atomic_add_return(int i, atomic_t *v)
#
###############################################################################
	.globl		atomic_add_return
	.type		atomic_add_return,@function
atomic_add_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	add		gr8,gr10,gr8
	cst.p		gr8,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_add_return, .-atomic_add_return
###############################################################################
#
# int atomic_sub_return(int i, atomic_t *v)
#
###############################################################################
	.globl		atomic_sub_return
	.type		atomic_sub_return,@function
atomic_sub_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	sub		gr8,gr10,gr8
	cst.p		gr8,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_sub_return, .-atomic_sub_return
###############################################################################
#
# uint32_t __xchg_32(uint32_t i, uint32_t *v)
#
###############################################################################
	.globl		__xchg_32
	.type		__xchg_32,@function
__xchg_32:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	cst.p		gr10,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		__xchg_32, .-__xchg_32
###############################################################################
#
# uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new)
#
###############################################################################
	.globl		__cmpxchg_32
	.type		__cmpxchg_32,@function
__cmpxchg_32:
	or.p		gr8,gr8,gr11
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr11,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	subcc		gr8,gr9,gr7,icc0		/* compare current value with 'test' */
	bnelr		icc0,#0				/* mismatch: return current value */
	cst.p		gr10,@(gr11,gr0)	,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		__cmpxchg_32, .-__cmpxchg_32
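
As a closing aid, here is a rough C rendering of what __cmpxchg_32 guarantees.
This is an illustrative sketch only: the C version is not atomic by itself; in
the assembly, atomicity comes from the CC3/ICC3 retry protocol, which restarts
the whole load/compare/store sequence if anything intervenes.

	#include <stdint.h>

	/* Sketch of __cmpxchg_32's contract: if *v holds 'test', replace it
	 * with 'new'; in all cases return the value observed in *v.  The
	 * assembly achieves atomicity by retrying until the load/store pair
	 * completes uninterrupted. */
	static uint32_t cmpxchg_32_sketch(uint32_t *v, uint32_t test, uint32_t new)
	{
		uint32_t old = *v;	/* ld.p  @(gr11,gr0),gr8 */
		if (old != test)	/* subcc gr8,gr9,gr7,icc0 */
			return old;	/* bnelr icc0,#0 */
		*v = new;		/* cst.p gr10,@(gr11,gr0),cc3 */
		return old;
	}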