Mirror of https://github.com/adulau/aha.git, synced 2024-12-29 12:16:20 +00:00
bac4e960b5
Mathieu Desnoyers pointed out that the ARM barriers were lacking:

- cmpxchg, xchg and atomic add return need memory barriers on
  architectures which can reorder the relative order in which memory
  read/writes can be seen between CPUs, which seems to include recent
  ARM architectures. Those barriers are currently missing on ARM.

- test_and_xxx_bit were missing SMP barriers.

So put these barriers in. Provide separate atomic_add/atomic_sub
operations which do not require barriers.

Reported-Reviewed-and-Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
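For context, the barrier placement the message describes looks roughly like the sketch below for an ARMv6+ atomic_add_return: a full SMP barrier on each side of the ldrex/strex retry loop, so the returned value is ordered against surrounding memory accesses. This is an illustrative reconstruction, not the patch hunk itself; it assumes the kernel's atomic_t (with a counter field) and smp_mb(), the C-level counterpart of the smp_dmb assembler macro defined at the bottom of this header.

/* Sketch only: barrier placement around an exclusive-access retry loop. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();			/* order prior accesses before the update */

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"		/* load-exclusive the counter */
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"		/* try to store the new value */
"	teq	%1, #0\n"
"	bne	1b"			/* lost the exclusive monitor: retry */
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();			/* order the result before later accesses */

	return result;
}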
129 lines · 2.8 KiB · C
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
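
@ Illustrative usage, not part of the original header: with these helpers a
@ word held in r3 can be stored to an unaligned buffer at r0 one byte at a
@ time, in the same memory order on little- and big-endian builds:
@
@	mov	ip, r3, get_byte_0
@	strb	ip, [r0], #1
@	mov	ip, r3, get_byte_1
@	strb	ip, [r0], #1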

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
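
@ Illustrative usage, not part of the original header: prefetch the next
@ source cache line inside a copy loop; on pre-ARMv5 builds the instruction
@ simply disappears:
@
@	PLD(	pld	[r1, #32]	)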

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
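
@ Illustrative usage, not part of the original header: a destination
@ alignment prologue that is only assembled for Feroceon ('.Lalign_dst'
@ is a hypothetical local label):
@
@	CALGN(	ands	ip, r0, #31	)	@ offset into the cache line
@	CALGN(	bne	.Lalign_dst	)	@ align r0 first if non-zero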

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq
	cpsid	i
	.endm

	.macro	enable_irq
	cpsie	i
	.endm
#else
	.macro	disable_irq
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
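
@ Illustrative usage, not part of the original header: the two macros are
@ used as a pair around a short critical section, keeping the saved CPSR
@ in any spare register:
@
@	save_and_disable_irqs r9
@	@ ... critical section ...
@	restore_irqs	r9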

#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.previous
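
@ Illustrative usage, not part of the original header: wrap a user-space
@ access so that a fault is redirected to the local fixup label 9001,
@ which the surrounding code must provide:
@
@ USER(	ldrt	r3, [r0], #4	)	@ may fault on a bad user pointer
@	...
@ 9001:	@ fault fixup code runs here (e.g. return -EFAULT)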

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	dmb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
#endif
	.endm
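
@ Illustrative usage, not part of the original header: this is the kind of
@ bracketing the commit message describes for the bitops and xchg paths,
@ with an smp_dmb on each side of the exclusive load/store sequence:
@
@	smp_dmb
@ 1:	ldrex	r2, [r1]
@	orr	r2, r2, r3		@ set the bit
@	strex	ip, r2, [r1]
@	cmp	ip, #0
@	bne	1b
@	smp_dmb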