Remove fastcall from linux/include
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9f741cb8fe
commit ec7015840a

6 changed files with 20 additions and 20 deletions
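For background: on i386, fastcall expanded to a regparm attribute, and by this point the i386 tree was already compiled with -mregparm=3 throughout, so the annotation on prototypes was redundant. A rough sketch of what it meant, assuming the asm-i386/linkage.h definitions of this era (illustrative only):

/* Not part of the patch: roughly what include/asm-i386/linkage.h
 * provided -- pass up to three arguments in registers. */
#define fastcall	__attribute__((regparm(3)))

struct irq_desc;

/* Before the patch a prototype carried the annotation explicitly: */
extern void fastcall handle_level_irq(unsigned int irq, struct irq_desc *desc);

/* Because the whole i386 build already uses -mregparm=3, the plain
 * prototype has the same calling convention, so deleting the
 * annotation does not change the generated code. */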
include/linux/irq.h
@@ -25,7 +25,7 @@
 #include <asm/irq_regs.h>
 
 struct irq_desc;
-typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
+typedef void (*irq_flow_handler_t)(unsigned int irq,
 					    struct irq_desc *desc);
 
@@ -276,19 +276,19 @@ extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
  * Built-in IRQ handlers for various IRQ types,
  * callable via desc->chip->handle_irq()
  */
-extern void fastcall handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 
 /*
  * Monolithic do_IRQ implementation.
  * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
  */
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern fastcall unsigned int __do_IRQ(unsigned int irq);
+extern unsigned int __do_IRQ(unsigned int irq);
 #endif
 
 /*
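These flow handlers are not called directly by drivers: interrupt-controller setup code installs one per IRQ line, and the core then reaches it through desc->chip->handle_irq(). A minimal, hypothetical sketch assuming the genirq API of this era (the my_pic_* names are invented):

#include <linux/init.h>
#include <linux/irq.h>

/* Invented controller callbacks -- a real driver would poke hardware. */
static void my_pic_mask(unsigned int irq)   { }
static void my_pic_unmask(unsigned int irq) { }
static void my_pic_ack(unsigned int irq)    { }

static struct irq_chip my_pic_chip = {
	.name	= "my-pic",
	.mask	= my_pic_mask,
	.unmask	= my_pic_unmask,
	.ack	= my_pic_ack,
};

/* Install the generic level-type flow handler for one IRQ line; the
 * core will then invoke handle_level_irq() via desc->chip->handle_irq. */
static void __init my_pic_setup_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &my_pic_chip, handle_level_irq);
}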
include/linux/kernel.h
@@ -133,7 +133,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 extern void oops_enter(void);
 extern void oops_exit(void);
 extern int oops_may_print(void);
-fastcall NORET_TYPE void do_exit(long error_code)
+NORET_TYPE void do_exit(long error_code)
 	ATTRIB_NORET;
 NORET_TYPE void complete_and_exit(struct completion *, long)
 	ATTRIB_NORET;
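do_exit() and complete_and_exit() never return, which is what the NORET_TYPE/ATTRIB_NORET markers kept here express. A hypothetical kernel-thread body showing the usual pattern (names invented):

#include <linux/kernel.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(my_thread_done);

/* The thread terminates itself via complete_and_exit(), which signals
 * the completion and then exits -- control never comes back here. */
static int my_thread_fn(void *unused)
{
	/* ... do the work ... */
	complete_and_exit(&my_thread_done, 0);
	/* not reached */
}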
include/linux/mutex.h
@@ -112,7 +112,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  *
  * Returns 1 if the mutex is locked, 0 if unlocked.
  */
-static inline int fastcall mutex_is_locked(struct mutex *lock)
+static inline int mutex_is_locked(struct mutex *lock)
 {
 	return atomic_read(&lock->count) != 1;
 }
@@ -132,9 +132,9 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -145,7 +145,7 @@ extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
  * NOTE: mutex_trylock() follows the spin_trylock() convention,
  *       not the down_trylock() convention!
  */
-extern int fastcall mutex_trylock(struct mutex *lock);
-extern void fastcall mutex_unlock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
 
 #endif
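The NOTE above deserves a concrete caller: mutex_trylock() returns 1 when the lock was taken, matching spin_trylock(), whereas down_trylock() returns 0 on success. A small hypothetical user (names invented):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_lock);

static int my_try_update(void)
{
	/* Nonzero return means we now hold the mutex. */
	if (!mutex_trylock(&my_lock))
		return -EBUSY;		/* contended, caller may retry */

	/* ... critical section ... */

	mutex_unlock(&my_lock);
	return 0;
}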
include/linux/preempt.h
@@ -11,8 +11,8 @@
 #include <linux/list.h>
 
 #ifdef CONFIG_DEBUG_PREEMPT
-extern void fastcall add_preempt_count(int val);
-extern void fastcall sub_preempt_count(int val);
+extern void add_preempt_count(int val);
+extern void sub_preempt_count(int val);
 #else
 # define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
 # define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
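add_preempt_count()/sub_preempt_count() are the building blocks behind the preemption-control macros. A simplified sketch of how they are consumed, not the verbatim linux/preempt.h definitions (the my_ prefix marks it as illustrative; the real preempt_enable() also checks for a pending reschedule):

/* Bump the per-task preempt count before the critical region ... */
#define my_preempt_disable()			\
do {						\
	add_preempt_count(1);			\
	barrier();				\
} while (0)

/* ... and drop it afterwards, without forcing a reschedule check. */
#define my_preempt_enable_no_resched()		\
do {						\
	barrier();				\
	sub_preempt_count(1);			\
} while (0)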
include/linux/spinlock.h
@@ -71,7 +71,7 @@
 #define LOCK_SECTION_END \
 	".previous\n\t"
 
-#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+#define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
  * Pull the raw_spinlock_t and raw_rwlock_t definitions:
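With fastcall gone, __lockfunc only moves a function into the .spinlock.text section, which keeps the out-of-line lock routines grouped in backtraces. A hypothetical helper tagged the same way (name invented):

#include <linux/spinlock.h>

/* Invented out-of-line helper, placed in .spinlock.text via __lockfunc
 * just like the lock routines in kernel/spinlock.c. */
void __lockfunc my_lock_and_count(spinlock_t *lock, unsigned long *counter)
{
	spin_lock(lock);
	(*counter)++;
	spin_unlock(lock);
}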
include/linux/timer.h
@@ -35,8 +35,8 @@ extern struct tvec_base boot_tvec_bases;
 	struct timer_list _name =				\
 		TIMER_INITIALIZER(_function, _expires, _data)
 
-void fastcall init_timer(struct timer_list * timer);
-void fastcall init_timer_deferrable(struct timer_list *timer);
+void init_timer(struct timer_list *timer);
+void init_timer_deferrable(struct timer_list *timer);
 
 static inline void setup_timer(struct timer_list * timer,
 			void (*function)(unsigned long),
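init_timer()/init_timer_deferrable() only initialize the structure; setup_timer(), the inline that follows this hunk, additionally fills in ->function and ->data. Hypothetical usage with the timer API of this era, where the callback takes an unsigned long (names invented):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

/* Runs in softirq context when the timer expires. */
static void my_timeout(unsigned long data)
{
	/* ... handle the timeout ... */
}

static void my_start_timer(void)
{
	setup_timer(&my_timer, my_timeout, 0);
	mod_timer(&my_timer, jiffies + HZ);	/* fire in about one second */
}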