From 4c6ab3ee4cdb86cbd4e9400dd22fad7701cbe795 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 21 Sep 2009 23:21:53 -0700 Subject: [PATCH 0001/1007] crypto: padlock-sha - Fix stack alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PadLock hardware requires the output buffer for SHA to be 128-bit aligned. We currentply place the buffer on the stack, and ask gcc to align it to 128 bits. That doesn't work on i386 because the kernel stack is only aligned to 32 bits. This patch changes the code to align the buffer by hand so that the hardware doesn't fault on unaligned buffers. Reported-by: Séguier Régis Tested-by: Séguier Régis Signed-off-by: Herbert Xu --- drivers/crypto/padlock-sha.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 76cb6b345e7..0af80577dc7 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -24,6 +24,12 @@ #include #include "padlock.h" +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + struct padlock_sha_desc { struct shash_desc fallback; }; @@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. */ - char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct sha1_state state; unsigned int space; @@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. */ - char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct sha256_state state; unsigned int space; From 01f1afaf7b77a16198e65e909e3c410c036eb0d0 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Tue, 22 Sep 2009 16:29:00 -0700 Subject: [PATCH 0002/1007] ide: use printk_once Signed-off-by: Marcin Slusarz Acked-by: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- drivers/ide/ide-proc.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 28d09a5d845..017c09540c2 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = { static void proc_ide_settings_warn(void) { - static int warned; - - if (warned) - return; - - printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " + printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " "obsolete, and will be removed soon!\n"); - warned = 1; } static int ide_settings_proc_show(struct seq_file *m, void *v) From fc2219d49ef1606e7fd2c88af2b423b01ff3d319 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 23 Sep 2009 09:50:41 -0700 Subject: [PATCH 0003/1007] rcu: Clean up code based on review feedback from Josh Triplett These issues identified during an old-fashioned face-to-face code review extended over many hours. o Bury various forms of the "rsp->completed == rsp->gpnum" comparison into an rcu_gp_in_progress() function, which has the beneficial side-effect of forcing consistent use of ACCESS_ONCE(). o Replace hand-coded arithmetic with DIV_ROUND_UP(). o Bury several "!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x01])" instances into an rcu_preempted_readers() function, as this expression indicates that there are no readers blocked within RCU read-side critical sections blocking the current grace period. (Though there might well be similar readers blocking the next grace period.) o Remove a dangling rcu_restart_cpu() declaration that has been dangling for almost 20 minor releases of the kernel. Signed-off-by: Paul E. McKenney Acked-by: Peter Zijlstra Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12537246442687-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcutree.h | 1 - kernel/rcutree.c | 29 +++++++++++++++--------- kernel/rcutree.h | 6 ++--- kernel/rcutree_plugin.h | 50 ++++++++++++++++++++--------------------- 4 files changed, 46 insertions(+), 40 deletions(-) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 37682770e9d..88109c87f29 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -85,7 +85,6 @@ static inline void synchronize_rcu_bh_expedited(void) extern void __rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 52b06f6e158..f85b6842d1e 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -100,6 +100,16 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, #include "rcutree_plugin.h" +/* + * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s + * permit this function to be invoked without holding the root rcu_node + * structure's ->lock, but of course results can be subject to change. + */ +static int rcu_gp_in_progress(struct rcu_state *rsp) +{ + return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); +} + /* * Note a quiescent state. Because we do not need to know * how many quiescent states passed, just if there was at least @@ -173,9 +183,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) static int cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) { - /* ACCESS_ONCE() because we are accessing outside of lock. 
*/ - return *rdp->nxttail[RCU_DONE_TAIL] && - ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum); + return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); } /* @@ -482,7 +490,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) spin_lock_irqsave(&rnp->lock, flags); delta = jiffies - rsp->jiffies_stall; - if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { + if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { spin_unlock_irqrestore(&rnp->lock, flags); return; } @@ -537,8 +545,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); - } else if (rsp->gpnum != rsp->completed && - delta >= RCU_STALL_RAT_DELAY) { + } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) { /* They had two time units to dump stack, so complain. */ print_other_cpu_stall(rsp); @@ -703,9 +710,9 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) * hold rnp->lock, as required by rcu_start_gp(), which will release it. */ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) - __releases(rnp->lock) + __releases(rcu_get_root(rsp)->lock) { - WARN_ON_ONCE(rsp->completed == rsp->gpnum); + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); rsp->completed = rsp->gpnum; rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ @@ -1092,7 +1099,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) struct rcu_node *rnp = rcu_get_root(rsp); u8 signaled; - if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) + if (!rcu_gp_in_progress(rsp)) return; /* No grace period in progress, nothing to force. */ if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ @@ -1251,7 +1258,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /* Start a new grace period if one not already started. */ - if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { + if (!rcu_gp_in_progress(rsp)) { unsigned long nestflag; struct rcu_node *rnp_root = rcu_get_root(rsp); @@ -1331,7 +1338,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) } /* Has an RCU GP gone long enough to send resched IPIs &c? 
*/ - if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && + if (rcu_gp_in_progress(rsp) && ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { rdp->n_rp_need_fqs++; return 1; diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 8e8287a983c..9aa8c8a160d 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -48,14 +48,14 @@ #elif NR_CPUS <= RCU_FANOUT_SQ # define NUM_RCU_LVLS 2 # define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) # define NUM_RCU_LVL_2 (NR_CPUS) # define NUM_RCU_LVL_3 0 #elif NR_CPUS <= RCU_FANOUT_CUBE # define NUM_RCU_LVLS 3 # define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) -# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) +# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) # define NUM_RCU_LVL_3 NR_CPUS #else # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 1cee04f627e..8ff1ba7b3c4 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -150,6 +150,16 @@ void __rcu_read_lock(void) } EXPORT_SYMBOL_GPL(__rcu_read_lock); +/* + * Check for preempted RCU readers blocking the current grace period + * for the specified rcu_node structure. If the caller needs a reliable + * answer, it must hold the rcu_node's ->lock. + */ +static int rcu_preempted_readers(struct rcu_node *rnp) +{ + return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); +} + static void rcu_read_unlock_special(struct task_struct *t) { int empty; @@ -196,7 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t) break; spin_unlock(&rnp->lock); /* irqs remain disabled. */ } - empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); + empty = !rcu_preempted_readers(rnp); list_del_init(&t->rcu_node_entry); t->rcu_blocked_node = NULL; @@ -207,7 +217,7 @@ static void rcu_read_unlock_special(struct task_struct *t) * drop rnp->lock and restore irq. */ if (!empty && rnp->qsmask == 0 && - list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { + !rcu_preempted_readers(rnp)) { struct rcu_node *rnp_p; if (rnp->parent == NULL) { @@ -257,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp) { unsigned long flags; struct list_head *lp; - int phase = rnp->gpnum & 0x1; + int phase; struct task_struct *t; - if (!list_empty(&rnp->blocked_tasks[phase])) { + if (rcu_preempted_readers(rnp)) { spin_lock_irqsave(&rnp->lock, flags); - phase = rnp->gpnum & 0x1; /* re-read under lock. */ + phase = rnp->gpnum & 0x1; lp = &rnp->blocked_tasks[phase]; list_for_each_entry(t, lp, rcu_node_entry) printk(" P%d", t->pid); @@ -281,20 +291,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp) */ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { - WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])); + WARN_ON_ONCE(rcu_preempted_readers(rnp)); WARN_ON_ONCE(rnp->qsmask); } -/* - * Check for preempted RCU readers for the specified rcu_node structure. - * If the caller needs a reliable answer, it must hold the rcu_node's - * >lock. 
- */ -static int rcu_preempted_readers(struct rcu_node *rnp) -{ - return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); -} - #ifdef CONFIG_HOTPLUG_CPU /* @@ -461,6 +461,15 @@ static void rcu_preempt_note_context_switch(int cpu) { } +/* + * Because preemptable RCU does not exist, there are never any preempted + * RCU readers. + */ +static int rcu_preempted_readers(struct rcu_node *rnp) +{ + return 0; +} + #ifdef CONFIG_RCU_CPU_STALL_DETECTOR /* @@ -483,15 +492,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) WARN_ON_ONCE(rnp->qsmask); } -/* - * Because preemptable RCU does not exist, there are never any preempted - * RCU readers. - */ -static int rcu_preempted_readers(struct rcu_node *rnp) -{ - return 0; -} - #ifdef CONFIG_HOTPLUG_CPU /* From 1eba8f84380bede3c602bd7758dea96925cead01 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 23 Sep 2009 09:50:42 -0700 Subject: [PATCH 0004/1007] rcu: Clean up code based on review feedback from Josh Triplett, part 2 These issues identified during an old-fashioned face-to-face code review extending over many hours. o Add comments for tricky parts of code, and correct comments that have passed their sell-by date. o Get rid of the vestiges of rcu_init_sched(), which is no longer needed now that PREEMPT_RCU is gone. o Move the #include of rcutree_plugin.h to the end of rcutree.c, which means that, rather than having a random collection of forward declarations, the new set of forward declarations document the set of plugins. The new home for this #include also allows __rcu_init_preempt() to move into rcutree_plugin.h. o Fix rcu_preempt_check_callbacks() to be static. Suggested-by: Josh Triplett Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12537246443924-git-send-email-> Signed-off-by: Ingo Molnar Peter Zijlstra --- include/linux/rcupdate.h | 4 +++ include/linux/rcutree.h | 6 +--- init/main.c | 1 - kernel/rcutree.c | 72 ++++++++++++++++++---------------------- kernel/rcutree.h | 31 ++++++++++++----- kernel/rcutree_plugin.h | 23 +++++++++++-- 6 files changed, 82 insertions(+), 55 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6fe0363724e..70331218e4b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -196,6 +196,8 @@ static inline void rcu_read_lock_sched(void) __acquire(RCU_SCHED); rcu_read_acquire(); } + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); @@ -213,6 +215,8 @@ static inline void rcu_read_unlock_sched(void) __release(RCU_SCHED); preempt_enable(); } + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. 
*/ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 88109c87f29..19a3b06943e 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -90,10 +90,6 @@ extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); extern long rcu_batches_completed_sched(void); -static inline void rcu_init_sched(void) -{ -} - #ifdef CONFIG_NO_HZ void rcu_enter_nohz(void); void rcu_exit_nohz(void); @@ -106,7 +102,7 @@ static inline void rcu_exit_nohz(void) } #endif /* CONFIG_NO_HZ */ -/* A context switch is a grace period for rcutree. */ +/* A context switch is a grace period for RCU-sched and RCU-bh. */ static inline int rcu_blocking_is_gp(void) { return num_online_cpus() == 1; diff --git a/init/main.c b/init/main.c index 34971becbd3..833d675677d 100644 --- a/init/main.c +++ b/init/main.c @@ -782,7 +782,6 @@ static void __init do_initcalls(void) */ static void __init do_basic_setup(void) { - rcu_init_sched(); /* needed by module_init stage. */ init_workqueues(); cpuset_init_smp(); usermodehelper_init(); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f85b6842d1e..53a5ef0ca91 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -81,24 +81,29 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); -extern long rcu_batches_completed_sched(void); -static struct rcu_node *rcu_get_root(struct rcu_state *rsp); -static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, - struct rcu_node *rnp, unsigned long flags); -static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags); +/* Forward declarations for rcutree_plugin.h */ +static inline void rcu_bootup_announce(void); +long rcu_batches_completed(void); +static void rcu_preempt_note_context_switch(int cpu); +static int rcu_preempted_readers(struct rcu_node *rnp); +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +static void rcu_print_task_stall(struct rcu_node *rnp); +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU -static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp); +static void rcu_preempt_offline_tasks(struct rcu_state *rsp, + struct rcu_node *rnp, + struct rcu_data *rdp); +static void rcu_preempt_offline_cpu(int cpu); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ -static void __rcu_process_callbacks(struct rcu_state *rsp, - struct rcu_data *rdp); -static void __call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu), - struct rcu_state *rsp); -static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp); -static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, - int preemptable); +static void rcu_preempt_check_callbacks(int cpu); +static void rcu_preempt_process_callbacks(void); +void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +static int rcu_preempt_pending(int cpu); +static int rcu_preempt_needs_cpu(int cpu); +static void __cpuinit rcu_preempt_init_percpu_data(int cpu); +static void __init __rcu_init_preempt(void); -#include "rcutree_plugin.h" /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s @@ -377,7 +382,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp) /* * Snapshot the specified CPU's dynticks counter so that we can later * credit them with an implicit quiescent state. 
Return 1 if this CPU - * is already in a quiescent state courtesy of dynticks idle mode. + * is in dynticks idle mode, which is an extended quiescent state. */ static int dyntick_save_progress_counter(struct rcu_data *rdp) { @@ -624,9 +629,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) note_new_gpnum(rsp, rdp); /* - * Because we are first, we know that all our callbacks will - * be covered by this upcoming grace period, even the ones - * that were registered arbitrarily recently. + * Because this CPU just now started the new grace period, we know + * that all of its callbacks will be covered by this upcoming grace + * period, even the ones that were registered arbitrarily recently. + * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. + * + * Other CPUs cannot be sure exactly when the grace period started. + * Therefore, their recently registered callbacks must pass through + * an additional RCU_NEXT_READY stage, so that they will be handled + * by the next RCU grace period. */ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; @@ -886,7 +897,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) /* * Move callbacks from the outgoing CPU to the running CPU. - * Note that the outgoing CPU is now quiscent, so it is now + * Note that the outgoing CPU is now quiescent, so it is now * (uncharacteristically) safe to access its rcu_data structure. * Note also that we must carefully retain the order of the * outgoing CPU's callbacks in order for rcu_barrier() to work @@ -1577,25 +1588,6 @@ do { \ } \ } while (0) -#ifdef CONFIG_TREE_PREEMPT_RCU - -void __init __rcu_init_preempt(void) -{ - int i; /* All used by RCU_INIT_FLAVOR(). */ - int j; - struct rcu_node *rnp; - - RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); -} - -#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ - -void __init __rcu_init_preempt(void) -{ -} - -#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ - void __init __rcu_init(void) { int i; /* All used by RCU_INIT_FLAVOR(). */ @@ -1612,6 +1604,8 @@ void __init __rcu_init(void) open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); } +#include "rcutree_plugin.h" + module_param(blimit, int, 0); module_param(qhimark, int, 0); module_param(qlowmark, int, 0); diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 9aa8c8a160d..a48d11f37b4 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -79,15 +79,21 @@ struct rcu_dynticks { * Definition for node within the RCU grace-period-detection hierarchy. */ struct rcu_node { - spinlock_t lock; + spinlock_t lock; /* Root rcu_node's lock protects some */ + /* rcu_state fields as well as following. */ long gpnum; /* Current grace period for this node. */ /* This will either be equal to or one */ /* behind the root rcu_node's gpnum. */ unsigned long qsmask; /* CPUs or groups that need to switch in */ /* order for current grace period to proceed.*/ + /* In leaf rcu_node, each bit corresponds to */ + /* an rcu_data structure, otherwise, each */ + /* bit corresponds to a child rcu_node */ + /* structure. */ unsigned long qsmaskinit; /* Per-GP initialization for qsmask. */ unsigned long grpmask; /* Mask to apply to parent qsmask. */ + /* Only one bit will be set in this mask. */ int grplo; /* lowest-numbered CPU or group here. */ int grphi; /* highest-numbered CPU or group here. */ u8 grpnum; /* CPU/group number for next level up. 
*/ @@ -95,6 +101,9 @@ struct rcu_node { struct rcu_node *parent; struct list_head blocked_tasks[2]; /* Tasks blocked in RCU read-side critsect. */ + /* Grace period number (->gpnum) x blocked */ + /* by tasks on the (x & 0x1) element of the */ + /* blocked_tasks[] array. */ } ____cacheline_internodealigned_in_smp; /* Index values for nxttail array in struct rcu_data. */ @@ -126,19 +135,22 @@ struct rcu_data { * Any of the partitions might be empty, in which case the * pointer to that partition will be equal to the pointer for * the following partition. When the list is empty, all of - * the nxttail elements point to nxtlist, which is NULL. + * the nxttail elements point to the ->nxtlist pointer itself, + * which in that case is NULL. * - * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): - * Entries that might have arrived after current GP ended - * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): - * Entries known to have arrived before current GP ended - * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): - * Entries that batch # <= ->completed - 1: waiting for current GP * [nxtlist, *nxttail[RCU_DONE_TAIL]): * Entries that batch # <= ->completed * The grace period for these entries has completed, and * the other grace-period-completed entries may be moved * here temporarily in rcu_process_callbacks(). + * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): + * Entries that batch # <= ->completed - 1: waiting for current GP + * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): + * Entries known to have arrived before current GP ended + * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]): + * Entries that might have arrived after current GP ended + * Note that the value of *nxttail[RCU_NEXT_TAIL] will + * always be NULL, as this is the end of the list. */ struct rcu_head *nxtlist; struct rcu_head **nxttail[RCU_NEXT_SIZE]; @@ -216,6 +228,9 @@ struct rcu_state { /* Force QS state. */ long gpnum; /* Current gp number. */ long completed; /* # of last completed gp. */ + + /* End of fields guarded by root rcu_node's lock. */ + spinlock_t onofflock; /* exclude on/offline and */ /* starting new GP. */ spinlock_t fqslock; /* Only one task forcing */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 8ff1ba7b3c4..65250219ab6 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -418,6 +418,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); } +/* + * Initialize preemptable RCU's state structures. + */ +static void __init __rcu_init_preempt(void) +{ + int i; /* All used by RCU_INIT_FLAVOR(). */ + int j; + struct rcu_node *rnp; + + RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); +} + /* * Check for a task exiting while in a preemptable-RCU read-side * critical section, clean up if so. No need to issue warnings, @@ -518,7 +530,7 @@ static void rcu_preempt_offline_cpu(int cpu) * Because preemptable RCU does not exist, it never has any callbacks * to check. */ -void rcu_preempt_check_callbacks(int cpu) +static void rcu_preempt_check_callbacks(int cpu) { } @@ -526,7 +538,7 @@ void rcu_preempt_check_callbacks(int cpu) * Because preemptable RCU does not exist, it never has any callbacks * to process. */ -void rcu_preempt_process_callbacks(void) +static void rcu_preempt_process_callbacks(void) { } @@ -563,4 +575,11 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) { } +/* + * Because preemptable RCU does not exist, it need not be initialized. 
+ */ +static void __init __rcu_init_preempt(void) +{ +} + #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ From 9b2619aff0332e95ea5eb7a0d75b0208818d871c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 23 Sep 2009 09:50:43 -0700 Subject: [PATCH 0005/1007] rcu: Clean up code to address Ingo's checkpatch feedback Move declarations and update storage classes to make checkpatch happy. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12537246441701-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcutree.h | 6 +++++- kernel/rcupdate.c | 3 --- kernel/rcutorture.c | 4 +--- kernel/rcutree.c | 23 ----------------------- kernel/rcutree.h | 26 +++++++++++++++++++++++++- kernel/rcutree_trace.c | 12 ++++++------ 6 files changed, 37 insertions(+), 37 deletions(-) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 19a3b06943e..46e9ab3ee6e 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,10 +30,14 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H +struct notifier_block; + extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); - +extern int rcu_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu); extern int rcu_needs_cpu(int cpu); +extern int rcu_expedited_torture_stats(char *page); #ifdef CONFIG_TREE_PREEMPT_RCU diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 37ac4548308..8e795133b33 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -259,9 +259,6 @@ static void rcu_migrate_callback(struct rcu_head *notused) wake_up(&rcu_migrate_wq); } -extern int rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu); - static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, unsigned long action, void *hcpu) { diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 233768f21f9..697c0a0229d 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -606,8 +606,6 @@ static struct rcu_torture_ops sched_ops_sync = { .name = "sched_sync" }; -extern int rcu_expedited_torture_stats(char *page); - static struct rcu_torture_ops sched_expedited_ops = { .init = rcu_sync_torture_init, .cleanup = NULL, @@ -650,7 +648,7 @@ rcu_torture_writer(void *arg) old_rp = rcu_torture_current; rp->rtort_mbtest = 1; rcu_assign_pointer(rcu_torture_current, rp); - smp_wmb(); + smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ if (old_rp) { i = old_rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 53a5ef0ca91..8e52cde7b8f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -81,29 +81,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); -/* Forward declarations for rcutree_plugin.h */ -static inline void rcu_bootup_announce(void); -long rcu_batches_completed(void); -static void rcu_preempt_note_context_switch(int cpu); -static int rcu_preempted_readers(struct rcu_node *rnp); -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR -static void rcu_print_task_stall(struct rcu_node *rnp); -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); -#ifdef CONFIG_HOTPLUG_CPU 
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp, - struct rcu_data *rdp); -static void rcu_preempt_offline_cpu(int cpu); -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ -static void rcu_preempt_check_callbacks(int cpu); -static void rcu_preempt_process_callbacks(void); -void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); -static int rcu_preempt_pending(int cpu); -static int rcu_preempt_needs_cpu(int cpu); -static void __cpuinit rcu_preempt_init_percpu_data(int cpu); -static void __init __rcu_init_preempt(void); - /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s diff --git a/kernel/rcutree.h b/kernel/rcutree.h index a48d11f37b4..e6ab31cc28b 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -270,5 +270,29 @@ extern struct rcu_state rcu_preempt_state; DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ -#endif /* #ifdef RCU_TREE_NONCORE */ +#else /* #ifdef RCU_TREE_NONCORE */ +/* Forward declarations for rcutree_plugin.h */ +static inline void rcu_bootup_announce(void); +long rcu_batches_completed(void); +static void rcu_preempt_note_context_switch(int cpu); +static int rcu_preempted_readers(struct rcu_node *rnp); +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +static void rcu_print_task_stall(struct rcu_node *rnp); +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); +#ifdef CONFIG_HOTPLUG_CPU +static void rcu_preempt_offline_tasks(struct rcu_state *rsp, + struct rcu_node *rnp, + struct rcu_data *rdp); +static void rcu_preempt_offline_cpu(int cpu); +#endif /* #ifdef CONFIG_HOTPLUG_CPU */ +static void rcu_preempt_check_callbacks(int cpu); +static void rcu_preempt_process_callbacks(void); +void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +static int rcu_preempt_pending(int cpu); +static int rcu_preempt_needs_cpu(int cpu); +static void __cpuinit rcu_preempt_init_percpu_data(int cpu); +static void __init __rcu_init_preempt(void); + +#endif /* #else #ifdef RCU_TREE_NONCORE */ diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index c89f5e9fd17..f09af28b826 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file) return single_open(file, show_rcudata, NULL); } -static struct file_operations rcudata_fops = { +static const struct file_operations rcudata_fops = { .owner = THIS_MODULE, .open = rcudata_open, .read = seq_read, @@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file) return single_open(file, show_rcudata_csv, NULL); } -static struct file_operations rcudata_csv_fops = { +static const struct file_operations rcudata_csv_fops = { .owner = THIS_MODULE, .open = rcudata_csv_open, .read = seq_read, @@ -159,7 +159,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) struct rcu_node *rnp; seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " - "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", + "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", rsp->completed, rsp->gpnum, rsp->signaled, (long)(rsp->jiffies_force_qs - jiffies), (int)(jiffies & 0xffff), @@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file) return single_open(file, show_rcuhier, NULL); } -static struct file_operations rcuhier_fops = { +static const struct file_operations rcuhier_fops = { .owner = THIS_MODULE, .open = rcuhier_open, .read = seq_read, @@ -222,7 +222,7 @@ 
static int rcugp_open(struct inode *inode, struct file *file) return single_open(file, show_rcugp, NULL); } -static struct file_operations rcugp_fops = { +static const struct file_operations rcugp_fops = { .owner = THIS_MODULE, .open = rcugp_open, .read = seq_read, @@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file) return single_open(file, show_rcu_pending, NULL); } -static struct file_operations rcu_pending_fops = { +static const struct file_operations rcu_pending_fops = { .owner = THIS_MODULE, .open = rcu_pending_open, .read = seq_read, From 4de75cf9391b538bbfe7dc0a9782f1ebe8e242ad Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 24 Sep 2009 01:01:29 +0100 Subject: [PATCH 0006/1007] intel-iommu: Make "Unknown DMAR structure" message more informative We might as well print the type of the DMAR structure we don't know how to handle when skipping it. Then someone getting this message has a chance of telling whether the structure is just bogus, or if there really is something valid that the kernel doesn't know how to handle. Signed-off-by: Roland Dreier Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 14bbaa17e2c..d199bcabed7 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -461,7 +461,8 @@ parse_dmar_table(void) break; default: printk(KERN_WARNING PREFIX - "Unknown DMAR structure type\n"); + "Unknown DMAR structure type %d\n", + entry_header->type); ret = 0; /* for forward compatibility */ break; } From 17b6097753e926ca546189463070a7e94e7ea9fa Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 24 Sep 2009 12:14:00 -0700 Subject: [PATCH 0007/1007] intel-iommu: Decode (and ignore) RHSA entries I recently got a system where the DMAR table included a couple of RHSA (remapping hardware static affinity) entries. Rather than printing a message about an "Unknown DMAR structure," it would probably be more useful to dump the RHSA structure (as other DMAR structures are dumped). Signed-off-by: Roland Dreier Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index d199bcabed7..22b02c6df85 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -354,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_atsr *atsr; + struct acpi_dmar_rhsa *rhsa; switch (header->type) { case ACPI_DMAR_TYPE_HARDWARE_UNIT: @@ -375,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) atsr = container_of(header, struct acpi_dmar_atsr, header); printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); break; + case ACPI_DMAR_HARDWARE_AFFINITY: + rhsa = container_of(header, struct acpi_dmar_rhsa, header); + printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", + (unsigned long long)rhsa->base_address, + rhsa->proximity_domain); + break; } } @@ -459,6 +466,9 @@ parse_dmar_table(void) ret = dmar_parse_one_atsr(entry_header); #endif break; + case ACPI_DMAR_HARDWARE_AFFINITY: + /* We don't do anything with RHSA (yet?) 
*/ + break; default: printk(KERN_WARNING PREFIX "Unknown DMAR structure type %d\n", From d3f6302e7e51b41af86c6496ffb2f95e8f2179df Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 24 Sep 2009 14:07:55 -0700 Subject: [PATCH 0008/1007] hrtimer: Remove overly verbose "switch to high res mode" message On big systems, printing copies of Switched to high resolution mode on CPU nnn clutters up the kernel log for minimal gain. Just get rid of them. Signed-off-by: Roland Dreier LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/hrtimer.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index e5d98ce50f8..42672790ee1 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -720,8 +720,6 @@ static int hrtimer_switch_to_hres(void) /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); local_irq_restore(flags); - printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", - smp_processor_id()); return 1; } From 2ce4da2efcaca0dcbfed7a1f24177f18e75e0e89 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 20:42:10 -0700 Subject: [PATCH 0009/1007] sparc: Support HW cache events. First supported chip for HW cache events is Ultra-IIIi. Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 145 +++++++++++++++++++++++++++++++-- 1 file changed, 139 insertions(+), 6 deletions(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2d6a1b10c81..48375f69467 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -68,8 +68,19 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +#define CACHE_OP_UNSUPPORTED 0xfffe +#define CACHE_OP_NONSENSE 0xffff + +typedef struct perf_event_map cache_map_t + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + struct sparc_pmu { const struct perf_event_map *(*event_map)(int); + const cache_map_t *cache_map; int max_events; int upper_shift; int lower_shift; @@ -92,8 +103,96 @@ static const struct perf_event_map *ultra3i_event_map(int event_id) return &ultra3i_perfmon_event_map[event_id]; } +static const cache_map_t ultra3i_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ 
C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + static const struct sparc_pmu ultra3i_pmu = { .event_map = ultra3i_event_map, + .cache_map = &ultra3i_cache_map, .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, @@ -375,6 +474,37 @@ void perf_event_release_pmc(void) } } +static const struct perf_event_map *sparc_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct perf_event_map *pmap; + + if (!sparc_pmu->cache_map) + return ERR_PTR(-ENOENT); + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); + + if (pmap->encoding == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + if (pmap->encoding == CACHE_OP_NONSENSE) + return ERR_PTR(-EINVAL); + + return pmap; +} + static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); @@ -390,12 +520,17 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; - if (attr->type != PERF_TYPE_HARDWARE) + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + pmap = sparc_pmu->event_map(attr->config); + } else if (attr->type == PERF_TYPE_HW_CACHE) { + pmap = sparc_map_cache_event(attr->config); + if (IS_ERR(pmap)) + return PTR_ERR(pmap); + } else return -EOPNOTSUPP; - if (attr->config >= sparc_pmu->max_events) - return -EINVAL; - perf_event_grab_pmc(); event->destroy = hw_perf_event_destroy; @@ -417,8 +552,6 @@ static int __hw_perf_event_init(struct perf_event *event) atomic64_set(&hwc->period_left, hwc->sample_period); } - pmap = sparc_pmu->event_map(attr->config); - enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; From 28e8f9bead060aafc630a4256d23e2a55fb8b97d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 20:54:22 -0700 Subject: [PATCH 0010/1007] sparc: Support all ultra3 and ultra4 derivatives. For the generic events we support, all of these chips have the same encodings as ultra3i. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 48375f69467..8abdc4d1baa 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -91,19 +91,19 @@ struct sparc_pmu { int lower_nop; }; -static const struct perf_event_map ultra3i_perfmon_event_map[] = { +static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; -static const struct perf_event_map *ultra3i_event_map(int event_id) +static const struct perf_event_map *ultra3_event_map(int event_id) { - return &ultra3i_perfmon_event_map[event_id]; + return &ultra3_perfmon_event_map[event_id]; } -static const cache_map_t ultra3i_cache_map = { +static const cache_map_t ultra3_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, @@ -190,10 +190,10 @@ static const cache_map_t ultra3i_cache_map = { }, }; -static const struct sparc_pmu ultra3i_pmu = { - .event_map = ultra3i_event_map, - .cache_map = &ultra3i_cache_map, - .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), +static const struct sparc_pmu ultra3_pmu = { + .event_map = ultra3_event_map, + .cache_map = &ultra3_cache_map, + .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, @@ -658,8 +658,11 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { static bool __init supported_pmu(void) { - if (!strcmp(sparc_pmu_type, "ultra3i")) { - sparc_pmu = &ultra3i_pmu; + if (!strcmp(sparc_pmu_type, "ultra3") || + !strcmp(sparc_pmu_type, "ultra3+") || + !strcmp(sparc_pmu_type, "ultra3i") || + !strcmp(sparc_pmu_type, "ultra4+")) { + sparc_pmu = &ultra3_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2")) { From d0b86480f5b33f4a86d7c106706d6e0dcd1935ce Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 21:04:16 -0700 Subject: [PATCH 0011/1007] sparc: Add Niagara2 HW cache event support. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 88 ++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 8abdc4d1baa..6f01e04cc32 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -215,8 +215,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id) return &niagara2_perfmon_event_map[event_id]; } +static const cache_map_t niagara2_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, + .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .upper_shift = 19, .lower_shift = 6, From 7eebda60d57a0862a410f45122c73b8bbe6e260c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 21:23:41 -0700 Subject: [PATCH 0012/1007] sparc: Niagara1 perf event support. This chip is extremely limited, and many of the events supported are approximations at best. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 119 +++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6f01e04cc32..9541b456c3e 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -201,6 +201,121 @@ static const struct sparc_pmu ultra3_pmu = { .lower_nop = 0x14, }; +/* Niagara1 is very limited. The upper PIC is hard-locked to count + * only instructions, so it is free running which creates all kinds of + * problems. Some hardware designs make one wonder if the creastor + * even looked at how this stuff gets used by software. + */ +static const struct perf_event_map niagara1_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, +}; + +static const struct perf_event_map *niagara1_event_map(int event_id) +{ + return &niagara1_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara1_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, + [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu niagara1_pmu = { + .event_map = niagara1_event_map, + .cache_map = 
&niagara1_cache_map, + .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), + .upper_shift = 0, + .lower_shift = 4, + .event_mask = 0x7, + .upper_nop = 0x0, + .lower_nop = 0x0, +}; + static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, @@ -753,6 +868,10 @@ static bool __init supported_pmu(void) sparc_pmu = &ultra3_pmu; return true; } + if (!strcmp(sparc_pmu_type, "niagara")) { + sparc_pmu = &niagara1_pmu; + return true; + } if (!strcmp(sparc_pmu_type, "niagara2")) { sparc_pmu = &niagara2_pmu; return true; From d6b58772dc39262629708e5f3c30ef06de290894 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 26 Apr 2009 23:53:34 -0400 Subject: [PATCH 0013/1007] parisc: tracehook_report_syscall This makes parisc use the standard tracehook_report_syscall_entry and tracehook_report_syscall_exit hooks in . To do this, we need to access current->thread.regs, and to know whether we're entering or exiting the syscall, so add this to syscall_trace. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/ptrace.c | 23 +++++++++++------------ arch/parisc/kernel/syscall.S | 4 +++- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6..2118ed02e88 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -264,21 +265,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, #endif -void syscall_trace(void) +void syscall_trace(int why) { + struct pt_regs *regs = ¤t->thread.regs; + if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - if (!(current->ptrace & PT_PTRACED)) - return; - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl + * Report the system call for tracing. Entry tracing can + * decide to abort the call. We handle that by setting an + * invalid syscall number (-1) to force an ENOSYS error. 
*/ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + if (why) + tracehook_report_syscall_exit(regs, 0); + else if (tracehook_report_syscall_entry(regs)) + regs->gr[20] = -1; /* force ENOSYS */ } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3..5698668166c 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -288,6 +288,7 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ + ldi 0,%r26 /* syscall entry */ ldil L%syscall_trace,%r1 ldil L%tracesys_next,%r2 be R%syscall_trace(%sr7,%r1) @@ -336,6 +337,7 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif + ldi 1,%r26 /* syscall exit */ bl syscall_trace, %r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ @@ -358,7 +360,7 @@ tracesys_sigexit: ldo -16(%r30),%r29 /* Reference param save area */ #endif bl syscall_trace, %r2 - nop + ldi 1,%r26 /* syscall exit */ ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) From ecf02de5a1491592d1b68d8095ff62ea3aaee67e Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Mon, 27 Apr 2009 00:29:53 -0400 Subject: [PATCH 0014/1007] parisc: tracehook_signal_handler This makes parisc call the standard tracehook_signal_handler hook in after setting up a signal handler. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/signal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8eb3c63c407..c5f3d823d42 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -468,6 +469,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigaddset(¤t->blocked,sig); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + + tracehook_signal_handler(sig, info, ka, regs, 0); + return 1; } From 5837d42f8ae9874fd93928c6784b2224f0edd4c9 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 14:39:58 -0400 Subject: [PATCH 0015/1007] parisc: add missing TI_TASK macro in syscall.S LDREG 0(%r1),%r1 really wants to be accessing thread_info.task, instead of hardcoding the 0. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/syscall.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 5698668166c..07de1bbb69a 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -355,7 +355,7 @@ tracesys_exit: tracesys_sigexit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG 0(%r1), %r1 + LDREG TI_TASK(%r1), %r1 #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif From 2798af1abb5d560b926fd07ec58c5dcc6d3484d8 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 14:36:16 -0400 Subject: [PATCH 0016/1007] parisc: split syscall_trace into two halves Instead of fiddling with gr[20], restructure code to return whether or not to -ENOSYS. (Also do a bit of fiddling to let them take pt_regs directly instead of re-computing it.) 
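The split follows the generic entry/exit pattern: the entry hook returns the syscall number to dispatch, or -1L when the tracer vetoes the call (which then shows up as -ENOSYS), and the exit hook reports the result. A rough C sketch of how the two halves pair up around a call; the dispatch helper below is purely illustrative and not part of this patch:

	/* Illustrative sketch only: how the split hooks are meant to be used.
	 * do_syscall_trace_enter() returns the syscall number, or -1L when the
	 * tracer asked to abort the call, which is reported back as -ENOSYS.
	 */
	static long traced_syscall(struct pt_regs *regs,
				   long (*dispatch)(struct pt_regs *))
	{
		long nr = do_syscall_trace_enter(regs);
		long ret = (nr < 0) ? -ENOSYS : dispatch(regs);

		regs->gr[28] = ret;		/* parisc return-value register */
		do_syscall_trace_exit(regs);	/* reports exit, handles stepping */
		return ret;
	}
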
Signed-off-by: Kyle McMartin --- arch/parisc/kernel/ptrace.c | 27 +++++++++++++-------------- arch/parisc/kernel/syscall.S | 22 +++++++++++++--------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2118ed02e88..08f6d2cbf0e 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -264,20 +264,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } #endif - -void syscall_trace(int why) +long do_syscall_trace_enter(struct pt_regs *regs) { - struct pt_regs *regs = ¤t->thread.regs; + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + return -1L; - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - /* - * Report the system call for tracing. Entry tracing can - * decide to abort the call. We handle that by setting an - * invalid syscall number (-1) to force an ENOSYS error. - */ - if (why) - tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - regs->gr[20] = -1; /* force ENOSYS */ + return regs->gr[20]; +} + +void do_syscall_trace_exit(struct pt_regs *regs) +{ + int stepping = !!(current->ptrace & (PT_SINGLESTEP|PT_BLOCKSTEP)); + + if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, stepping); } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 07de1bbb69a..f5f96021caa 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -288,19 +288,23 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ - ldi 0,%r26 /* syscall entry */ - ldil L%syscall_trace,%r1 + copy %r2,%r26 + ldil L%do_syscall_trace_enter,%r1 ldil L%tracesys_next,%r2 - be R%syscall_trace(%sr7,%r1) + be R%do_syscall_trace_enter(%sr7,%r1) ldo R%tracesys_next(%r2),%r2 -tracesys_next: +tracesys_next: + /* do_syscall_trace_enter either returned the syscallno, or -1L, + * so we skip restoring the PT_GR20 below, since we pulled it from + * task->thread.regs.gr[20] above. + */ + copy %ret0,%r20 ldil L%sys_call_table,%r1 ldo R%sys_call_table(%r1), %r19 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 - LDREG TASK_PT_GR20(%r1), %r20 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ LDREG TASK_PT_GR25(%r1), %r25 LDREG TASK_PT_GR24(%r1), %r24 @@ -337,8 +341,8 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldi 1,%r26 /* syscall exit */ - bl syscall_trace, %r2 + ldo TASK_REGS(%r1),%r26 + bl do_syscall_trace_exit,%r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 @@ -359,8 +363,8 @@ tracesys_sigexit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl syscall_trace, %r2 - ldi 1,%r26 /* syscall exit */ + bl do_syscall_trace_exit,%r2 + ldo TASK_REGS(%r1),%r26 ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) From ecd3d4bc06e48357d12e730482577c756a9f2dbc Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 27 Sep 2009 23:03:02 -0400 Subject: [PATCH 0017/1007] parisc: stop using task->ptrace for {single,block}step flags task->ptrace flags belong to generic code, so instead thief some TIF_ bits to use. Somewhat risky conversion of code to test TASK_FLAGS instead of TASK_PTRACE in assembly, but it looks alright in the end. 
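The "risky" part is that the assembly now tests thread_info flags directly, which works because asm-offsets.c converts the generic TIF_* bit numbers into PA-RISC bit positions: PA bit-test instructions such as bb and extru count bit 0 from the most-significant end of the word, so a generic bit N becomes PA bit (31 - N). A minimal sketch of that conversion, with values worked out for the two bits this patch introduces (the actual DEFINEs appear in the diff below):

/* thread_info.h (added by this patch) */
#define TIF_SINGLESTEP	9	/* single stepping? */
#define TIF_BLOCKSTEP	10	/* branch stepping? */

/* asm-offsets.c: translate to PA bit numbering, where bit 0 is the MSB */
DEFINE(TIF_SINGLESTEP_PA_BIT, 31 - TIF_SINGLESTEP);	/* = 22 */
DEFINE(TIF_BLOCKSTEP_PA_BIT,  31 - TIF_BLOCKSTEP);	/* = 21 */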
Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/thread_info.h | 4 ++++ arch/parisc/kernel/asm-offsets.c | 4 ++-- arch/parisc/kernel/entry.S | 21 +++++++++++---------- arch/parisc/kernel/ptrace.c | 14 ++++++++------ 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index ac775a76bff..0399548c523 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ #define TIF_FREEZE 7 /* is freezing for suspend */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ +#define TIF_SINGLESTEP 9 /* single stepping? */ +#define TIF_BLOCKSTEP 10 /* branch stepping? */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -69,6 +71,8 @@ struct thread_info { #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef211..fcd3c707bf1 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -270,8 +270,8 @@ int main(void) DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); BLANK(); - DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); - DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); + DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); + DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 8c4712b74dc..3a44f7f704f 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -2047,12 +2047,13 @@ syscall_do_signal: b,n syscall_check_sig syscall_restore: - /* Are we being ptraced? */ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 - ldw TASK_PTRACE(%r1), %r19 - bb,< %r19,31,syscall_restore_rfi - nop + /* Are we being ptraced? 
*/ + ldw TASK_FLAGS(%r1),%r19 + ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 + and,COND(=) %r19,%r2,%r0 + b,n syscall_restore_rfi ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ rest_fp %r19 @@ -2113,16 +2114,16 @@ syscall_restore_rfi: ldi 0x0b,%r20 /* Create new PSW */ depi -1,13,1,%r20 /* C, Q, D, and I bits */ - /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are - * set in include/linux/ptrace.h and converted to PA bitmap + /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are + * set in thread_info.h and converted to PA bitmap * numbers in asm-offsets.c */ - /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ - extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 + /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ + extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 depi -1,27,1,%r20 /* R bit */ - /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ - extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 + /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ + extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ STREG %r20,TASK_PT_PSW(%r1) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 08f6d2cbf0e..c4f49e45129 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -36,7 +36,8 @@ */ void ptrace_disable(struct task_struct *task) { - task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); /* make sure the trap bits are not set */ pa_psw(task)->r = 0; @@ -56,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { - task->ptrace &= ~PT_BLOCKSTEP; - task->ptrace |= PT_SINGLESTEP; + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); + set_tsk_thread_flag(task, TIF_SINGLESTEP); if (pa_psw(task)->n) { struct siginfo si; @@ -99,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) void user_enable_block_step(struct task_struct *task) { - task->ptrace &= ~PT_SINGLESTEP; - task->ptrace |= PT_BLOCKSTEP; + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + set_tsk_thread_flag(task, TIF_BLOCKSTEP); /* Enable taken branch trap. */ pa_psw(task)->r = 0; @@ -275,7 +276,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) void do_syscall_trace_exit(struct pt_regs *regs) { - int stepping = !!(current->ptrace & (PT_SINGLESTEP|PT_BLOCKSTEP)); + int stepping = test_thread_flag(TIF_SINGLESTEP) || + test_thread_flag(TIF_BLOCKSTEP); if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, stepping); From 40e03b6857051ea61ebdb4a53e65485acfb1d98e Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 15:59:56 -0400 Subject: [PATCH 0018/1007] parisc: add skeleton syscall.h Needed for lib/syscall.c when HAVE_ARCH_TRACEHOOK. Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/ptrace.h | 5 +++- arch/parisc/include/asm/syscall.h | 40 +++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 arch/parisc/include/asm/syscall.h diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889..aead40b16dd 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) #define user_space(regs) (((regs)->iasq[1] != 0) ? 
1 : 0) #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) +#define user_stack_pointer(regs) ((regs)->gr[30]) unsigned long profile_pc(struct pt_regs *); extern void show_regs(struct pt_regs *); -#endif + + +#endif /* __KERNEL__ */ #endif diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 00000000000..8bdfd2c8c39 --- /dev/null +++ b/arch/parisc/include/asm/syscall.h @@ -0,0 +1,40 @@ +/* syscall.h */ + +#ifndef _ASM_PARISC_SYSCALL_H_ +#define _ASM_PARISC_SYSCALL_H_ + +#include +#include + +static inline long syscall_get_nr(struct task_struct *tsk, + struct pt_regs *regs) +{ + return regs->gr[20]; +} + +static inline void syscall_get_arguments(struct task_struct *tsk, + struct pt_regs *regs, unsigned int i, + unsigned int n, unsigned long *args) +{ + BUG_ON(i); + + switch (n) { + case 6: + args[5] = regs->gr[21]; + case 5: + args[4] = regs->gr[22]; + case 4: + args[3] = regs->gr[23]; + case 3: + args[2] = regs->gr[24]; + case 2: + args[1] = regs->gr[25]; + case 1: + args[0] = regs->gr[26]; + break; + default: + BUG(); + } +} + +#endif /*_ASM_PARISC_SYSCALL_H_*/ From 81bf550d9cdfe0325eb1504b06c9f6511b442c1a Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 27 Sep 2009 23:04:47 -0400 Subject: [PATCH 0019/1007] parisc: HAVE_ARCH_TRACEHOOK Let 'er rip. Signed-off-by: Kyle McMartin --- arch/parisc/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index f388dc68f60..524d9352f17 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -18,6 +18,7 @@ config PARISC select BUG select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT + select HAVE_ARCH_TRACEHOOK help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, From 530e949cc99ec6afba5207cbcd5f45c8cc8accc6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 22 Sep 2009 13:35:32 +0000 Subject: [PATCH 0020/1007] parisc: includecheck fix: signal.c fix the following 'make includecheck' warning: arch/parisc/kernel/signal.c: linux/compat.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Kyle McMartin --- arch/parisc/kernel/signal.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index c5f3d823d42..e8467e4aa8d 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -35,7 +35,6 @@ #include #ifdef CONFIG_COMPAT -#include #include "signal32.h" #endif From b882877082602bd2d9444069bae1926bf10ebe0d Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 4 Aug 2009 23:59:55 +0000 Subject: [PATCH 0021/1007] parisc: add me to Maintainers It has been suggested that given my proficiency with less popular architectures, I should be officially listed as willing to see to the care and feeding of this one. Signed-off-by: James Bottomley Signed-off-by: Kyle McMartin --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index c450f3abb8c..a51b896528c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3953,6 +3953,7 @@ F: drivers/block/paride/ PARISC ARCHITECTURE M: Kyle McMartin M: Helge Deller +M: "James E.J. 
Bottomley" L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git From 4255f0d2a132fb38dbe5b5ad74e27ba472507415 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 27 Sep 2009 23:26:01 -0400 Subject: [PATCH 0022/1007] parisc: rename parisc's vmalloc_start to parisc_vmalloc_start building kernel 2.6.32(pre), gives this compiler warning: /linus-linux-2.6/mm/vmalloc.c: In function 'pcpu_get_vm_areas': /linus-linux-2.6/mm/vmalloc.c:2104: warning: 'vmalloc_start' is used uninitialized in this function The reason is, that the code in mm/vmalloc defines a local variable called vmalloc_start, which is already defined as global variable in parisc's code. To avoid this kind of problems in future, I suggest to rename the parisc variable to parisc_vmalloc_start. Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/fixmap.h | 4 ++-- arch/parisc/mm/init.c | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a1822..6fec4d4a1a1 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -21,9 +21,9 @@ #define KERNEL_MAP_END (TMPALIAS_MAP_START) #ifndef __ASSEMBLY__ -extern void *vmalloc_start; +extern void *parisc_vmalloc_start; #define PCXL_DMA_MAP_SIZE (8*1024*1024) -#define VMALLOC_START ((unsigned long)vmalloc_start) +#define VMALLOC_START ((unsigned long)parisc_vmalloc_start) #define VMALLOC_END (KERNEL_MAP_END) #endif /*__ASSEMBLY__*/ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index d5aca31fddb..13b6e3e59b9 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -434,8 +434,8 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *vmalloc_start __read_mostly; -EXPORT_SYMBOL(vmalloc_start); +void *parisc_vmalloc_start __read_mostly; +EXPORT_SYMBOL(parisc_vmalloc_start); #ifdef CONFIG_PA11 unsigned long pcxl_dma_start __read_mostly; @@ -496,13 +496,14 @@ void __init mem_init(void) #ifdef CONFIG_PA11 if (hppa_dma_ops == &pcxl_dma_ops) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); - vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); + parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + + PCXL_DMA_MAP_SIZE); } else { pcxl_dma_start = 0; - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); } #else - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); #endif printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", From fe579c69c6d437009460feeb4d748c2f2020a5f2 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 4 Aug 2009 22:27:07 +0200 Subject: [PATCH 0023/1007] parisc: correct use of SHF_ALLOC SHF_ALLOC is suitable for testing against the sh_flags field, not the sh_type field. 
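The mix-up is easy to make because the constants happen to collide: sh_type holds an SHT_* enumeration value naming the section's kind, sh_flags holds the SHF_* attribute bitmask, and SHT_SYMTAB and SHF_ALLOC are both 2. An illustrative fragment (not from the patch; constant values per the ELF spec):

/* SHT_SYMTAB == 2    (sh_type: what kind of section this is)         */
/* SHF_ALLOC  == 0x2  (sh_flags: section occupies memory at run time) */

/* Old test: the second term masks the enum value, so it was always
 * true once sh_type == SHT_SYMTAB matched, and the filter did nothing. */
int old_test = (shdr->sh_type == SHT_SYMTAB) && (shdr->sh_type  & SHF_ALLOC);

/* Fixed test: actually checks the attribute bitmask. */
int new_test = (shdr->sh_type == SHT_SYMTAB) && (shdr->sh_flags & SHF_ALLOC);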
Signed-off-by: Julia Lawall Signed-off-by: Kyle McMartin --- arch/parisc/kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 61ee0eec4e6..212074653df 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, * ourselves */ for (i = 1; i < hdr->e_shnum; i++) { if(sechdrs[i].sh_type == SHT_SYMTAB - && (sechdrs[i].sh_type & SHF_ALLOC)) { + && (sechdrs[i].sh_flags & SHF_ALLOC)) { int strindex = sechdrs[i].sh_link; /* FIXME: AWFUL HACK * The cast is to drop the const from From f32ed3954c5e365907738b1206e849b6bbe9bcef Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sun, 27 Sep 2009 17:25:08 -0400 Subject: [PATCH 0024/1007] parisc: Make THREAD_SIZE available to assembly files and linker scripts. Signed-off-by: Tim Abbott Acked-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/thread_info.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 0399548c523..7ecc1039cfe 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -32,6 +32,11 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +/* how to get the thread information struct from C */ +#define current_thread_info() ((struct thread_info *)mfctl(30)) + +#endif /* !__ASSEMBLY */ + /* thread information allocation */ #define THREAD_SIZE_ORDER 2 @@ -40,11 +45,6 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) -/* how to get the thread information struct from C */ -#define current_thread_info() ((struct thread_info *)mfctl(30)) - -#endif /* !__ASSEMBLY */ - #define PREEMPT_ACTIVE_BIT 28 #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) From 01552f765cae873d0ea3cca1e64e41dfd62659e6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 27 Sep 2009 20:43:07 -0700 Subject: [PATCH 0025/1007] sparc64: Add initial perf event conflict resolution and checks. Cribbed from powerpc code, as usual. :-) Currently it is only used to validate that all counters have the same user/kernel/hv attributes. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 82 +++++++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 5 deletions(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 9541b456c3e..91995249815 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -713,12 +713,66 @@ static void hw_perf_event_destroy(struct perf_event *event) perf_event_release_pmc(); } +static int check_excludes(struct perf_event **evts, int n_prev, int n_new) +{ + int eu = 0, ek = 0, eh = 0; + struct perf_event *event; + int i, n, first; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + first = 1; + for (i = 0; i < n; i++) { + event = evts[i]; + if (first) { + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; + first = 0; + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { + return -EAGAIN; + } + } + + return 0; +} + +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *evts[], u64 *events) +{ + struct perf_event *event; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + evts[n] = group; + events[n++] = group->hw.config; + } + list_for_each_entry(event, &group->sibling_list, group_entry) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + evts[n] = event; + events[n++] = event->hw.config; + } + } + return n; +} + static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; + struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; const struct perf_event_map *pmap; - u64 enc; + u64 enc, events[MAX_HWEVENTS]; + int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; @@ -734,9 +788,6 @@ static int __hw_perf_event_init(struct perf_event *event) } else return -EOPNOTSUPP; - perf_event_grab_pmc(); - event->destroy = hw_perf_event_destroy; - /* We save the enable bits in the config_base. So to * turn off sampling just write 'config', and to enable * things write 'config | config_base'. @@ -749,13 +800,34 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; + enc = pmap->encoding; + + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + perf_max_events - 1, + evts, events); + if (n < 0) + return -EINVAL; + } + events[n] = enc; + evts[n] = event; + + if (check_excludes(evts, n, 1)) + return -EINVAL; + + /* Try to do all error checking before this point, as unwinding + * state after grabbing the PMC is difficult. 
+ */ + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); } - enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; enc <<= sparc_pmu->upper_shift; From 9609bfec6d869bc0d82ccfb909d5e72b7002d813 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 4 Aug 2009 14:54:41 +0200 Subject: [PATCH 0026/1007] parisc: convert to asm-generic/hardirq.h Signed-off-by: Christoph Hellwig Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/hardirq.h | 20 +------------------- arch/parisc/kernel/irq.c | 5 ----- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d511..0d68184a76c 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -1,29 +1,11 @@ /* hardirq.h: PA-RISC hard IRQ support. * * Copyright (C) 2001 Matthew Wilcox - * - * The locking is really quite interesting. There's a cpu-local - * count of how many interrupts are being handled, and a global - * lock. An interrupt can only be serviced if the global lock - * is free. You can't be sure no more interrupts are being - * serviced until you've acquired the lock and then checked - * all the per-cpu interrupt counts are all zero. It's a specialised - * br_lock, and that's exactly how Sparc does it. We don't because - * it's more locking for us. This way is lock-free in the interrupt path. */ #ifndef _PARISC_HARDIRQ_H #define _PARISC_HARDIRQ_H -#include -#include - -typedef struct { - unsigned long __softirq_pending; /* set_bit is used on this */ -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -void ack_bad_irq(unsigned int irq); +#include #endif /* _PARISC_HARDIRQ_H */ diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536a932..2e7610cb33d 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -423,8 +423,3 @@ void __init init_IRQ(void) set_eiem(cpu_eiem); /* EIEM : enable all external intr */ } - -void ack_bad_irq(unsigned int irq) -{ - printk(KERN_WARNING "unexpected IRQ %d\n", irq); -} From 8cf06fc9bd660c28afea4115fd91a2cc4978eb54 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 28 Sep 2009 00:39:47 +0200 Subject: [PATCH 0027/1007] parisc: Fix linker script breakage. Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/kernel/vmlinux.lds.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 775be2791bc..fda4baa059b 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -28,6 +28,7 @@ #include #include #include +#include /* ld script to make hppa Linux kernel */ #ifndef CONFIG_64BIT @@ -134,6 +135,15 @@ SECTIONS __init_begin = .; INIT_TEXT_SECTION(16384) INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); From 48ff3e04ffd5e1b578462eb1958f15ca122c7fbf Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:04:04 +0900 Subject: [PATCH 0028/1007] sh: Handle ioport_map() cases for >= P1SEG addresses. This fixes up the case where certain drivers already do their own remapping and subsequently attempt to use the PIO calls for I/O. 
In this case there is no additional remapping that needs to be done, and the address can be casted in to the cookie directly. Signed-off-by: Paul Mundt --- arch/sh/kernel/io_generic.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index 4ff50723928..b8fa6524760 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -147,6 +147,9 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count) void __iomem *generic_ioport_map(unsigned long addr, unsigned int size) { + if (PXSEG(addr) >= P1SEG) + return (void __iomem *)addr; + return (void __iomem *)(addr + generic_io_base); } From d44ee12ad61ff7aa7a6344560bd430cb72fcbc27 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:05:41 +0900 Subject: [PATCH 0029/1007] sh: Set the default I/O port base to P2SEG. This bumps up the default I/O base to P2SEG, which allows legacy probing to bail out gracefully rather than oopsing. Platforms that have a real PIO offset still need to fix this up on their own, although most platforms are content with P2SEG already. The previous change to teach ioport_map() about >= P1SEG offsets in combination with this patch allows both the already remapped and the legacy address probing to pass through and succeed. Fixes up an oops with i8042 on the sh7785lcr board. Signed-off-by: Paul Mundt --- arch/sh/kernel/machvec.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index 548f6607fd0..cbce639b108 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -133,4 +134,6 @@ void __init sh_mv_setup(void) if (!sh_mv.mv_nr_irqs) sh_mv.mv_nr_irqs = NR_IRQS; + + __set_io_port_base(P2SEG); } From 421f7a5dbdd1720c718eed187c46cf202529cf7d Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 26 Sep 2009 19:44:05 +0000 Subject: [PATCH 0030/1007] sh: Don't enable interrupts in the page fault path There's already code in do_page_fault() to conditionally enable interrupts, so we don't need to unconditonally enable them before calling it. This fixes a lockdep warning where we called trace_hardirqs_off() but with irqs still enabled. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 0151933e525..bb407ef0b91 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -152,8 +152,6 @@ call_do_page_fault: mov.l 1f, r0 mov.l @r0, r6 - sti - mov.l 3f, r0 mov.l 4f, r1 mov r15, r4 From fe1dbfd3f992d5432d7463a5bd6c2fc96d3eccd8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:22:10 +0900 Subject: [PATCH 0031/1007] sh: magicpanelr2 and dreamcast can use the generic I/O base. There is now no need for the magicpanelr2 and dreamcast platforms to set their own I/O port bas values, given that the generic machvec code now sets this to P2SEG for everyone. 
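Boards that do sit behind a real PIO window keep overriding the default in their own setup hook; a hypothetical sketch (the board name and base address are invented for illustration, only __set_io_port_base() and the setup-hook shape are the real interfaces):

static void __init myboard_setup(char **cmdline_p)
{
	/* Board-specific PIO window; replaces the generic P2SEG default
	 * installed by sh_mv_setup(). */
	__set_io_port_base(0xb8000000UL);
}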
Signed-off-by: Paul Mundt --- arch/sh/boards/board-magicpanelr2.c | 2 -- arch/sh/boards/mach-dreamcast/setup.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c index 0a37c8bfc95..99ffc5f1c0d 100644 --- a/arch/sh/boards/board-magicpanelr2.c +++ b/arch/sh/boards/board-magicpanelr2.c @@ -205,8 +205,6 @@ static void __init setup_port_multiplexing(void) static void __init mpr2_setup(char **cmdline_p) { - __set_io_port_base(0xa0000000); - /* set Pin Select Register A: * /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2, * /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c index ebe99227d4e..a4b7402d617 100644 --- a/arch/sh/boards/mach-dreamcast/setup.c +++ b/arch/sh/boards/mach-dreamcast/setup.c @@ -42,8 +42,6 @@ static void __init dreamcast_setup(char **cmdline_p) /* Acknowledge any previous events */ /* XXX */ - __set_io_port_base(0xa0000000); - /* Assign all virtual IRQs to the System ASIC int. handler */ for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) set_irq_chip_and_handler(i, &systemasic_int, From 1b6b9d62475bdea00b366cb80b0fadba446f176d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 14:39:58 -0700 Subject: [PATCH 0032/1007] sparc64: Increase vmalloc size to fix percpu regressions. Since we now use the embedding percpu allocator we have to make the vmalloc area at least as large as the stretch can be between nodes. Besides some minor asm adjustments, this turned out to be pretty trivial. Signed-off-by: David S. Miller --- arch/sparc/include/asm/pgtable_64.h | 4 ++-- arch/sparc/kernel/ktlb.S | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 0ff92fa2206..f3cb790fa2a 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -41,8 +41,8 @@ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) #define VMALLOC_START _AC(0x0000000100000000,UL) -#define VMALLOC_END _AC(0x0000000200000000,UL) -#define VMEMMAP_BASE _AC(0x0000000200000000,UL) +#define VMALLOC_END _AC(0x0000010000000000,UL) +#define VMEMMAP_BASE _AC(0x0000010000000000,UL) #define vmemmap ((struct page *)VMEMMAP_BASE) diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 3ea6e8cde8c..1d361477d7d 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: #ifdef CONFIG_SPARSEMEM_VMEMMAP /* Do not use the TSB for vmemmap. */ - mov (VMEMMAP_BASE >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMEMMAP_BASE >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4,%g5 bgeu,pn %xcc, kvmap_vmemmap nop @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 blu,pn %xcc, kvmap_dtlb_longpath - mov (VMALLOC_END >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMALLOC_END >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4, %g5 bgeu,pn %xcc, kvmap_dtlb_longpath nop From 4781f20f29926ec68715f5cc930273a79fc0a9eb Mon Sep 17 00:00:00 2001 From: Brian Rogers Date: Mon, 28 Sep 2009 15:41:08 -0700 Subject: [PATCH 0033/1007] drm/i915: Don't call intel_update_fbc from intel_crtc_cursor_set Commit 74dff282 exposed this unnecessary call by causing a change in the failure path on i965 where framebuffer compression will be turned on and off on every cursor update. 
If you don't have the xf86-video-intel fix to avoid the blinking cursor effect, this is very slow. Symptoms were a far more noticeable cursor blink with every cursor image change combined with severe slowdown for animated cursors. Signed-off-by: Brian Rogers Acked-by: Jesse Barnes Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 93ff6c03733..7a5fb7932c0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3095,7 +3095,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_gem_object *bo; struct drm_i915_gem_object *obj_priv; int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp = I915_READ(control); @@ -3182,9 +3181,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, drm_gem_object_unreference(intel_crtc->cursor_bo); } - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); - mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; From a72a8a5f2ea32074e98803d4b15d0e093c5a9e4d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 17:35:20 -0700 Subject: [PATCH 0034/1007] sparc64: Add a basic conflict engine in preparation for multi-counter support. The hardware counter ->event_base state records and encoding of the "struct perf_event_map" entry used for the event. We use this to make sure that when we have more than 1 event, both can be scheduled into the hardware at the same time. As usual, structure of code is largely cribbed from powerpc. Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 69 +++++++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 5 deletions(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 91995249815..2b7743466ae 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -68,6 +68,17 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +static unsigned long perf_event_encode(const struct perf_event_map *pmap) +{ + return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; +} + +static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) +{ + *msk = val & 0xff; + *enc = val >> 16; +} + #define C(x) PERF_COUNT_HW_CACHE_##x #define CACHE_OP_UNSUPPORTED 0xfffe @@ -713,6 +724,48 @@ static void hw_perf_event_destroy(struct perf_event *event) perf_event_release_pmc(); } +/* Make sure all events can be scheduled into the hardware at + * the same time. This is simplified by the fact that we only + * need to support 2 simultaneous HW events. + */ +static int sparc_check_constraints(unsigned long *events, int n_ev) +{ + if (n_ev <= perf_max_events) { + u8 msk1, msk2; + u16 dummy; + + if (n_ev == 1) + return 0; + BUG_ON(n_ev != 2); + perf_event_decode(events[0], &dummy, &msk1); + perf_event_decode(events[1], &dummy, &msk2); + + /* If both events can go on any counter, OK. */ + if (msk1 == (PIC_UPPER | PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If one event is limited to a specific counter, + * and the other can go on both, OK. 
+ */ + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If the events are fixed to different counters, OK. */ + if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || + (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) + return 0; + + /* Otherwise, there is a conflict. */ + } + + return -1; +} + static int check_excludes(struct perf_event **evts, int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; @@ -742,7 +795,7 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new) } static int collect_events(struct perf_event *group, int max_count, - struct perf_event *evts[], u64 *events) + struct perf_event *evts[], unsigned long *events) { struct perf_event *event; int n = 0; @@ -751,7 +804,7 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = group; - events[n++] = group->hw.config; + events[n++] = group->hw.event_base; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && @@ -759,7 +812,7 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = event; - events[n++] = event->hw.config; + events[n++] = event->hw.event_base; } } return n; @@ -770,8 +823,9 @@ static int __hw_perf_event_init(struct perf_event *event) struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; + unsigned long events[MAX_HWEVENTS]; const struct perf_event_map *pmap; - u64 enc, events[MAX_HWEVENTS]; + u64 enc; int n; if (atomic_read(&nmi_active) < 0) @@ -800,6 +854,8 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; + hwc->event_base = perf_event_encode(pmap); + enc = pmap->encoding; n = 0; @@ -810,12 +866,15 @@ static int __hw_perf_event_init(struct perf_event *event) if (n < 0) return -EINVAL; } - events[n] = enc; + events[n] = hwc->event_base; evts[n] = event; if (check_excludes(evts, n, 1)) return -EINVAL; + if (sparc_check_constraints(events, n + 1)) + return -EINVAL; + /* Try to do all error checking before this point, as unwinding * state after grabbing the PMC is difficult. */ From d29862f03575cdfa8819f78b0f3f78eec3b44629 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 17:37:12 -0700 Subject: [PATCH 0035/1007] sparc64: Minor coding style fixups in perf code. These got introduced during the counter --> event tree-wide renaming. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2b7743466ae..03b041c4d95 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -464,8 +464,7 @@ static u64 nop_for_index(int idx) sparc_pmu->lower_nop, idx); } -static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, int idx) { u64 val, mask = mask_for_index(idx); @@ -473,8 +472,7 @@ static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, pcr_ops->write((val & ~mask) | hwc->config); } -static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); @@ -555,7 +553,7 @@ static void write_pmc(int idx, u64 val) } static int sparc_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -607,7 +605,7 @@ static int sparc_pmu_enable(struct perf_event *event) } static u64 sparc_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; @@ -939,7 +937,7 @@ void perf_event_print_debug(void) } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) + unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; From 4f49be546806bf3839daa0601e1c2d4342c93359 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Sep 2009 00:23:33 +0100 Subject: [PATCH 0036/1007] drm/i915: Record device minor rather than pointer in TRACE_EVENT Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_trace.h | 48 +++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 5567a40816f..908b3c4d8cf 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -158,16 +158,16 @@ TRACE_EVENT(i915_gem_request_submit, TP_ARGS(dev, seqno), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; ), - TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); TRACE_EVENT(i915_gem_request_flush, @@ -178,20 +178,20 @@ TRACE_EVENT(i915_gem_request_flush, TP_ARGS(dev, seqno, flush_domains, invalidate_domains), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) __field(u32, flush_domains) __field(u32, invalidate_domains) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; __entry->flush_domains = flush_domains; __entry->invalidate_domains = invalidate_domains; ), - TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", + TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", __entry->dev, __entry->seqno, __entry->flush_domains, __entry->invalidate_domains) ); @@ -204,16 +204,16 @@ TRACE_EVENT(i915_gem_request_complete, TP_ARGS(dev, seqno), TP_STRUCT__entry( - 
__field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; ), - TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); TRACE_EVENT(i915_gem_request_retire, @@ -223,16 +223,16 @@ TRACE_EVENT(i915_gem_request_retire, TP_ARGS(dev, seqno), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; ), - TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); TRACE_EVENT(i915_gem_request_wait_begin, @@ -242,16 +242,16 @@ TRACE_EVENT(i915_gem_request_wait_begin, TP_ARGS(dev, seqno), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; ), - TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); TRACE_EVENT(i915_gem_request_wait_end, @@ -261,16 +261,16 @@ TRACE_EVENT(i915_gem_request_wait_end, TP_ARGS(dev, seqno), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; __entry->seqno = seqno; ), - TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ); TRACE_EVENT(i915_ring_wait_begin, @@ -280,14 +280,14 @@ TRACE_EVENT(i915_ring_wait_begin, TP_ARGS(dev), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; ), - TP_printk("dev=%p", __entry->dev) + TP_printk("dev=%u", __entry->dev) ); TRACE_EVENT(i915_ring_wait_end, @@ -297,14 +297,14 @@ TRACE_EVENT(i915_ring_wait_end, TP_ARGS(dev), TP_STRUCT__entry( - __field(struct drm_device *, dev) + __field(u32, dev) ), TP_fast_assign( - __entry->dev = dev; + __entry->dev = dev->primary->index; ), - TP_printk("dev=%p", __entry->dev) + TP_printk("dev=%u", __entry->dev) ); #endif /* _I915_TRACE_H_ */ From 8f0dc5bf17dfd947bf7b2cd07a8b1f43e72fb750 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Sep 2009 00:43:17 +0100 Subject: [PATCH 0037/1007] drm/i915: batch submit seqno off-by-one. We increment the seqno number between submitting the batch buffer and the flush/interrupt that demarcates its end, so the tracepoint needs to reference the incremented value to match the completion event. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 40727d4c291..b5f9df230d0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3352,7 +3352,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; - trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); + trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); count = nbox ? 
nbox : 1; From 9d34e5db07303c9609053e2e651aa6d1fc74e923 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Sep 2009 05:26:06 +0100 Subject: [PATCH 0038/1007] drm/i915: Enable irq to trace batch buffer completion. If we trigger a tracepoint for batch buffer submission, it is a reasonable assumption that we wish to also trace the batch buffer completion. So in order to capture the completion events, we need to enable irqs... However, we cannot rely on the completion event to disable the irq later, so we defer the irq disable to the retire request. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 8 +++++++- drivers/gpu/drm/i915/i915_irq.c | 10 ++++++++++ drivers/gpu/drm/i915/i915_trace.h | 1 + 5 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 45d507ebd3f..92aeb918e0c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1468,6 +1468,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->user_irq_lock); spin_lock_init(&dev_priv->error_lock); dev_priv->user_irq_refcount = 0; + dev_priv->trace_irq_seqno = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b24b2d145b7..6035d3dae85 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -202,6 +202,7 @@ typedef struct drm_i915_private { spinlock_t user_irq_lock; /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ int user_irq_refcount; + u32 trace_irq_seqno; /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask_reg; u32 pipestat[2]; @@ -665,6 +666,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, extern int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_user_irq_get(struct drm_device *dev); +void i915_trace_irq_get(struct drm_device *dev, u32 seqno); void i915_user_irq_put(struct drm_device *dev); extern void i915_enable_interrupt (struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b5f9df230d0..abfc27b0c2e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; - if (!dev_priv->hw_status_page) + if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) return; seqno = i915_get_gem_seqno(dev); @@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev) } else break; } + + if (unlikely (dev_priv->trace_irq_seqno && + i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { + i915_user_irq_put(dev); + dev_priv->trace_irq_seqno = 0; + } } void diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4dfeec7cdd4..c3ceffa46ea 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } +void i915_trace_irq_get(struct drm_device *dev, u32 seqno) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (dev_priv->trace_irq_seqno == 0) + i915_user_irq_get(dev); + + dev_priv->trace_irq_seqno = seqno; +} + static int i915_wait_irq(struct drm_device * 
dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 908b3c4d8cf..01840d9bc38 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -165,6 +165,7 @@ TRACE_EVENT(i915_gem_request_submit, TP_fast_assign( __entry->dev = dev->primary->index; __entry->seqno = seqno; + i915_trace_irq_get(dev, seqno); ), TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) From fa59530267e1006b7180e9ce1dee4136907b2b65 Mon Sep 17 00:00:00 2001 From: Peter Huewe Date: Tue, 29 Sep 2009 03:24:55 +0200 Subject: [PATCH 0039/1007] HID: add __init/__exit macros to twinhan.c Trivial patch which adds the __init/__exit macros to the module_init/ module_exit functions of the twinhan driver in hid. Signed-off-by: Peter Huewe Signed-off-by: Jiri Kosina --- drivers/hid/hid-twinhan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c index b05f602c051..c40afc57fc8 100644 --- a/drivers/hid/hid-twinhan.c +++ b/drivers/hid/hid-twinhan.c @@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = { .input_mapping = twinhan_input_mapping, }; -static int twinhan_init(void) +static int __init twinhan_init(void) { return hid_register_driver(&twinhan_driver); } -static void twinhan_exit(void) +static void __exit twinhan_exit(void) { hid_unregister_driver(&twinhan_driver); } From 6e804251d119bbd5522d76bdb0f48f5c9a7abf51 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 29 Sep 2009 15:10:23 -0700 Subject: [PATCH 0040/1007] sparc64: Fix comment typo in perf_event.c Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 03b041c4d95..32fc974bf8b 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -214,7 +214,7 @@ static const struct sparc_pmu ultra3_pmu = { /* Niagara1 is very limited. The upper PIC is hard-locked to count * only instructions, so it is free running which creates all kinds of - * problems. Some hardware designs make one wonder if the creastor + * problems. Some hardware designs make one wonder if the creator * even looked at how this stuff gets used by software. */ static const struct perf_event_map niagara1_perfmon_event_map[] = { From cd15e8c3313f2831d2d6c7951a93eb5de9621e5a Mon Sep 17 00:00:00 2001 From: Lennart Sorensen Date: Thu, 17 Sep 2009 11:49:41 -0400 Subject: [PATCH 0041/1007] m68knommu: show KiB rather than pages in "Freeing initrd memory:" message Fix "Freeing initrd memory:" message m68knommu to show kilobytes as claimed rather than number of pages. 
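As a quick check of the corrected arithmetic, assuming the usual 4 KiB pages: freeing 32 pages is now reported as 32 * (4096 / 1024) = 128k, where the old code printed "32k", i.e. the raw page count.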
Signed-off-by: Lennart Sorensen Signed-off-by: Greg Ungerer --- arch/m68knommu/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c index b1703c67a4f..f3236d0b522 100644 --- a/arch/m68knommu/mm/init.c +++ b/arch/m68knommu/mm/init.c @@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) totalram_pages++; pages++; } - printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); + printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); } #endif From 63335138334e5ae40bf485a07b46be59397e50da Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Fri, 18 Sep 2009 06:18:56 +0800 Subject: [PATCH 0042/1007] m68knommu: remove duplicated #include Signed-off-by: Huang Weiyi Signed-off-by: Greg Ungerer --- arch/m68knommu/platform/5206e/config.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c index 0f41ba82a3b..942397984c6 100644 --- a/arch/m68knommu/platform/5206e/config.c +++ b/arch/m68knommu/platform/5206e/config.c @@ -17,7 +17,6 @@ #include #include #include -#include /***************************************************************************/ From c84b564e82777b00ab48178a88bc533ba62b9922 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Sun, 20 Sep 2009 21:32:32 +1000 Subject: [PATCH 0043/1007] m68knommu: fix rename of pt_regs offset defines breakage Commit f159ee782990aacb5494738c98f13a2aa61dbb4a ("locking, m68k/asm-offsets: Rename pt_regs offset defines") breaks the m68knommu entry code that relies on these define names. Fix the files to match the new define names. Signed-off-by: Greg Ungerer --- arch/m68knommu/kernel/asm-offsets.c | 28 ++++++++++----------- arch/m68knommu/kernel/entry.S | 6 ++--- arch/m68knommu/platform/68328/entry.S | 32 ++++++++++++------------ arch/m68knommu/platform/68360/entry.S | 16 ++++++------ arch/m68knommu/platform/coldfire/entry.S | 20 +++++++-------- 5 files changed, 51 insertions(+), 51 deletions(-) diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index 594ee0e657f..9a8876f715d 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c @@ -45,25 +45,25 @@ int main(void) DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); /* offsets into the pt_regs */ - DEFINE(PT_D0, offsetof(struct pt_regs, d0)); - DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); - DEFINE(PT_D1, offsetof(struct pt_regs, d1)); - DEFINE(PT_D2, offsetof(struct pt_regs, d2)); - DEFINE(PT_D3, offsetof(struct pt_regs, d3)); - DEFINE(PT_D4, offsetof(struct pt_regs, d4)); - DEFINE(PT_D5, offsetof(struct pt_regs, d5)); - DEFINE(PT_A0, offsetof(struct pt_regs, a0)); - DEFINE(PT_A1, offsetof(struct pt_regs, a1)); - DEFINE(PT_A2, offsetof(struct pt_regs, a2)); - DEFINE(PT_PC, offsetof(struct pt_regs, pc)); - DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); + DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); + DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); + DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); + DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); + DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); + DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); + DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); + 
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
#ifdef CONFIG_COLDFIRE
/* bitfields are a bit difficult */
- DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
+ DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
/* bitfields are a bit difficult */
- DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+ DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
#endif
/* signal defines */
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index f56faa5c9cd..56043ade394 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -46,7 +46,7 @@ ENTRY(buserr)
SAVE_ALL
moveq #-1,%d0
- movel %d0,%sp@(PT_ORIG_D0)
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr buserr_c
addql #4,%sp
@@ -55,7 +55,7 @@ ENTRY(buserr)
ENTRY(trap)
SAVE_ALL
moveq #-1,%d0
- movel %d0,%sp@(PT_ORIG_D0)
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr trap_c
addql #4,%sp
@@ -67,7 +67,7 @@ ENTRY(trap)
ENTRY(dbginterrupt)
SAVE_ALL
moveq #-1,%d0
- movel %d0,%sp@(PT_ORIG_D0)
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr dbginterrupt_c
addql #4,%sp
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index b1aef72f3ba..9d80d2c4286 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -39,17 +39,17 @@
.globl inthandler7
badsys:
- movel #-ENOSYS,%sp@(PT_D0)
+ movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_exception
do_trace:
- movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/
+ movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
- movel %sp@(PT_ORIG_D0),%d1
+ movel %sp@(PT_OFF_ORIG_D0),%d1
movel #-ENOSYS,%d0
cmpl #NR_syscalls,%d1
jcc 1f
@@ -57,7 +57,7 @@ do_trace:
lea sys_call_table, %a0
jbsr %a0@(%d1)
-1: movel %d0,%sp@(PT_D0) /* save the return value */
+1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace
@@ -75,7 +75,7 @@ ENTRY(system_call)
jbsr set_esp0
addql #4,%sp
- movel %sp@(PT_ORIG_D0),%d0
+ movel %sp@(PT_OFF_ORIG_D0),%d0
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@ ENTRY(system_call)
lea sys_call_table,%a0
movel %a0@(%d0), %a0
jbsr %a0@
- movel %d0,%sp@(PT_D0) /* save the return value*/
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
ret_from_exception:
- btst #5,%sp@(PT_SR) /* check if returning to kernel*/
+ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
jeq Luser_return /* if so, skip resched, signals*/
Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
*/
inthandler1:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
inthandler2:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
inthandler3:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
inthandler4:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
inthandler5:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
inthandler6:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
inthandler7:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
inthandler:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and #0x3ff, %d0
movel %sp,%sp@-
@@ -224,7 +224,7 @@ ret_from_interrupt:
2:
RESTORE_ALL
1:
- moveb %sp@(PT_SR), %d0
+ moveb %sp@(PT_OFF_SR), %d0
and #7, %d0
jhi 2b
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 55dfefe3864..6d3460a39ca 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -35,17 +35,17 @@
.globl inthandler
badsys:
- movel #-ENOSYS,%sp@(PT_D0)
+ movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_exception
do_trace:
- movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/
+ movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
- movel %sp@(PT_ORIG_D0),%d1
+ movel %sp@(PT_OFF_ORIG_D0),%d1
movel #-ENOSYS,%d0
cmpl #NR_syscalls,%d1
jcc 1f
@@ -53,7 +53,7 @@ do_trace:
lea sys_call_table, %a0
jbsr %a0@(%d1)
-1: movel %d0,%sp@(PT_D0) /* save the return value */
+1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace
@@ -79,10 +79,10 @@ ENTRY(system_call)
lea sys_call_table,%a0
movel %a0@(%d0), %a0
jbsr %a0@
- movel %d0,%sp@(PT_D0) /* save the return value*/
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
ret_from_exception:
- btst #5,%sp@(PT_SR) /* check if returning to kernel*/
+ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
jeq Luser_return /* if so, skip resched, signals*/
Lkernel_return:
@@ -124,7 +124,7 @@ Lreturn:
*/
inthandler:
SAVE_ALL
- movew %sp@(PT_VECTOR), %d0
+ movew %sp@(PT_OFF_VECTOR), %d0
and.l #0x3ff, %d0
lsr.l #0x02, %d0
@@ -139,7 +139,7 @@ ret_from_interrupt:
2:
RESTORE_ALL
1:
- moveb %sp@(PT_SR), %d0
+ moveb %sp@(PT_OFF_SR), %d0
and #7, %d0
jhi 2b
/* check if we need to do software interrupts */
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 3b471c0da24..dd7d591f70e 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -81,11 +81,11 @@ ENTRY(system_call)
movel %d3,%a0
jbsr %a0@
- movel %d0,%sp@(PT_D0) /* save the return value */
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value */
jra ret_from_exception
1:
- movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */
- movel %d2,PT_D0(%sp) /* on syscall entry */
+ movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */
+ movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
@@ -93,7 +93,7 @@ ENTRY(system_call)
addql #4,%sp
movel %d3,%a0
jbsr %a0@
- movel %d0,%sp@(PT_D0) /* save the return value */
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace
@@ -104,7 +104,7 @@ ret_from_signal:
ret_from_exception:
move #0x2700,%sr /* disable intrs */
- btst #5,%sp@(PT_SR) /* check if returning to kernel */
+ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
jeq Luser_return /* if so, skip resched, signals */
#ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@ Luser_return:
Lreturn:
move #0x2700,%sr /* disable intrs */
movel sw_usp,%a0 /* get usp */
- movel %sp@(PT_PC),%a0@- /* copy exception program counter */
- movel %sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
+ movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
+ movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
@@ -181,9 +181,9 @@ Lsignal_return:
ENTRY(inthandler)
SAVE_ALL
moveq #-1,%d0
- movel %d0,%sp@(PT_ORIG_D0)
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
- movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */
+ movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
movel %sp,%sp@- /* push regs arg */
@@ -203,7 +203,7 @@ ENTRY(inthandler)
ENTRY(fasthandler)
SAVE_LOCAL
- movew %sp@(PT_FORMATVEC),%d0
+ movew %sp@(PT_OFF_FORMATVEC),%d0
andl #0x03fc,%d0 /* mask out vector only */
movel %sp,%sp@- /* push regs arg */

From 8810e0553fec6ff0a0db1431e388de39e2a2a512 Mon Sep 17 00:00:00 2001
From: Kuninori Morimoto
Date: Mon, 28 Sep 2009 08:21:41 +0000
Subject: [PATCH 0044/1007] sh: mach-ecovec24: Add TouchScreen support

Signed-off-by: Kuninori Morimoto
Signed-off-by: Paul Mundt
---
 arch/sh/boards/mach-ecovec24/setup.c | 42 ++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 5f9881e16e2..52912a6149c 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -18,6 +18,7 @@
#include
#include
#include
+#include
#include
#include