softirq: Add support for triggering softirq work on softirqs.
This is basically a genericization of Jens Axboe's block layer remote softirq changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 54514a70ad
parent 2e532d68a2

3 changed files with 153 additions and 1 deletion
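For context, here is a minimal sketch (not part of this commit) of the enqueue side: an interrupt handler that pushes completion work back to the CPU that submitted the request. MY_SOFTIRQ, struct my_work, and my_complete_isr() are hypothetical names; only send_remote_softirq() and struct call_single_data come from this patch.

	/* Hypothetical request object embedding the call data; the
	 * call_single_data must stay live until the softirq handler
	 * on the target cpu has dequeued it. */
	struct my_work {
		int submit_cpu;			/* cpu that issued the request */
		struct call_single_data csd;	/* list node + remote call data */
	};

	/* Runs in hardirq context on whatever cpu took the completion
	 * interrupt; queues the work on the submitting cpu instead. */
	static void my_complete_isr(struct my_work *w)
	{
		send_remote_softirq(&w->csd, w->submit_cpu, MY_SOFTIRQ);
	}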
include/linux/interrupt.h
@@ -11,6 +11,8 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -273,6 +275,25 @@ extern void softirq_init(void);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them.  The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu.  If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+				  int this_cpu, int softirq);
+
 /* Tasklets --- multithreaded analogue of BHs.
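The comment above states that the softirq handler itself dequeues from softirq_work_list. A minimal sketch of such a handler, continuing the hypothetical MY_SOFTIRQ / struct my_work example from above (list_replace_init() detaches the whole per-cpu list in O(1)):

	static void my_softirq_action(struct softirq_action *h)
	{
		struct list_head *head, local_list;

		/* Detach the per-cpu worklist with irqs off, since the
		 * enqueue side runs in irq context. */
		local_irq_disable();
		head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
		list_replace_init(head, &local_list);
		local_irq_enable();

		/* Process the detached entries with interrupts enabled. */
		while (!list_empty(&local_list)) {
			struct my_work *w = list_entry(local_list.next,
						       struct my_work, csd.list);
			list_del_init(&w->csd.list);
			/* ...complete w on this cpu... */
		}
	}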
include/linux/smp.h
@@ -7,6 +7,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
 
@@ -16,7 +17,8 @@ struct call_single_data {
 	struct list_head list;
 	void (*func) (void *info);
 	void *info;
-	unsigned int flags;
+	u16 flags;
+	u16 priv;
 };
 
 #ifdef CONFIG_SMP
kernel/softirq.c
@@ -6,6 +6,8 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call	= remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
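Tying the sketches above together: a subsystem would register its action at boot, mirroring the open_softirq() calls at the end of this diff. Again hypothetical, not part of this commit; a real user would also need a MY_SOFTIRQ entry in the softirq enum (before NR_SOFTIRQS) in interrupt.h.

	static int __init my_subsys_init(void)
	{
		/* Register the handler that drains softirq_work_list[MY_SOFTIRQ]. */
		open_softirq(MY_SOFTIRQ, my_softirq_action);
		return 0;
	}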