WorkStruct: Separate delayable and non-delayable events.

Separate delayable work items from non-delayable work items by splitting them
into a separate structure (delayed_work), which incorporates a work_struct and
the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness.  On a 64-bit
architecture it's nearly 100 bytes in size.  This reduces that by half for the
non-delayable type of event.

Signed-Off-By: David Howells <dhowells@redhat.com>
commit 52bad64d95
parent 0f9005a6f7
Author: David Howells <dhowells@redhat.com>
Date:   2006-11-22 14:54:01 +00:00

22 changed files with 96 additions and 73 deletions
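
As a rough illustration of the split (a minimal sketch, not part of the patch;
the handler names, work item names and my_wq are hypothetical), a handler that
never needs a timer stays on work_struct, while one that does moves to the new
delayed_work type and the *_delayed_* entry points introduced below:

        static void my_scan(void *data);        /* run as soon as possible */
        static void my_poll(void *data);        /* run after a delay */

        static DECLARE_WORK(my_scan_work, my_scan, NULL);              /* work_struct only */
        static DECLARE_DELAYED_WORK(my_poll_work, my_poll, NULL);      /* work_struct + timer_list */

        static void my_kick(struct workqueue_struct *my_wq)
        {
                queue_work(my_wq, &my_scan_work);               /* still takes a work_struct */
                queue_delayed_work(my_wq, &my_poll_work, HZ);   /* now takes a delayed_work */
        }

The non-delayable item no longer carries an embedded timer_list, which is
where the size saving described above comes from.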

@@ -307,7 +307,7 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
 
 static void mcheck_check_cpu(void *info)
 {

@@ -937,12 +937,9 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
         if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                 return;
 
-        PREPARE_WORK(&ap->port_task, fn, data);
+        PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
 
-        if (!delay)
-                rc = queue_work(ata_wq, &ap->port_task);
-        else
-                rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
         /* rc == 0 means that another user is using port task */
         WARN_ON(rc == 0);
@@ -5320,8 +5317,8 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
         ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-        INIT_WORK(&ap->port_task, NULL, NULL);
-        INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
+        INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
+        INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
         INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
         INIT_LIST_HEAD(&ap->eh_done_q);
         init_waitqueue_head(&ap->eh_wait_q);

@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
         if (ap->pflags & ATA_PFLAG_LOADING)
                 ap->pflags &= ~ATA_PFLAG_LOADING;
         else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-                queue_work(ata_aux_wq, &ap->hotplug_task);
+                queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
         if (ap->pflags & ATA_PFLAG_RECOVERED)
                 ata_port_printk(ap, KERN_INFO, "EH complete\n");

@@ -1424,7 +1424,7 @@ static unsigned int ip_cnt;
 
 static void rekey_seq_generator(void *private_);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
 
 /*
  * Lock avoidance:

@@ -3580,7 +3580,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
         tty->overrun_time = jiffies;
         tty->buf.head = tty->buf.tail = NULL;
         tty_buffer_init(tty);
-        INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+        INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
         init_MUTEX(&tty->buf.pty_sem);
         mutex_init(&tty->termios_mutex);
         init_waitqueue_head(&tty->write_wait);

@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
         INIT_LIST_HEAD(&ctx->active_reqs);
         INIT_LIST_HEAD(&ctx->run_list);
-        INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
 
         if (aio_setup_ring(ctx) < 0)
                 goto out_freectx;
@@ -876,7 +876,7 @@ static void aio_kick_handler(void *data)
          * we're in a worker thread already, don't use queue_delayed_work,
          */
         if (requeue)
-                queue_work(aio_wq, &ctx->wq);
+                queue_delayed_work(aio_wq, &ctx->wq, 0);
 }

@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
         INIT_LIST_HEAD(&clp->cl_state_owners);
         INIT_LIST_HEAD(&clp->cl_unused);
         spin_lock_init(&clp->cl_lock);
-        INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+        INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
         rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
         clp->cl_boot_time = CURRENT_TIME;
         clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;

@@ -21,7 +21,8 @@
 static void nfs_expire_automounts(void *list);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
+                            &nfs_automount_list);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,

@@ -194,7 +194,7 @@ struct kioctx {
 
         struct aio_ring_info    ring_info;
 
-        struct work_struct      wq;
+        struct delayed_work     wq;
 };
 
 /* prototypes */

@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
         if (t->buf.tail != NULL)
                 t->buf.tail->commit = t->buf.tail->used;
         spin_unlock_irqrestore(&t->buf.lock, flags);
-        schedule_work(&t->buf.work);
+        schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif

@@ -568,8 +568,8 @@ struct ata_port {
         struct ata_host         *host;
         struct device           *dev;
 
-        struct work_struct      port_task;
-        struct work_struct      hotplug_task;
+        struct delayed_work     port_task;
+        struct delayed_work     hotplug_task;
         struct work_struct      scsi_rescan_task;
 
         unsigned int            hsm_task_state;

@@ -51,7 +51,7 @@ struct nfs_client {
 
         unsigned long           cl_lease_time;
         unsigned long           cl_last_renewal;
-        struct work_struct      cl_renewd;
+        struct delayed_work     cl_renewd;
 
         struct rpc_wait_queue   cl_rpcwaitq;

@@ -30,7 +30,7 @@ struct rpc_inode {
 #define RPC_PIPE_WAIT_FOR_OPEN  1
         int flags;
         struct rpc_pipe_ops *ops;
-        struct work_struct queue_timeout;
+        struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *

@@ -177,7 +177,7 @@ struct rpc_xprt {
         unsigned long           connect_timeout,
                                 bind_timeout,
                                 reestablish_timeout;
-        struct work_struct      connect_worker;
+        struct delayed_work     connect_worker;
         unsigned short          port;
 
         /*

@@ -53,7 +53,7 @@ struct tty_buffer {
 };
 
 struct tty_bufhead {
-        struct work_struct work;
+        struct delayed_work work;
         struct semaphore pty_sem;
         spinlock_t lock;
         struct tty_buffer *head;        /* Queue head */

@@ -17,6 +17,10 @@ struct work_struct {
         void (*func)(void *);
         void *data;
         void *wq_data;
+};
+
+struct delayed_work {
+        struct work_struct work;
         struct timer_list timer;
 };
 
@@ -28,32 +32,48 @@ struct execute_work {
         .entry = { &(n).entry, &(n).entry },    \
         .func = (f),                            \
         .data = (d),                            \
+        }
+
+#define __DELAYED_WORK_INITIALIZER(n, f, d) {                   \
+        .work = __WORK_INITIALIZER((n).work, (f), (d)),         \
         .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
         }
 
 #define DECLARE_WORK(n, f, d)                                   \
         struct work_struct n = __WORK_INITIALIZER(n, f, d)
 
+#define DECLARE_DELAYED_WORK(n, f, d)                           \
+        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
  */
 #define PREPARE_WORK(_work, _func, _data)                       \
         do {                                                    \
-                (_work)->func = _func;                          \
-                (_work)->data = _data;                          \
+                (_work)->func = (_func);                        \
+                (_work)->data = (_data);                        \
         } while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func, _data)               \
+        PREPARE_WORK(&(_work)->work, (_func), (_data))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
 #define INIT_WORK(_work, _func, _data)                          \
         do {                                                    \
                 INIT_LIST_HEAD(&(_work)->entry);                \
                 (_work)->pending = 0;                           \
                 PREPARE_WORK((_work), (_func), (_data));        \
+        } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func, _data)                  \
+        do {                                                    \
+                INIT_WORK(&(_work)->work, (_func), (_data));    \
                 init_timer(&(_work)->timer);                    \
         } while (0)
 
 extern struct workqueue_struct *__create_workqueue(const char *name,
                                                     int singlethread);
 #define create_workqueue(name) __create_workqueue((name), 0)
 
@@ -62,24 +82,24 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-        struct work_struct *work, unsigned long delay);
+        struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
 extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
 
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-                                       struct work_struct *);
+                                       struct delayed_work *);
 
 int execute_in_process_context(void (*fn)(void *), void *,
                                struct execute_work *);
 
@@ -88,13 +108,13 @@ int execute_in_process_context(void (*fn)(void *), void *,
  * function may still be running on return from cancel_delayed_work(). Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
         int ret;
 
         ret = del_timer_sync(&work->timer);
         if (ret)
-                clear_bit(0, &work->pending);
+                clear_bit(0, &work->work.pending);
         return ret;
 }
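
Note that cancel_delayed_work() above only deletes a pending timer; as the
comment says, the handler may already be running, so callers that need it
finished must still flush. A minimal sketch (my_poll_work is the hypothetical
item from the earlier example):

        if (!cancel_delayed_work(&my_poll_work))
                flush_scheduled_work();  /* handler may still be running; wait for it */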

@@ -122,29 +122,33 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-        struct work_struct *work = (struct work_struct *)__data;
-        struct workqueue_struct *wq = work->wq_data;
+        struct delayed_work *dwork = (struct delayed_work *)__data;
+        struct workqueue_struct *wq = dwork->work.wq_data;
         int cpu = smp_processor_id();
 
         if (unlikely(is_single_threaded(wq)))
                 cpu = singlethread_cpu;
 
-        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
         int ret = 0;
-        struct timer_list *timer = &work->timer;
+        struct timer_list *timer = &dwork->timer;
+        struct work_struct *work = &dwork->work;
+
+        if (delay == 0)
+                return queue_work(wq, work);
 
         if (!test_and_set_bit(0, &work->pending)) {
                 BUG_ON(timer_pending(timer));
@@ -153,7 +157,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
                 /* This stores wq for the moment, for the timer_fn */
                 work->wq_data = wq;
                 timer->expires = jiffies + delay;
-                timer->data = (unsigned long)work;
+                timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;
                 add_timer(timer);
                 ret = 1;
@@ -172,10 +176,11 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
         int ret = 0;
-        struct timer_list *timer = &work->timer;
+        struct timer_list *timer = &dwork->timer;
+        struct work_struct *work = &dwork->work;
 
         if (!test_and_set_bit(0, &work->pending)) {
                 BUG_ON(timer_pending(timer));
@@ -184,7 +189,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                 /* This stores wq for the moment, for the timer_fn */
                 work->wq_data = wq;
                 timer->expires = jiffies + delay;
-                timer->data = (unsigned long)work;
+                timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;
                 add_timer_on(timer, cpu);
                 ret = 1;
@@ -468,31 +473,31 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-        return queue_delayed_work(keventd_wq, work, delay);
+        return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
 *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
-        return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
@@ -539,12 +544,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  * work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                       struct work_struct *work)
+                                       struct delayed_work *dwork)
 {
-        while (!cancel_delayed_work(work))
+        while (!cancel_delayed_work(dwork))
                 flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
@@ -552,11 +557,11 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  * work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-        cancel_rearming_delayed_workqueue(keventd_wq, work);
+        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
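
The delay == 0 short-circuit added to queue_delayed_work() above is what lets
the call sites converted elsewhere in this patch (the tty, aio, SunRPC and
libata hunks) simply pass a zero delay where they previously called
queue_work()/schedule_work() directly: a zero delay takes the immediate path
and never arms the timer. A minimal sketch, reusing the hypothetical item from
the earlier example:

        schedule_delayed_work(&my_poll_work, 0);   /* delay == 0 fast path, queued immediately */
        schedule_delayed_work(&my_poll_work, HZ);  /* arms the one-second timer */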

@@ -753,7 +753,7 @@ int slab_is_available(void)
         return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-        struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+        struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
         /*
          * When this gets called from do_initcalls via cpucache_init(),
          * init_workqueues() has already run, so keventd will be setup
          * at that time.
          */
-        if (keventd_up() && reap_work->func == NULL) {
+        if (keventd_up() && reap_work->work.func == NULL) {
                 init_reap_node(cpu);
-                INIT_WORK(reap_work, cache_reap, NULL);
+                INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
                 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
         }
 }

@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
                         unsigned long delay = linkwatch_nextevent - jiffies;
 
                         /* If we wrap around we'll delay it by at most HZ. */
-                        if (!delay || delay > HZ)
-                                schedule_work(&linkwatch_work);
-                        else
-                                schedule_delayed_work(&linkwatch_work, delay);
+                        if (delay > HZ)
+                                delay = 0;
+                        schedule_delayed_work(&linkwatch_work, delay);
                 }
         }
 }

@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
         spin_unlock(&cache_list_lock);
 
         /* start the cleaning process */
-        schedule_work(&cache_cleaner);
+        schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)

@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
                 INIT_LIST_HEAD(&rpci->pipe);
                 rpci->pipelen = 0;
                 init_waitqueue_head(&rpci->waitq);
-                INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+                INIT_DELAYED_WORK(&rpci->queue_timeout,
+                                  rpc_timeout_upcall_queue, rpci);
                 rpci->ops = NULL;
         }
 }

@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
                         xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
         } else {
                 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-                schedule_work(&xprt->connect_worker);
+                schedule_delayed_work(&xprt->connect_worker, 0);
 
                 /* flush_scheduled_work can sleep... */
                 if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
         /* XXX: header size can vary due to auth type, IPv6, etc. */
         xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-        INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+        INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
         xprt->bind_timeout = XS_BIND_TO;
         xprt->connect_timeout = XS_UDP_CONN_TO;
         xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
         xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
         xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-        INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+        INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
         xprt->bind_timeout = XS_BIND_TO;
         xprt->connect_timeout = XS_TCP_CONN_TO;
         xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;