WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness: on a 64-bit architecture it is nearly 100 bytes in size. This patch cuts that roughly in half for the non-delayable type of event.

Signed-off-by: David Howells <dhowells@redhat.com>
This commit is contained in:
parent 0f9005a6f7
commit 52bad64d95
22 changed files with 96 additions and 73 deletions
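
For orientation, here is a minimal sketch of the layout change this patch makes. The field order follows the hunks below; the old struct is reconstructed from the removed lines (the name work_struct_old is only for illustration), and the size figures are the rough 64-bit estimates from the commit message:

/* Before: every work item carried a timer_list, so even a plain,
 * never-delayed item paid for it -- close to 100 bytes on 64-bit. */
struct work_struct_old {
	unsigned long pending;		/* bit 0: item is queued */
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
	struct timer_list timer;	/* only delayed submissions use this */
};

/* After: the timer moves into a delayable wrapper, roughly halving
 * the non-delayable case. */
struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
};

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};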
@@ -307,7 +307,7 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -937,12 +937,9 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -5320,8 +5317,8 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -1424,7 +1424,7 @@ static unsigned int ip_cnt;
 
 static void rekey_seq_generator(void *private_);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
 
 /*
  * Lock avoidance:
@@ -3580,7 +3580,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
fs/aio.c
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -876,7 +876,7 @@ static void aio_kick_handler(void *data)
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
@@ -21,7 +21,8 @@
 static void nfs_expire_automounts(void *list);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
+			    &nfs_automount_list);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -194,7 +194,7 @@ struct kioctx {
 
 	struct aio_ring_info	ring_info;
 
-	struct work_struct	wq;
+	struct delayed_work	wq;
 };
 
 /* prototypes */
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
 	if (t->buf.tail != NULL)
 		t->buf.tail->commit = t->buf.tail->used;
 	spin_unlock_irqrestore(&t->buf.lock, flags);
-	schedule_work(&t->buf.work);
+	schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
@@ -568,8 +568,8 @@ struct ata_port {
 	struct ata_host		*host;
 	struct device		*dev;
 
-	struct work_struct	port_task;
-	struct work_struct	hotplug_task;
+	struct delayed_work	port_task;
+	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
 
 	unsigned int		hsm_task_state;
@@ -51,7 +51,7 @@ struct nfs_client {
 
 	unsigned long		cl_lease_time;
 	unsigned long		cl_last_renewal;
-	struct work_struct	cl_renewd;
+	struct delayed_work	cl_renewd;
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
@@ -30,7 +30,7 @@ struct rpc_inode {
 #define RPC_PIPE_WAIT_FOR_OPEN	1
 	int flags;
 	struct rpc_pipe_ops *ops;
-	struct work_struct queue_timeout;
+	struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
@@ -177,7 +177,7 @@ struct rpc_xprt {
 	unsigned long		connect_timeout,
 				bind_timeout,
 				reestablish_timeout;
-	struct work_struct	connect_worker;
+	struct delayed_work	connect_worker;
 	unsigned short		port;
 
 	/*
@@ -53,7 +53,7 @@ struct tty_buffer {
 };
 
 struct tty_bufhead {
-	struct work_struct		work;
+	struct delayed_work		work;
 	struct semaphore pty_sem;
 	spinlock_t lock;
 	struct tty_buffer *head;	/* Queue head */
@@ -17,6 +17,10 @@ struct work_struct {
 	void (*func)(void *);
 	void *data;
 	void *wq_data;
-	struct timer_list timer;
+};
+
+struct delayed_work {
+	struct work_struct work;
+	struct timer_list timer;
 };
 
@@ -28,32 +32,48 @@ struct execute_work {
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	.data = (d),						\
 	}
 
+#define __DELAYED_WORK_INITIALIZER(n, f, d) {			\
+	.work = __WORK_INITIALIZER((n).work, (f), (d)),		\
+	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+	}
+
 #define DECLARE_WORK(n, f, d)					\
 	struct work_struct n = __WORK_INITIALIZER(n, f, d)
 
+#define DECLARE_DELAYED_WORK(n, f, d)				\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
  */
 #define PREPARE_WORK(_work, _func, _data)			\
 	do {							\
-		(_work)->func = _func;				\
-		(_work)->data = _data;				\
+		(_work)->func = (_func);			\
+		(_work)->data = (_data);			\
 	} while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func, _data)		\
+	PREPARE_WORK(&(_work)->work, (_func), (_data))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
 #define INIT_WORK(_work, _func, _data)				\
 	do {							\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		(_work)->pending = 0;				\
 		PREPARE_WORK((_work), (_func), (_data));	\
-		init_timer(&(_work)->timer);			\
 	} while (0)
 
+#define INIT_DELAYED_WORK(_work, _func, _data)			\
+	do {							\
+		INIT_WORK(&(_work)->work, (_func), (_data));	\
+		init_timer(&(_work)->timer);			\
+	} while (0)
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
 						    int singlethread);
 #define create_workqueue(name) __create_workqueue((name), 0)
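
A sketch of how the two initialization paths compose, using a hypothetical handler my_handler and items my_dwork/other_dwork (not from this patch):

static void my_handler(void *data);

/* compile time: __DELAYED_WORK_INITIALIZER() sets up the embedded
 * work_struct and a disarmed timer */
static DECLARE_DELAYED_WORK(my_dwork, my_handler, NULL);

/* run time: INIT_WORK() on the embedded work, then init_timer() */
static struct delayed_work other_dwork;

static void my_setup(void *cookie)
{
	INIT_DELAYED_WORK(&other_dwork, my_handler, cookie);

	/* retarget the handler later without touching the timer */
	PREPARE_DELAYED_WORK(&other_dwork, my_handler, cookie);
}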
@@ -62,24 +82,24 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-	struct work_struct *work, unsigned long delay);
+	struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
 extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-				       struct work_struct *);
+				       struct delayed_work *);
 int execute_in_process_context(void (*fn)(void *), void *,
 			       struct execute_work *);
 
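
Correspondingly, every delay-taking entry point now wants a struct delayed_work; a hedged usage sketch against the declarations above (my_wq, my_dwork, and my_work are hypothetical):

static struct workqueue_struct *my_wq;
static struct delayed_work my_dwork;
static struct work_struct my_work;

static void my_submit(void)
{
	/* run my_dwork's handler after about one second */
	queue_delayed_work(my_wq, &my_dwork, HZ);

	/* non-delayable items keep the plain work_struct entry points */
	queue_work(my_wq, &my_work);
}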
@@ -88,13 +108,13 @@ int execute_in_process_context(void (*fn)(void *), void *,
  * function may still be running on return from cancel_delayed_work(). Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
 	int ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(0, &work->pending);
+		clear_bit(0, &work->work.pending);
 	return ret;
 }
 
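
As the comment above warns, cancel_delayed_work() only shoots down the timer: a return of 0 means the timer had already fired, so the handler may be queued or running. A minimal sketch of the documented pattern for keventd work (my_dwork is a hypothetical item):

	if (!cancel_delayed_work(&my_dwork))
		flush_scheduled_work();	/* wait out an in-flight handler */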
@@ -122,29 +122,33 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = dwork->work.wq_data;
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
+	if (delay == 0)
+		return queue_work(wq, work);
+
 	if (!test_and_set_bit(0, &work->pending)) {
 		BUG_ON(timer_pending(timer));
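
The new delay == 0 fast path is what lets the call sites earlier in this commit drop their old two-way branches; the libata hunk above is the pattern:

	/* before: the caller had to pick the entry point itself */
	if (!delay)
		rc = queue_work(wq, &dwork.work);
	else
		rc = queue_delayed_work(wq, &dwork, delay);

	/* after: queue_delayed_work() degenerates to queue_work() on its own */
	rc = queue_delayed_work(wq, &dwork, delay);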
@@ -153,7 +157,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 		/* This stores wq for the moment, for the timer_fn */
 		work->wq_data = wq;
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
@@ -172,10 +176,11 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		BUG_ON(timer_pending(timer));
@@ -184,7 +189,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 		/* This stores wq for the moment, for the timer_fn */
 		work->wq_data = wq;
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -468,31 +473,31 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
@@ -539,12 +544,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *			work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,11 +557,11 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *			work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
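
To see why these helpers loop rather than cancel once, consider a self-rearming handler (a hypothetical sketch, not from this patch): the timer can be re-armed by a handler that is already running, so a single cancel_delayed_work() can lose the race, and the helper alternates cancel and flush until a cancel wins:

static struct delayed_work poll_dwork;	/* hypothetical */

static void poll_handler(void *data)
{
	/* ... periodic work ... */
	schedule_delayed_work(&poll_dwork, HZ);	/* re-arm */
}

static void poll_teardown(void)
{
	/* keeps cancelling and flushing until neither timer nor handler is live */
	cancel_rearming_delayed_work(&poll_dwork);
}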
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -35,7 +35,7 @@ static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
 static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
@@ -285,7 +285,7 @@ static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
 static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue, rpci);
 		rpci->ops = NULL;
 	}
 }
@@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task)
 		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;