[CONNECTOR]: Replace delayed work with usual work queue.
Signed-off-by: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a240d9f1d8
parent 14fb8a7647
3 changed files with 10 additions and 13 deletions
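For context, the patch drops the delayed-work variant of the workqueue API in favour of plain work items, since the connector code always queued its work with a zero delay. Below is a minimal, self-contained sketch (not part of this commit) of the two call patterns; the demo_* names, the payload field and the "demo" workqueue are made up for illustration, while INIT_WORK()/queue_work(), INIT_DELAYED_WORK()/queue_delayed_work() and container_of() are the real kernel interfaces involved.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical container, standing in for struct cn_callback_entry. */
struct demo_entry {
	struct work_struct work;	/* after: a plain work item          */
	/* struct delayed_work work;	   before: work item plus a timer   */
	int payload;
};

static struct workqueue_struct *demo_wq;
static struct demo_entry demo;

static void demo_wrapper(struct work_struct *work)
{
	/* With a plain work_struct member container_of() refers to "work";
	 * with the old delayed_work member it had to say "work.work". */
	struct demo_entry *e = container_of(work, struct demo_entry, work);

	pr_info("demo payload: %d\n", e->payload);
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;

	demo.payload = 42;
	INIT_WORK(&demo.work, demo_wrapper);

	/* The pre-patch pattern queued the same work with a zero delay:
	 *	INIT_DELAYED_WORK(&demo.work, demo_wrapper);
	 *	queue_delayed_work(demo_wq, &demo.work, 0);
	 * which drags a timer along without ever using it. */
	queue_work(demo_wq, &demo.work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The struct shrinks as well: delayed_work embeds a work_struct plus a timer_list, so dropping the unused delay also removes a timer from every callback entry.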
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -34,7 +34,7 @@
 void cn_queue_wrapper(struct work_struct *work)
 {
 	struct cn_callback_entry *cbq =
-		container_of(work, struct cn_callback_entry, work.work);
+		container_of(work, struct cn_callback_entry, work);
 	struct cn_callback_data *d = &cbq->data;
 
 	d->callback(d->callback_priv);
@@ -59,13 +59,12 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
 	cbq->data.callback = callback;
 
-	INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
+	INIT_WORK(&cbq->work, &cn_queue_wrapper);
 	return cbq;
 }
 
 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-	cancel_delayed_work(&cbq->work);
 	flush_workqueue(cbq->pdev->cn_queue);
 
 	kfree(cbq);
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,16 +135,15 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!work_pending(&__cbq->work.work) &&
+			if (likely(!work_pending(&__cbq->work) &&
 					__cbq->data.ddata == NULL)) {
 				__cbq->data.callback_priv = msg;
 
 				__cbq->data.ddata = data;
 				__cbq->data.destruct_data = destruct_data;
 
-				if (queue_delayed_work(
-					dev->cbdev->cn_queue,
-					&__cbq->work, 0))
+				if (queue_work(dev->cbdev->cn_queue,
+					       &__cbq->work))
 					err = 0;
 			} else {
 				struct cn_callback_data *d;
@@ -158,12 +157,11 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 				d->destruct_data = destruct_data;
 				d->free = __cbq;
 
-				INIT_DELAYED_WORK(&__cbq->work,
-						  &cn_queue_wrapper);
+				INIT_WORK(&__cbq->work,
+					  &cn_queue_wrapper);
 
-				if (queue_delayed_work(
-					dev->cbdev->cn_queue,
-					&__cbq->work, 0))
+				if (queue_work(dev->cbdev->cn_queue,
+					       &__cbq->work))
 					err = 0;
 				else {
 					kfree(__cbq);
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@ struct cn_callback_data {
 struct cn_callback_entry {
 	struct list_head callback_entry;
 	struct cn_callback *cb;
-	struct delayed_work work;
+	struct work_struct work;
 	struct cn_queue_dev *pdev;
 
 	struct cn_callback_id id;