mirror of https://github.com/adulau/aha.git
firewire: normalize style of queue_work wrappers
A few stylistic changes to unify some code patterns in the subsystem:

  - The similar queue_delayed_work helpers fw_schedule_bm_work,
    schedule_iso_resource, and sbp2_queue_work now have the same call
    convention.
  - Two conditional calls of schedule_iso_resource are factored into
    another small helper.
  - An sbp2_target_get helper is added as counterpart to sbp2_target_put.

Object size of firewire-core is decreased a little bit; object size of
firewire-sbp2 remains unchanged.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
parent 7e44c0b56b
commit 9fb551bf72

3 changed files with 27 additions and 25 deletions
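All three wrappers now follow the same convention: take a reference on behalf of the work item, try to queue it, and drop the reference again if the item was already pending (schedule_delayed_work() returns false in that case). A minimal sketch of the shared pattern, with a hypothetical "foo" standing in for fw_card, iso_resource, and sbp2_target:

	/*
	 * Illustration only: "foo", foo_get(), and foo_put() are
	 * hypothetical stand-ins.  The reference taken here is owned
	 * by the queued work item; if the item was already queued,
	 * schedule_delayed_work() returns false and the reference is
	 * dropped again immediately.
	 */
	static void schedule_foo_work(struct foo *f, unsigned long delay)
	{
		foo_get(f);
		if (!schedule_delayed_work(&f->work, delay))
			foo_put(f);
	}
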
@@ -211,11 +211,8 @@ static const char gap_count_table[] = {
 
 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 {
-	int scheduled;
-
 	fw_card_get(card);
-	scheduled = schedule_delayed_work(&card->work, delay);
-	if (!scheduled)
+	if (!schedule_delayed_work(&card->work, delay))
 		fw_card_put(card);
 }
 
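The reference taken by such a wrapper is balanced by the work function when it eventually runs. Continuing the hypothetical foo sketch above (assuming struct foo embeds a struct delayed_work named "work"):

	/* Hypothetical counterpart to schedule_foo_work() above. */
	static void foo_work(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, work.work);

		/* ... perform the delayed work ... */

		foo_put(f);	/* balances the foo_get() at queue time */
	}
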
@@ -129,9 +129,22 @@ struct iso_resource {
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
-static void schedule_iso_resource(struct iso_resource *);
 static void release_iso_resource(struct client *, struct client_resource *);
 
+static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
+{
+	client_get(r->client);
+	if (!schedule_delayed_work(&r->work, delay))
+		client_put(r->client);
+}
+
+static void schedule_if_iso_resource(struct client_resource *resource)
+{
+	if (resource->release == release_iso_resource)
+		schedule_iso_resource(container_of(resource,
+					struct iso_resource, resource), 0);
+}
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
  * the first field in a struct XYZ_event.
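The new schedule_if_iso_resource() helper uses container_of() to map an embedded client_resource back to its enclosing iso_resource before scheduling it. A self-contained user-space sketch of that pointer arithmetic, with a simplified, hypothetical struct layout:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel structures. */
	struct client_resource {
		int handle;
	};

	struct iso_resource {
		int channel;
		struct client_resource resource;	/* embedded member */
	};

	/* The kernel's container_of(), reduced to plain C. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct iso_resource r = { .channel = 5, .resource = { .handle = 7 } };
		struct client_resource *res = &r.resource;

		/* Recover the enclosing object from the embedded pointer. */
		struct iso_resource *back =
			container_of(res, struct iso_resource, resource);
		printf("channel = %d\n", back->channel);	/* prints 5 */
		return 0;
	}
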
@@ -313,11 +326,8 @@ static void for_each_client(struct fw_device *device,
 
 static int schedule_reallocations(int id, void *p, void *data)
 {
-	struct client_resource *r = p;
+	schedule_if_iso_resource(p);
 
-	if (r->release == release_iso_resource)
-		schedule_iso_resource(container_of(r,
-					struct iso_resource, resource));
 	return 0;
 }
 
@@ -413,9 +423,7 @@ static int add_client_resource(struct client *client,
 			  &resource->handle);
 	if (ret >= 0) {
 		client_get(client);
-		if (resource->release == release_iso_resource)
-			schedule_iso_resource(container_of(resource,
-					struct iso_resource, resource));
+		schedule_if_iso_resource(resource);
 	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
@@ -1032,8 +1040,7 @@ static void iso_resource_work(struct work_struct *work)
 	/* Allow 1000ms grace period for other reallocations. */
 	if (todo == ISO_RES_ALLOC &&
 	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
-		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
-			client_get(client);
+		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
 		skip = true;
 	} else {
 		/* We could be called twice within the same generation. */
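For reference, DIV_ROUND_UP(HZ, 3) in the hunk above is a ceiling division, so the retry lands roughly a third of a second out for any HZ value. The kernel macro (from <linux/kernel.h>) is equivalent to:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
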
@@ -1118,13 +1125,6 @@ static void iso_resource_work(struct work_struct *work)
 	client_put(client);
 }
 
-static void schedule_iso_resource(struct iso_resource *r)
-{
-	client_get(r->client);
-	if (!schedule_delayed_work(&r->work, 0))
-		client_put(r->client);
-}
-
 static void release_iso_resource(struct client *client,
 				 struct client_resource *resource)
 {
@@ -1133,7 +1133,7 @@ static void release_iso_resource(struct client *client,
 
 	spin_lock_irq(&client->lock);
 	r->todo = ISO_RES_DEALLOC;
-	schedule_iso_resource(r);
+	schedule_iso_resource(r, 0);
 	spin_unlock_irq(&client->lock);
 }
 
@@ -1179,7 +1179,7 @@ static int init_iso_resource(struct client *client,
 	} else {
 		r->resource.release = NULL;
 		r->resource.handle = -1;
-		schedule_iso_resource(r);
+		schedule_iso_resource(r, 0);
 	}
 	request->handle = r->resource.handle;
 
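Per the comment kept in the hunk below, every job queued through sbp2_queue_work() must drop the target reference when it returns. A hedged sketch of the shape such a work function takes (hypothetical name; the driver's login and reconnect jobs are real examples of this shape):

	/* Hypothetical example job; assumes struct sbp2_logical_unit
	 * embeds a struct delayed_work named "work". */
	static void sbp2_example_work(struct work_struct *work)
	{
		struct sbp2_logical_unit *lu =
			container_of(work, struct sbp2_logical_unit, work.work);

		/* ... communicate with the unit ... */

		sbp2_target_put(lu->tgt);	/* balances sbp2_target_get() */
	}
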
@@ -820,20 +820,25 @@ static void sbp2_release_target(struct kref *kref)
 	fw_device_put(device);
 }
 
-static struct workqueue_struct *sbp2_wq;
+static void sbp2_target_get(struct sbp2_target *tgt)
+{
+	kref_get(&tgt->kref);
+}
 
 static void sbp2_target_put(struct sbp2_target *tgt)
 {
 	kref_put(&tgt->kref, sbp2_release_target);
 }
 
+static struct workqueue_struct *sbp2_wq;
+
 /*
  * Always get the target's kref when scheduling work on one of its units.
  * Each workqueue job is responsible to call sbp2_target_put() upon return.
  */
 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
 {
-	kref_get(&lu->tgt->kref);
+	sbp2_target_get(lu->tgt);
 	if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
 		sbp2_target_put(lu->tgt);
 }