dlm: change rsbtbl rwlock to spinlock
The rwlock is almost always used in write mode, so there is no reason not to use a spinlock instead.

Signed-off-by: David Teigland <teigland@redhat.com>
parent 892c4467e3
commit c7be761a81
5 changed files with 32 additions and 32 deletions
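Before the per-file hunks, a minimal sketch of the pattern being applied may help. This is not the fs/dlm code: example_rsbtable, example_init and example_add are hypothetical names used only for illustration; what carries over to the real diff below is just the substitution of lock primitives (rwlock_t / rwlock_init() / write_lock() becoming spinlock_t / spin_lock_init() / spin_lock()).

/* Illustrative sketch only -- example_rsbtable, example_init and
 * example_add are made-up names, not fs/dlm symbols. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_rsbtable {
	struct list_head list;
	struct list_head toss;
	spinlock_t lock;		/* was: rwlock_t lock; */
};

static struct example_rsbtable tbl;

static void example_init(void)
{
	INIT_LIST_HEAD(&tbl.list);
	INIT_LIST_HEAD(&tbl.toss);
	spin_lock_init(&tbl.lock);	/* was: rwlock_init(&tbl.lock); */
}

static void example_add(struct list_head *entry)
{
	/* Almost every user of the bucket lock modifies the chain, so it
	 * already had to take the rwlock in write mode; a plain spinlock
	 * gives the same exclusion without reader/writer bookkeeping. */
	spin_lock(&tbl.lock);		/* was: write_lock(&tbl.lock); */
	list_add(entry, &tbl.list);
	spin_unlock(&tbl.lock);		/* was: write_unlock(&tbl.lock); */
}

The few read-side callers in the diff (the debugfs table iterators and find_purged_rsb) likewise switch from read_lock() to spin_lock(), trading unused read concurrency for the simpler primitive.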
fs/dlm/debug_fs.c
@@ -416,7 +416,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 	if (seq->op == &format3_seq_ops)
 		ri->format = 3;
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 		list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
 				    res_hashchain) {
@@ -424,12 +424,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 				dlm_hold_rsb(r);
 				ri->rsb = r;
 				ri->bucket = bucket;
-				read_unlock(&ls->ls_rsbtbl[bucket].lock);
+				spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 				return ri;
 			}
 		}
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 
 	/*
 	 * move to the first rsb in the next non-empty bucket
@@ -447,18 +447,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 			return NULL;
 		}
 
-		read_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
 		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
 					     struct dlm_rsb, res_hashchain);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			read_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
 
@@ -477,7 +477,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 	 * move to the next rsb in the same bucket
 	 */
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	rp = ri->rsb;
 	next = rp->res_hashchain.next;
 
@@ -485,12 +485,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 		r = list_entry(next, struct dlm_rsb, res_hashchain);
 		dlm_hold_rsb(r);
 		ri->rsb = r;
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 		dlm_put_rsb(rp);
 		++*pos;
 		return ri;
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	dlm_put_rsb(rp);
 
 	/*
@@ -509,18 +509,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 			return NULL;
 		}
 
-		read_lock(&ls->ls_rsbtbl[bucket].lock);
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
 		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
 			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
 					     struct dlm_rsb, res_hashchain);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
-			read_unlock(&ls->ls_rsbtbl[bucket].lock);
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 			*pos = n;
 			return ri;
 		}
-		read_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	}
 }
 
fs/dlm/dlm_internal.h
@@ -105,7 +105,7 @@ struct dlm_dirtable {
 struct dlm_rsbtable {
 	struct list_head	list;
 	struct list_head	toss;
-	rwlock_t		lock;
+	spinlock_t		lock;
 };
 
 struct dlm_lkbtable {
fs/dlm/lock.c
@@ -412,9 +412,9 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
 		      unsigned int flags, struct dlm_rsb **r_ret)
 {
 	int error;
-	write_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock(&ls->ls_rsbtbl[b].lock);
 	error = _search_rsb(ls, name, len, b, flags, r_ret);
-	write_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock(&ls->ls_rsbtbl[b].lock);
 	return error;
 }
 
@@ -478,16 +478,16 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
 		r->res_nodeid = nodeid;
 	}
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
 	if (!error) {
-		write_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 		dlm_free_rsb(r);
 		r = tmp;
 		goto out;
 	}
 	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	error = 0;
  out:
 	*r_ret = r;
@@ -530,9 +530,9 @@ static void put_rsb(struct dlm_rsb *r)
 	struct dlm_ls *ls = r->res_ls;
 	uint32_t bucket = r->res_bucket;
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	kref_put(&r->res_ref, toss_rsb);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 }
 
 void dlm_put_rsb(struct dlm_rsb *r)
@@ -967,7 +967,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
 
 	for (;;) {
 		found = 0;
-		write_lock(&ls->ls_rsbtbl[b].lock);
+		spin_lock(&ls->ls_rsbtbl[b].lock);
 		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
 					    res_hashchain) {
 			if (!time_after_eq(jiffies, r->res_toss_time +
@@ -978,20 +978,20 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
 		}
 
 		if (!found) {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			break;
 		}
 
 		if (kref_put(&r->res_ref, kill_rsb)) {
 			list_del(&r->res_hashchain);
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 
 			if (is_master(r))
 				dir_remove(r);
 			dlm_free_rsb(r);
 			count++;
 		} else {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			log_error(ls, "tossed rsb in use %s", r->res_name);
 		}
 	}
@@ -4224,7 +4224,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 {
 	struct dlm_rsb *r, *r_ret = NULL;
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
 		if (!rsb_flag(r, RSB_LOCKS_PURGED))
 			continue;
@@ -4233,7 +4233,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 		r_ret = r;
 		break;
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	return r_ret;
 }
 
fs/dlm/lockspace.c
@@ -464,7 +464,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
 	for (i = 0; i < size; i++) {
 		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
 		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
-		rwlock_init(&ls->ls_rsbtbl[i].lock);
+		spin_lock_init(&ls->ls_rsbtbl[i].lock);
 	}
 
 	size = dlm_config.ci_lkbtbl_size;
fs/dlm/recover.c
@@ -726,7 +726,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
 	}
 
 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		read_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock(&ls->ls_rsbtbl[i].lock);
 		list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
 			list_add(&r->res_root_list, &ls->ls_root_list);
 			dlm_hold_rsb(r);
@@ -737,7 +737,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
 		   but no other recovery steps should do anything with them. */
 
 		if (dlm_no_directory(ls)) {
-			read_unlock(&ls->ls_rsbtbl[i].lock);
+			spin_unlock(&ls->ls_rsbtbl[i].lock);
 			continue;
 		}
 
@@ -745,7 +745,7 @@ int dlm_create_root_list(struct dlm_ls *ls)
 			list_add(&r->res_root_list, &ls->ls_root_list);
 			dlm_hold_rsb(r);
 		}
-		read_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock(&ls->ls_rsbtbl[i].lock);
 	}
  out:
 	up_write(&ls->ls_root_sem);
@@ -775,7 +775,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
 	int i;
 
 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		write_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock(&ls->ls_rsbtbl[i].lock);
 		list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
					 res_hashchain) {
 			if (dlm_no_directory(ls) || !is_master(r)) {
@@ -783,7 +783,7 @@ void dlm_clear_toss_list(struct dlm_ls *ls)
 				dlm_free_rsb(r);
 			}
 		}
-		write_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock(&ls->ls_rsbtbl[i].lock);
 	}
 }
 