ipc: get rid of ipc_lock_down()
Remove the ipc_lock_down() routines: they used to call idr_find() locklessly
(given that the ipc ids lock was already held), so they are not needed anymore.

Signed-off-by: Nadia Derbey <Nadia.Derbey@bull.net>
Acked-by: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Jim Houston <jim.houston@comcast.net>
Cc: Pierre Peiffer <peifferp@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 00c2bf85d8
parent 983bfb7db3

3 changed files with 4 additions and 75 deletions
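The reasoning in the commit message is easiest to see from the caller side. Below is a minimal sketch, not verbatim kernel code (the function name example_writer_path is made up for illustration), of the calling convention this patch leaves behind: a writer path takes the ids rw_mutex with down_write() and then uses the ordinary ipc_lock_check(), which takes rcu_read_lock() and the per-object spinlock itself, mirroring the ipcctl_pre_down() hunk further down.

    /*
     * Illustrative sketch only: the calling convention left after this
     * patch.  The writer path serializes idr-tree changes via rw_mutex,
     * but the lookup/lock helper is the same one the lock-less paths use;
     * it takes rcu_read_lock() and the per-object spinlock on its own.
     */
    static int example_writer_path(struct ipc_ids *ids, int id)
    {
    	struct kern_ipc_perm *ipcp;
    	int err = 0;

    	down_write(&ids->rw_mutex);		/* protects the idr tree */
    	ipcp = ipc_lock_check(ids, id);		/* no *_down variant needed */
    	if (IS_ERR(ipcp)) {
    		err = PTR_ERR(ipcp);
    		goto out_up;
    	}

    	/* ... modify *ipcp under its spinlock ... */

    	ipc_unlock(ipcp);
    out_up:
    	up_write(&ids->rw_mutex);
    	return err;
    }

Compare with the ipcctl_pre_down() hunk in ipc/util.c below, which has exactly this shape after the patch.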
ipc/shm.c | 21

@@ -111,24 +111,9 @@ void __init shm_init (void)
 				IPC_SHM_IDS, sysvipc_shm_proc_show);
 }
 
-/*
- * shm_lock_(check_)down routines are called in the paths where the rw_mutex
- * is held to protect access to the idr tree.
- */
-static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
-						int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
-	return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
 /*
  * shm_lock_(check_) routines are called in the paths where the rw_mutex
- * is not held.
+ * is not necessarily held.
  */
 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 {
@@ -211,7 +196,7 @@ static void shm_close(struct vm_area_struct *vma)
 
 	down_write(&shm_ids(ns).rw_mutex);
 	/* remove from the list of attaches of the shm segment */
-	shp = shm_lock_down(ns, sfd->id);
+	shp = shm_lock(ns, sfd->id);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
@@ -932,7 +917,7 @@ invalid:
 
 out_nattch:
 	down_write(&shm_ids(ns).rw_mutex);
-	shp = shm_lock_down(ns, shmid);
+	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
 	if(shp->shm_nattch == 0 &&
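For contrast with the shm_lock_down() removed above: the surviving shm_lock() is essentially the same wrapper, built on ipc_lock() instead. A sketch reconstructed from the removed code rather than quoted from ipc/shm.c:

    /*
     * Sketch of the surviving helper, reconstructed from the removed
     * shm_lock_down() above: identical shape, but it calls ipc_lock(),
     * which takes rcu_read_lock() and the object spinlock itself and
     * validates the entry, so it works with or without rw_mutex held.
     */
    static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
    {
    	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

    	if (IS_ERR(ipcp))
    		return (struct shmid_kernel *)ipcp;

    	return container_of(ipcp, struct shmid_kernel, shm_perm);
    }

shm_close() and the out_nattch path in the hunks above hold shm_ids(ns).rw_mutex when they call it, which should be harmless: the extra validation inside ipc_lock() simply never triggers there.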
ipc/util.c | 52

@@ -716,56 +716,6 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	return out;
 }
 
-/**
- * ipc_lock_down - Lock an ipc structure with rw_sem held
- * @ids: IPC identifier set
- * @id: ipc id to look for
- *
- * Look for an id in the ipc ids idr and lock the associated ipc object.
- *
- * The ipc object is locked on exit.
- *
- * This is the routine that should be called when the rw_mutex is already
- * held, i.e. idr tree protected.
- */
-
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-	int lid = ipcid_to_idx(id);
-
-	rcu_read_lock();
-	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock(&out->lock);
-
-	/*
-	 * No need to verify that the structure is still valid since the
-	 * rw_mutex is held.
-	 */
-	return out;
-}
-
-struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-
-	out = ipc_lock_down(ids, id);
-	if (IS_ERR(out))
-		return out;
-
-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
-		return ERR_PTR(-EIDRM);
-	}
-
-	return out;
-}
-
 struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
 {
 	struct kern_ipc_perm *out;
@@ -837,7 +787,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
 	int err;
 
 	down_write(&ids->rw_mutex);
-	ipcp = ipc_lock_check_down(ids, id);
+	ipcp = ipc_lock_check(ids, id);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
 		goto out_up;
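What makes the *_down variants redundant is that the surviving ipc_lock() already does the RCU-protected idr_find() and spin_lock() itself, and additionally re-validates the entry afterwards (the check the removed comment calls unnecessary under rw_mutex). A sketch of that shape, reconstructed from the removed ipc_lock_down() and its comment rather than quoted from ipc/util.c, and assuming the deleted flag that kern_ipc_perm carries in this tree:

    /*
     * Sketch only (named ipc_lock_sketch to avoid claiming it is the exact
     * ipc_lock() body): same RCU lookup and spin_lock() as the removed
     * ipc_lock_down(), plus a re-check of the entry, because without
     * rw_mutex an ipc_rmid() may have freed the ID while we were spinning.
     */
    struct kern_ipc_perm *ipc_lock_sketch(struct ipc_ids *ids, int id)
    {
    	struct kern_ipc_perm *out;
    	int lid = ipcid_to_idx(id);

    	rcu_read_lock();
    	out = idr_find(&ids->ipcs_idr, lid);
    	if (out == NULL) {
    		rcu_read_unlock();
    		return ERR_PTR(-EINVAL);
    	}

    	spin_lock(&out->lock);

    	if (out->deleted) {		/* raced with ipc_rmid()? */
    		spin_unlock(&out->lock);
    		rcu_read_unlock();
    		return ERR_PTR(-EINVAL);
    	}

    	return out;
    }

Because that re-check is cheap and correct whether or not rw_mutex is held, callers that do hold it can simply use ipc_lock()/ipc_lock_check(), which is what the hunks above and below switch them to.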
ipc/util.h | 6

@@ -102,11 +102,6 @@ void* ipc_rcu_alloc(int size);
 void ipc_rcu_getref(void *ptr);
 void ipc_rcu_putref(void *ptr);
 
-/*
- * ipc_lock_down: called with rw_mutex held
- * ipc_lock: called without that lock held
- */
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
@@ -155,7 +150,6 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
 	rcu_read_unlock();
 }
 
-struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id);
 struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 			struct ipc_ops *ops, struct ipc_params *params);