svc: Make svc_send transport neutral

Move the sk_mutex field to the transport independent svc_xprt structure.
Now all the fields that svc_send touches are transport neutral. Change the
svc_send function to use the transport independent svc_xprt directly instead
of the transport dependent svc_sock structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
This commit is contained in:
Tom Tucker 2007-12-30 21:07:59 -06:00 committed by J. Bruce Fields
parent f6150c3cab
commit a50fea26b9
4 changed files with 10 additions and 12 deletions

View file

@@ -54,6 +54,7 @@ struct svc_xprt {
 	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
+	struct mutex		xpt_mutex;	/* to serialize sending data */
 };

 int	svc_reg_xprt_class(struct svc_xprt_class *);

View file

@@ -24,7 +24,6 @@ struct svc_sock {
 					 * sk_info_authunix */
 	struct list_head	sk_deferred;	/* deferred requests that need to
 						 * be revisted */
-	struct mutex		sk_mutex;	/* to serialize sending data */

 	/* We keep the old state_change and data_ready CB's here */
 	void			(*sk_ostate)(struct sock *);

View file

@@ -99,6 +99,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	xprt->xpt_server = serv;
 	INIT_LIST_HEAD(&xprt->xpt_list);
 	INIT_LIST_HEAD(&xprt->xpt_ready);
+	mutex_init(&xprt->xpt_mutex);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_init);

View file

@@ -1632,15 +1632,13 @@ svc_drop(struct svc_rqst *rqstp)
 int
 svc_send(struct svc_rqst *rqstp)
 {
-	struct svc_sock	*svsk;
+	struct svc_xprt	*xprt;
 	int		len;
 	struct xdr_buf	*xb;

-	if ((svsk = rqstp->rq_sock) == NULL) {
-		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
-				__FILE__, __LINE__);
+	xprt = rqstp->rq_xprt;
+	if (!xprt)
 		return -EFAULT;
-	}

 	/* release the receive skb before sending the reply */
 	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
@@ -1651,13 +1649,13 @@ svc_send(struct svc_rqst *rqstp)
 		xb->page_len +
 		xb->tail[0].iov_len;

-	/* Grab svsk->sk_mutex to serialize outgoing data. */
-	mutex_lock(&svsk->sk_mutex);
-	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
+	/* Grab mutex to serialize outgoing data. */
+	mutex_lock(&xprt->xpt_mutex);
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
 		len = -ENOTCONN;
 	else
-		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
-	mutex_unlock(&svsk->sk_mutex);
+		len = xprt->xpt_ops->xpo_sendto(rqstp);
+	mutex_unlock(&xprt->xpt_mutex);
 	svc_sock_release(rqstp);

 	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
@@ -1759,7 +1757,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_lastrecv = get_seconds();
 	spin_lock_init(&svsk->sk_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
-	mutex_init(&svsk->sk_mutex);

 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)