svc: Make svc_recv transport neutral

All of the transport fields and functions used by svc_recv are now
transport independent. Change the svc_recv function to use the svc_xprt
structure directly instead of the transport-specific svc_sock structure.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Tom Tucker 2007-12-30 21:08:16 -06:00 committed by J. Bruce Fields
parent eab996d4ac
commit c36adb2a7f


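Background for the diff below: the socket transport embeds the generic transport object, which is why every field svc_recv needs is reachable through a struct svc_xprt pointer alone. What follows is an illustrative, trimmed-down sketch of that layout, not the kernel's full declarations; only the members the hunks below actually touch are shown.

/*
 * Illustrative sketch only -- a field subset, not the real kernel headers.
 * The key point: svc_sock embeds a struct svc_xprt (sk_xprt), so generic
 * code can work purely through the embedded transport.
 */
struct svc_xprt {
        struct svc_xprt_class   *xpt_class;     /* transport class; xcl_owner pins the module */
        struct svc_xprt_ops     *xpt_ops;       /* xpo_accept, xpo_recvfrom, ... */
        struct kref             xpt_ref;        /* reference count (svc_xprt_get/put) */
        struct list_head        xpt_ready;      /* node on pool->sp_sockets when work is pending */
        unsigned long           xpt_flags;      /* XPT_CLOSE, XPT_LISTENER, XPT_OLD, ... */
        atomic_t                xpt_reserved;   /* reply space reserved by worker threads */
        struct svc_serv         *xpt_server;    /* owning RPC service */
};

struct svc_sock {
        struct svc_xprt         sk_xprt;        /* generic transport, embedded */
        struct sock             *sk_sk;         /* socket-specific state follows */
        /* ... */
};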
@@ -310,22 +310,21 @@ EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 /*
  * Dequeue the first socket. Must be called with the pool->sp_lock held.
  */
-static inline struct svc_sock *
-svc_sock_dequeue(struct svc_pool *pool)
+static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 {
-        struct svc_sock *svsk;
+        struct svc_xprt *xprt;
 
         if (list_empty(&pool->sp_sockets))
                 return NULL;
 
-        svsk = list_entry(pool->sp_sockets.next,
-                          struct svc_sock, sk_xprt.xpt_ready);
-        list_del_init(&svsk->sk_xprt.xpt_ready);
+        xprt = list_entry(pool->sp_sockets.next,
+                          struct svc_xprt, xpt_ready);
+        list_del_init(&xprt->xpt_ready);
 
-        dprintk("svc: socket %p dequeued, inuse=%d\n",
-                svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
+        dprintk("svc: transport %p dequeued, inuse=%d\n",
+                xprt, atomic_read(&xprt->xpt_ref.refcount));
 
-        return svsk;
+        return xprt;
 }
 
 /*
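Usage note, distilled from the svc_recv hunks further down rather than taken from additional kernel code: svc_xprt_dequeue() must be called with pool->sp_lock held, and the caller pins the transport with its own reference before the lock is dropped.

        /* Sketch of the caller-side pattern; see the -1525/+1524 hunk below. */
        spin_lock_bh(&pool->sp_lock);
        xprt = svc_xprt_dequeue(pool);
        if (xprt) {
                rqstp->rq_xprt = xprt;          /* attach transport to this thread */
                svc_xprt_get(xprt);             /* take a reference while still locked */
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
        }
        spin_unlock_bh(&pool->sp_lock);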
@@ -1475,7 +1474,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 int
 svc_recv(struct svc_rqst *rqstp, long timeout)
 {
-        struct svc_sock *svsk = NULL;
+        struct svc_xprt *xprt = NULL;
         struct svc_serv *serv = rqstp->rq_server;
         struct svc_pool *pool = rqstp->rq_pool;
         int len, i;
@@ -1486,9 +1485,9 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 
         dprintk("svc: server %p waiting for data (to = %ld)\n",
                 rqstp, timeout);
-        if (rqstp->rq_sock)
+        if (rqstp->rq_xprt)
                 printk(KERN_ERR
-                        "svc_recv: service %p, socket not NULL!\n",
+                        "svc_recv: service %p, transport not NULL!\n",
                         rqstp);
         if (waitqueue_active(&rqstp->rq_wait))
                 printk(KERN_ERR
@@ -1525,11 +1524,12 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
                 return -EINTR;
 
         spin_lock_bh(&pool->sp_lock);
-        if ((svsk = svc_sock_dequeue(pool)) != NULL) {
-                rqstp->rq_sock = svsk;
-                svc_xprt_get(&svsk->sk_xprt);
+        xprt = svc_xprt_dequeue(pool);
+        if (xprt) {
+                rqstp->rq_xprt = xprt;
+                svc_xprt_get(xprt);
                 rqstp->rq_reserved = serv->sv_max_mesg;
-                atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
+                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
         } else {
                 /* No data pending. Go to sleep */
                 svc_thread_enqueue(pool, rqstp);
@@ -1549,7 +1549,8 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
         spin_lock_bh(&pool->sp_lock);
         remove_wait_queue(&rqstp->rq_wait, &wait);
 
-        if (!(svsk = rqstp->rq_sock)) {
+        xprt = rqstp->rq_xprt;
+        if (!xprt) {
                 svc_thread_dequeue(pool, rqstp);
                 spin_unlock_bh(&pool->sp_lock);
                 dprintk("svc: server %p, no data yet\n", rqstp);
@@ -1559,32 +1560,32 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
         spin_unlock_bh(&pool->sp_lock);
 
         len = 0;
-        if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
+        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                 dprintk("svc_recv: found XPT_CLOSE\n");
-                svc_delete_xprt(&svsk->sk_xprt);
-        } else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
+                svc_delete_xprt(xprt);
+        } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
                 struct svc_xprt *newxpt;
-                newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
+                newxpt = xprt->xpt_ops->xpo_accept(xprt);
                 if (newxpt) {
                         /*
                          * We know this module_get will succeed because the
                          * listener holds a reference too
                          */
                         __module_get(newxpt->xpt_class->xcl_owner);
-                        svc_check_conn_limits(svsk->sk_xprt.xpt_server);
+                        svc_check_conn_limits(xprt->xpt_server);
                         svc_xprt_received(newxpt);
                 }
-                svc_xprt_received(&svsk->sk_xprt);
+                svc_xprt_received(xprt);
         } else {
-                dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
-                        rqstp, pool->sp_id, svsk,
-                        atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
-                rqstp->rq_deferred = svc_deferred_dequeue(&svsk->sk_xprt);
+                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+                        rqstp, pool->sp_id, xprt,
+                        atomic_read(&xprt->xpt_ref.refcount));
+                rqstp->rq_deferred = svc_deferred_dequeue(xprt);
                 if (rqstp->rq_deferred) {
-                        svc_xprt_received(&svsk->sk_xprt);
+                        svc_xprt_received(xprt);
                         len = svc_deferred_recv(rqstp);
                 } else
-                        len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
+                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                 dprintk("svc: got len=%d\n", len);
         }
 
@@ -1594,7 +1595,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
                 svc_xprt_release(rqstp);
                 return -EAGAIN;
         }
-        clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
+        clear_bit(XPT_OLD, &xprt->xpt_flags);
 
         rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
         rqstp->rq_chandle.defer = svc_defer;