path: root/net
author    Tom Tucker <tom@opengridcomputing.com>    2009-01-05 15:21:19 -0600
committer J. Bruce Fields <bfields@citi.umich.edu>  2009-01-07 17:08:46 -0500
commit    22945e4a1c7454c97f5d8aee1ef526c83fef3223 (patch)
tree      f082143420da55b97c98a1534336b0cf03412e0b /net
parent    9a8d248e2d2e9c880ac4561f27fea5dc200655bd (diff)
svc: Clean up deferred requests on transport destruction
A race between svc_revisit and svc_delete_xprt can result in deferred requests holding references on a transport that can never be recovered, because dead transports are not enqueued for subsequent processing.

Check for XPT_DEAD in revisit to clean up completing deferrals on a dead transport, and sweep a transport's deferred queue to do the same for queued but unprocessed deferrals.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
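For illustration only, the sketch below is a minimal user-space analogue of the cleanup pattern this patch introduces: mark the transport dead exactly once, sweep its deferred queue so each queued deferral's transport reference is dropped, and refuse to re-queue a deferral that completes against a dead transport. It is not kernel code; locking (xpt_lock, sv_lock) is omitted, and every type and helper name below is a stand-in, with only the identifiers quoted from the patch itself (svc_delete_xprt, svc_revisit, svc_deferred_dequeue, svc_xprt_put, XPT_DEAD) corresponding to real sunrpc code.

/*
 * Illustrative user-space analogue (not kernel code) of the pattern in this
 * patch: mark the transport dead once, sweep the deferred queue, and refuse
 * to re-queue a deferral on a dead transport.  All names are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_req {
	struct deferred_req *next;
};

struct xprt {
	int refcount;                  /* stands in for xpt_ref               */
	bool dead;                     /* stands in for the XPT_DEAD bit      */
	struct deferred_req *deferred; /* stands in for the xpt_deferred list */
};

static void xprt_put(struct xprt *x)   /* analogue of svc_xprt_put() */
{
	if (--x->refcount == 0)
		printf("transport freed\n");
}

/* Analogue of svc_deferred_dequeue(): pop one queued deferral, or NULL. */
static struct deferred_req *deferred_dequeue(struct xprt *x)
{
	struct deferred_req *dr = x->deferred;

	if (dr)
		x->deferred = dr->next;
	return dr;
}

/* Analogue of svc_delete_xprt(): tear down only once, then release the
 * reference held by every deferral that will never be revisited. */
static void delete_xprt(struct xprt *x)
{
	struct deferred_req *dr;

	if (x->dead)                   /* "Only do this once" */
		return;
	x->dead = true;

	for (dr = deferred_dequeue(x); dr; dr = deferred_dequeue(x)) {
		xprt_put(x);           /* reference taken when the request was deferred */
		free(dr);
	}
	xprt_put(x);                   /* the transport's own reference */
}

/* Analogue of svc_revisit(): a completing deferral must not be re-queued on
 * a dead transport, or the reference it holds could never be released. */
static void revisit(struct xprt *x, struct deferred_req *dr)
{
	if (x->dead) {
		xprt_put(x);
		free(dr);
		return;
	}
	dr->next = x->deferred;        /* re-queue for later processing */
	x->deferred = dr;
}

int main(void)
{
	struct xprt x = { .refcount = 1, .dead = false, .deferred = NULL };
	struct deferred_req *a = calloc(1, sizeof(*a));
	struct deferred_req *b = calloc(1, sizeof(*b));

	/* Each deferral pins the transport with a reference of its own. */
	x.refcount++;
	revisit(&x, a);
	x.refcount++;
	revisit(&x, b);

	/* Sweeps a and b, drops their references; the final put then frees. */
	delete_xprt(&x);
	return 0;
}

The key point the sketch mirrors is that, after this patch, both paths that can hold a deferral's transport reference (the sweep in svc_delete_xprt and the XPT_DEAD check in svc_revisit) release it, so no reference is stranded on a transport that will never be processed again.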
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/svc_xprt.c   25
1 file changed, 18 insertions, 7 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2961961..a78b879 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -850,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure)
 void svc_delete_xprt(struct svc_xprt *xprt)
 {
 	struct svc_serv *serv = xprt->xpt_server;
+	struct svc_deferred_req *dr;
+
+	/* Only do this once */
+	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
+		return;
 
 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
 	xprt->xpt_ops->xpo_detach(xprt);
@@ -864,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt)
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
-		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
-		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
-			serv->sv_tmpcnt--;
+	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+		serv->sv_tmpcnt--;
+
+	for (dr = svc_deferred_dequeue(xprt); dr;
+	     dr = svc_deferred_dequeue(xprt)) {
 		svc_xprt_put(xprt);
+		kfree(dr);
 	}
+
+	svc_xprt_put(xprt);
 	spin_unlock_bh(&serv->sv_lock);
 }
 
@@ -915,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 		container_of(dreq, struct svc_deferred_req, handle);
 	struct svc_xprt *xprt = dr->xprt;
 
-	if (too_many) {
+	spin_lock(&xprt->xpt_lock);
+	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+		spin_unlock(&xprt->xpt_lock);
+		dprintk("revisit canceled\n");
 		svc_xprt_put(xprt);
 		kfree(dr);
 		return;
 	}
 	dprintk("revisit queued\n");
 	dr->xprt = NULL;
-	spin_lock(&xprt->xpt_lock);
 	list_add(&dr->handle.recent, &xprt->xpt_deferred);
 	spin_unlock(&xprt->xpt_lock);
-	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	svc_xprt_enqueue(xprt);
 	svc_xprt_put(xprt);
 }