diff --git a/0001-svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch b/0001-svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch
new file mode 100644
index 0000000..ce80a9d
--- /dev/null
+++ b/0001-svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch
@@ -0,0 +1,69 @@
+From e75bafbff2270993926abcc31358361db74a9bc2 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields"
+Date: Sun, 10 Feb 2013 11:33:48 -0500
+Subject: [PATCH 1/2] svcrpc: make svc_age_temp_xprts enqueue under sv_lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+svc_age_temp_xprts expires xprts in a two-step process: first it takes
+the sv_lock and moves the xprts to expire off their server-wide list
+(sv_tempsocks or sv_permsocks) to a local list.  Then it drops the
+sv_lock and enqueues and puts each one.
+
+I see no reason for this: svc_xprt_enqueue() will take sp_lock, but the
+sv_lock and sp_lock are not otherwise nested anywhere (and documentation
+at the top of this file claims it's correct to nest these with sp_lock
+inside.)
+
+Cc: stable@kernel.org
+Tested-by: Jason Tibbitts
+Tested-by: Paweł Sikora
+Signed-off-by: J. Bruce Fields
+---
+ net/sunrpc/svc_xprt.c | 15 ++-------------
+ 1 file changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 5a9d40c..11a33c8 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -863,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure)
+ 	struct svc_serv *serv = (struct svc_serv *)closure;
+ 	struct svc_xprt *xprt;
+ 	struct list_head *le, *next;
+-	LIST_HEAD(to_be_aged);
+ 
+ 	dprintk("svc_age_temp_xprts\n");
+ 
+@@ -884,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure)
+ 		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+ 		    test_bit(XPT_BUSY, &xprt->xpt_flags))
+ 			continue;
+-		svc_xprt_get(xprt);
+-		list_move(le, &to_be_aged);
++		list_del_init(le);
+ 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ 		set_bit(XPT_DETACHED, &xprt->xpt_flags);
+-	}
+-	spin_unlock_bh(&serv->sv_lock);
+-
+-	while (!list_empty(&to_be_aged)) {
+-		le = to_be_aged.next;
+-		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+-		list_del_init(le);
+-		xprt = list_entry(le, struct svc_xprt, xpt_list);
+-
+ 		dprintk("queuing xprt %p for closing\n", xprt);
+ 
+ 		/* a thread will dequeue and close it soon */
+ 		svc_xprt_enqueue(xprt);
+-		svc_xprt_put(xprt);
+ 	}
++	spin_unlock_bh(&serv->sv_lock);
+ 
+ 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
+ }
+-- 
+1.8.1.2
+
diff --git a/0002-svcrpc-fix-rpc-server-shutdown-races.patch b/0002-svcrpc-fix-rpc-server-shutdown-races.patch
new file mode 100644
index 0000000..25f0e37
--- /dev/null
+++ b/0002-svcrpc-fix-rpc-server-shutdown-races.patch
@@ -0,0 +1,154 @@
+From cc630d9f476445927fca599f81182c7f06f79058 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields"
+Date: Sun, 10 Feb 2013 16:08:11 -0500
+Subject: [PATCH 2/2] svcrpc: fix rpc server shutdown races
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Rewrite server shutdown to remove the assumption that there are no
+longer any threads running (no longer true, for example, when shutting
+down the service in one network namespace while it's still running in
+others).
+
+Do that by doing what we'd do in normal circumstances: just CLOSE each
+socket, then enqueue it.
+
+Since there may not be threads to handle the resulting queued xprts,
+also run a simplified version of the svc_recv() loop run by a server to
+clean up any closed xprts afterwards.
+
+Cc: stable@kernel.org
+Tested-by: Jason Tibbitts
+Tested-by: Paweł Sikora
+Acked-by: Stanislav Kinsbursky
+Signed-off-by: J. Bruce Fields
+---
+ net/sunrpc/svc.c      |  9 --------
+ net/sunrpc/svc_xprt.c | 57 +++++++++++++++++++++++++++++----------------------
+ 2 files changed, 32 insertions(+), 34 deletions(-)
+
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index b9ba2a8..89a588b 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
+ 
+ void svc_shutdown_net(struct svc_serv *serv, struct net *net)
+ {
+-	/*
+-	 * The set of xprts (contained in the sv_tempsocks and
+-	 * sv_permsocks lists) is now constant, since it is modified
+-	 * only by accepting new sockets (done by service threads in
+-	 * svc_recv) or aging old ones (done by sv_temptimer), or
+-	 * configuration changes (excluded by whatever locking the
+-	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
+-	 * safe to traverse those lists and shut everything down:
+-	 */
+ 	svc_close_net(serv, net);
+ 
+ 	if (serv->sv_shutdown)
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 11a33c8..80a6640 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -955,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+ 
+-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+ {
+ 	struct svc_xprt *xprt;
++	int ret = 0;
+ 
+ 	spin_lock(&serv->sv_lock);
+ 	list_for_each_entry(xprt, xprt_list, xpt_list) {
+ 		if (xprt->xpt_net != net)
+ 			continue;
++		ret++;
+ 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+-		set_bit(XPT_BUSY, &xprt->xpt_flags);
++		svc_xprt_enqueue(xprt);
+ 	}
+ 	spin_unlock(&serv->sv_lock);
++	return ret;
+ }
+ 
+-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
++static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+ {
+ 	struct svc_pool *pool;
+ 	struct svc_xprt *xprt;
+@@ -984,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+ 			if (xprt->xpt_net != net)
+ 				continue;
+ 			list_del_init(&xprt->xpt_ready);
++			spin_unlock_bh(&pool->sp_lock);
++			return xprt;
+ 		}
+ 		spin_unlock_bh(&pool->sp_lock);
+ 	}
++	return NULL;
+ }
+ 
+-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
+ {
+ 	struct svc_xprt *xprt;
+-	struct svc_xprt *tmp;
+-	LIST_HEAD(victims);
+-
+-	spin_lock(&serv->sv_lock);
+-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+-		if (xprt->xpt_net != net)
+-			continue;
+-		list_move(&xprt->xpt_list, &victims);
+-	}
+-	spin_unlock(&serv->sv_lock);
+ 
+-	list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
++	while ((xprt = svc_dequeue_net(serv, net))) {
++		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ 		svc_delete_xprt(xprt);
++	}
+ }
+ 
++/*
++ * Server threads may still be running (especially in the case where the
++ * service is still running in other network namespaces).
++ *
++ * So we shut down sockets the same way we would on a running server, by
++ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
++ * the close.  In the case there are no such other threads,
++ * threads running, svc_clean_up_xprts() does a simple version of a
++ * server's main event loop, and in the case where there are other
++ * threads, we may need to wait a little while and then check again to
++ * see if they're done.
++ */
+ void svc_close_net(struct svc_serv *serv, struct net *net)
+ {
+-	svc_close_list(serv, &serv->sv_tempsocks, net);
+-	svc_close_list(serv, &serv->sv_permsocks, net);
++	int delay = 0;
+ 
+-	svc_clear_pools(serv, net);
+-	/*
+-	 * At this point the sp_sockets lists will stay empty, since
+-	 * svc_xprt_enqueue will not add new entries without taking the
+-	 * sp_lock and checking XPT_BUSY.
+-	 */
+-	svc_clear_list(serv, &serv->sv_tempsocks, net);
+-	svc_clear_list(serv, &serv->sv_permsocks, net);
++	while (svc_close_list(serv, &serv->sv_permsocks, net) +
++	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
++
++		svc_clean_up_xprts(serv, net);
++		msleep(delay++);
++	}
+ }
+ 
+ /*
+-- 
+1.8.1.2
+
diff --git a/kernel.spec b/kernel.spec
index 8be5db7..601b29d 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -62,7 +62,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be appended after the rcX and
 # gitX tags, so a 3 here would become part of release "0.rcX.gitX.3"
 #
-%global baserelease 201
+%global baserelease 202
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -751,6 +751,10 @@ Patch22261: 0001-kmsg-Honor-dmesg_restrict-sysctl-on-dev-kmsg.patch
 #rhbz 914737
 Patch22262: x86-mm-Fix-vmalloc_fault-oops-during-lazy-MMU-updates.patch
 
+#rhbz 904870
+Patch22263: 0001-svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch
+Patch22264: 0002-svcrpc-fix-rpc-server-shutdown-races.patch
+
 #rhbz 812111
 Patch24000: alps.patch
 
@@ -1462,6 +1466,10 @@ ApplyPatch x86-mm-Fix-vmalloc_fault-oops-during-lazy-MMU-updates.patch
 
 ApplyPatch userns-avoid-recursion-in-put_user_ns.patch
 
+#rhbz 904870
+ApplyPatch 0001-svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch
+ApplyPatch 0002-svcrpc-fix-rpc-server-shutdown-races.patch
+
 # END OF PATCH APPLICATIONS
 
 
@@ -2320,6 +2328,9 @@ fi
 # ||----w |
 # || ||
 
 %changelog
+* Fri Mar 01 2013 Josh Boyer
+- Add patches to fix sunrpc panic (rhbz 904870)
+
 * Thu Feb 28 2013 Peter Robinson
 - Update ARM config for 3.8