From 1830f9798f11a18a001764948f37c81dd9a1db40 Mon Sep 17 00:00:00 2001
From: Josh Boyer
Date: Sep 14 2011 20:12:36 +0000
Subject: Add patch to fix deadlock in ppc64 icswx code (rhbz 737984)


---

diff --git a/kernel.spec b/kernel.spec
index dfdd339..044aaf0 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -734,6 +734,8 @@ Patch13010: ibmveth-Fix-DMA-unmap-error.patch
 Patch13011: ibmveth-Fix-issue-with-DMA-mapping-failure.patch
 Patch13012: ibmveth-Checksum-offload-is-always-disabled.patch
 
+Patch13013: powerpc-Fix-deadlock-in-icswx-code.patch
+
 Patch20000: utrace.patch
 
 # Flattened devicetree support
@@ -1352,6 +1354,8 @@ ApplyPatch ibmveth-Fix-DMA-unmap-error.patch
 ApplyPatch ibmveth-Fix-issue-with-DMA-mapping-failure.patch
 ApplyPatch ibmveth-Checksum-offload-is-always-disabled.patch
 
+ApplyPatch powerpc-Fix-deadlock-in-icswx-code.patch
+
 # utrace.
 ApplyPatch utrace.patch
 
@@ -2063,6 +2067,9 @@ fi
 #                 ||----w |
 #                 ||     ||
 %changelog
+* Wed Sep 14 2011 Josh Boyer
+- Add patch to fix deadlock in ppc64 icswx (rhbz 737984)
+
 * Wed Sep 14 2011 Neil Horman
 - Enable CONFIG_IP_VS_IPV6 (bz 738194)
 
diff --git a/powerpc-Fix-deadlock-in-icswx-code.patch b/powerpc-Fix-deadlock-in-icswx-code.patch
new file mode 100644
index 0000000..a2ce3cf
--- /dev/null
+++ b/powerpc-Fix-deadlock-in-icswx-code.patch
@@ -0,0 +1,74 @@
+From patchwork Wed Sep 14 19:43:15 2011
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: powerpc: Fix deadlock in icswx code
+Date: Wed, 14 Sep 2011 09:43:15 -0000
+From: Anton Blanchard
+X-Patchwork-Id: 114701
+Message-Id: <20110915054315.5e5ae062@kryten>
+To: benh@kernel.crashing.org, paulus@samba.org
+Cc: linuxppc-dev@lists.ozlabs.org
+
+The icswx code introduced an A-B B-A deadlock:
+
+      CPU0                          CPU1
+      ----                          ----
+lock(&anon_vma->mutex);
+                                    lock(&mm->mmap_sem);
+                                    lock(&anon_vma->mutex);
+lock(&mm->mmap_sem);
+
+Instead of using the mmap_sem to keep mm_users constant, take the
+page table spinlock.
+
+Signed-off-by: Anton Blanchard
+Cc:
+
+---
+
+
+diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
+index 3bafc3d..4ff587e 100644
+--- a/arch/powerpc/mm/mmu_context_hash64.c
++++ b/arch/powerpc/mm/mmu_context_hash64.c
+@@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
+ 	if (!mm || !acop)
+ 		return -EINVAL;
+ 
+-	/* We need to make sure mm_users doesn't change */
+-	down_read(&mm->mmap_sem);
++	/* The page_table_lock ensures mm_users won't change under us */
++	spin_lock(&mm->page_table_lock);
+ 	spin_lock(mm->context.cop_lockp);
+ 
+ 	if (mm->context.cop_pid == COP_PID_NONE) {
+@@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
+ 
+ out:
+ 	spin_unlock(mm->context.cop_lockp);
+-	up_read(&mm->mmap_sem);
++	spin_unlock(&mm->page_table_lock);
+ 
+ 	return ret;
+ }
+@@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
+ 	if (WARN_ON_ONCE(!mm))
+ 		return;
+ 
+-	/* We need to make sure mm_users doesn't change */
+-	down_read(&mm->mmap_sem);
++	/* The page_table_lock ensures mm_users won't change under us */
++	spin_lock(&mm->page_table_lock);
+ 	spin_lock(mm->context.cop_lockp);
+ 
+ 	mm->context.acop &= ~acop;
+@@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
+ 	}
+ 
+ 	spin_unlock(mm->context.cop_lockp);
+-	up_read(&mm->mmap_sem);
++	spin_unlock(&mm->page_table_lock);
+ }
+ EXPORT_SYMBOL_GPL(drop_cop);
+ 
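
To make the A-B B-A inversion described in the patch easier to see outside the kernel, here is a minimal userspace sketch. It is not part of the patch and is purely illustrative: two ordinary pthread mutexes stand in for the kernel locks (lock_a for anon_vma->mutex, lock_b for mm->mmap_sem), and the second acquisition uses a bounded trylock loop so the demo reports the inversion instead of hanging forever.

/*
 * Illustrative sketch only -- not part of the patch above.  Two pthread
 * mutexes stand in for the two kernel locks: lock_a ~ anon_vma->mutex,
 * lock_b ~ mm->mmap_sem.  Thread "cpu0" takes A then B, thread "cpu1"
 * takes B then A, reproducing the inverted lock ordering.
 *
 * Build: cc -pthread deadlock-demo.c -o deadlock-demo
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* ~ anon_vma->mutex */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* ~ mm->mmap_sem    */

static void short_sleep(long nsec)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = nsec };
	nanosleep(&ts, NULL);
}

/* Try to take the second lock while still holding the first; give up
 * after roughly one second, which is where the real code would be stuck. */
static int take_second(pthread_mutex_t *second, const char *who)
{
	for (int i = 0; i < 100; i++) {
		if (pthread_mutex_trylock(second) == 0)
			return 0;
		short_sleep(10 * 1000 * 1000);	/* 10ms between retries */
	}
	printf("%s: second lock never became available -> A-B B-A deadlock\n", who);
	return -1;
}

static void *cpu0(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);			/* lock(&anon_vma->mutex) */
	short_sleep(50 * 1000 * 1000);			/* widen the race window  */
	if (take_second(&lock_b, "CPU0") == 0)		/* lock(&mm->mmap_sem)    */
		pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *cpu1(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);			/* lock(&mm->mmap_sem)    */
	short_sleep(50 * 1000 * 1000);			/* widen the race window  */
	if (take_second(&lock_a, "CPU1") == 0)		/* lock(&anon_vma->mutex) */
		pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}

The actual fix breaks the cycle differently: use_cop()/drop_cop() stop taking mmap_sem altogether and rely on mm->page_table_lock to keep mm_users stable, so the coprocessor paths no longer share a lock pair with the page fault path.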