From 89ad8e0d7fce902acad848af1c5d55215db48789 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Aug 17 2010 10:38:49 +0000 Subject: Fix fallout from the stack guard page fixes. (mm-fix-page-table-unmap-for-stack-guard-page-properly.patch, mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch) --- diff --git a/kernel.spec b/kernel.spec index e90e10f..8319314 100644 --- a/kernel.spec +++ b/kernel.spec @@ -47,7 +47,7 @@ Summary: The Linux kernel # reset this by hand to 1 (or to 0 and then use rpmdev-bumpspec). # scripts/rebase.sh should be made to do that for you, actually. # -%global baserelease 161 +%global baserelease 162 %global fedora_build %{baserelease} # base_sublevel is the kernel version we're starting with and patching @@ -843,6 +843,8 @@ Patch14130: kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch Patch14140: hid-01-usbhid-initialize-interface-pointers-early-enough.patch Patch14141: hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch +Patch14150: mm-fix-page-table-unmap-for-stack-guard-page-properly.patch +Patch14151: mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch # ============================================================================== %endif @@ -1556,6 +1558,10 @@ ApplyPatch kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch ApplyPatch hid-01-usbhid-initialize-interface-pointers-early-enough.patch ApplyPatch hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch +# Fix fallout from stack guard page +ApplyPatch mm-fix-page-table-unmap-for-stack-guard-page-properly.patch +ApplyPatch mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch + # END OF PATCH APPLICATIONS ==================================================== %endif @@ -2206,6 +2212,11 @@ fi %kernel_variant_files -k vmlinux %{with_kdump} kdump %changelog +* Tue Aug 17 2010 Chuck Ebbert 2.6.32.19-162 +- Fix fallout from the stack guard page fixes. 
+ (mm-fix-page-table-unmap-for-stack-guard-page-properly.patch, + mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch) + * Sat Aug 14 2010 Chuck Ebbert 2.6.32.19-161 - Linux 2.6.32.19 diff --git a/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch b/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch new file mode 100644 index 0000000..0989fe3 --- /dev/null +++ b/mm-fix-page-table-unmap-for-stack-guard-page-properly.patch @@ -0,0 +1,71 @@ +From: Linus Torvalds +Date: Sat, 14 Aug 2010 18:44:56 +0000 (-0700) +Subject: mm: fix page table unmap for stack guard page properly +X-Git-Tag: v2.6.36-rc1~20 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=11ac552477e32835cb6970bf0a70c210807f5673 + +mm: fix page table unmap for stack guard page properly + +We do in fact need to unmap the page table _before_ doing the whole +stack guard page logic, because if it is needed (mainly 32-bit x86 with +PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it +will do a kmap_atomic/kunmap_atomic. + +And those kmaps will create an atomic region that we cannot do +allocations in. However, the whole stack expand code will need to do +anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an +atomic region. + +Now, a better model might actually be to do the anon_vma_prepare() when +_creating_ a VM_GROWSDOWN segment, and not have to worry about any of +this at page fault time. But in the meantime, this is the +straightforward fix for the issue. + +See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details. 
+ +Reported-by: Wylda +Reported-by: Sedat Dilek +Reported-by: Mike Pagano +Reported-by: François Valenduc +Tested-by: Ed Tomlinson +Cc: Pekka Enberg +Cc: Greg KH +Cc: stable@kernel.org +Signed-off-by: Linus Torvalds +--- + +diff --git a/mm/memory.c b/mm/memory.c +index 9b3b73f..b6e5fd2 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + spinlock_t *ptl; + pte_t entry; + +- if (check_stack_guard_page(vma, address) < 0) { +- pte_unmap(page_table); ++ pte_unmap(page_table); ++ ++ /* Check if we need to add a guard page to the stack */ ++ if (check_stack_guard_page(vma, address) < 0) + return VM_FAULT_SIGBUS; +- } + ++ /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- ptl = pte_lockptr(mm, pmd); +- spin_lock(ptl); ++ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ +- pte_unmap(page_table); +- + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); diff --git a/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch b/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch new file mode 100644 index 0000000..095a933 --- /dev/null +++ b/mm-fix-up-some-user-visible-effects-of-the-stack-guard-page.patch @@ -0,0 +1,85 @@ +From: Linus Torvalds +Date: Sun, 15 Aug 2010 18:35:52 +0000 (-0700) +Subject: mm: fix up some user-visible effects of the stack guard page +X-Git-Tag: v2.6.36-rc1~5 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d7824370e26325c881b665350ce64fb0a4fde24a + +mm: fix up some user-visible effects of the stack guard page + +This commit makes the stack guard page somewhat less visible to user +space. 
It does this by: + + - not showing the guard page in /proc//maps + + It looks like lvm-tools will actually read /proc/self/maps to figure + out where all its mappings are, and effectively do a specialized + "mlockall()" in user space. By not showing the guard page as part of + the mapping (by just adding PAGE_SIZE to the start for grows-up + pages), lvm-tools ends up not being aware of it. + + - by also teaching the _real_ mlock() functionality not to try to lock + the guard page. + + That would just expand the mapping down to create a new guard page, + so there really is no point in trying to lock it in place. + +It would perhaps be nice to show the guard page specially in +/proc//maps (or at least mark grow-down segments some way), but +let's not open ourselves up to more breakage by user space from programs +that depends on the exact deails of the 'maps' file. + +Special thanks to Henrique de Moraes Holschuh for diving into lvm-tools +source code to see what was going on with the whole new warning. + +Reported-and-tested-by: François Valenduc +Cc: stable@kernel.org +Signed-off-by: Linus Torvalds +--- + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index aea1d3f..439fc1f 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -210,6 +210,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + int flags = vma->vm_flags; + unsigned long ino = 0; + unsigned long long pgoff = 0; ++ unsigned long start; + dev_t dev = 0; + int len; + +@@ -220,8 +221,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + ++ /* We don't show the stack guard page in /proc/maps */ ++ start = vma->vm_start; ++ if (vma->vm_flags & VM_GROWSDOWN) ++ start += PAGE_SIZE; ++ + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", +- vma->vm_start, ++ start, + vma->vm_end, + flags & VM_READ ? 'r' : '-', + flags & VM_WRITE ? 
'w' : '-', +diff --git a/mm/mlock.c b/mm/mlock.c +index 3f82720..49e5e4c 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -167,6 +167,14 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, + if (vma->vm_flags & VM_WRITE) + gup_flags |= FOLL_WRITE; + ++ /* We don't try to access the guard page of a stack vma */ ++ if (vma->vm_flags & VM_GROWSDOWN) { ++ if (start == vma->vm_start) { ++ start += PAGE_SIZE; ++ nr_pages--; ++ } ++ } ++ + while (nr_pages > 0) { + int i; +