-rw-r--r-- | 2.6.32/0000_README | 2
-rw-r--r-- | 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307050015.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201306302051.patch) | 754
-rw-r--r-- | 3.2.48/0000_README | 2
-rw-r--r-- | 3.2.48/4420_grsecurity-2.9.1-3.2.48-201307050016.patch (renamed from 3.2.48/4420_grsecurity-2.9.1-3.2.48-201306302051.patch) | 756
-rw-r--r-- | 3.9.9/0000_README (renamed from 3.9.8/0000_README) | 2
-rw-r--r-- | 3.9.9/4420_grsecurity-2.9.1-3.9.9-201307050017.patch (renamed from 3.9.8/4420_grsecurity-2.9.1-3.9.8-201306302052.patch) | 706
-rw-r--r-- | 3.9.9/4425_grsec_remove_EI_PAX.patch (renamed from 3.9.8/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r-- | 3.9.9/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.9.8/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r-- | 3.9.9/4430_grsec-remove-localversion-grsec.patch (renamed from 3.9.8/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r-- | 3.9.9/4435_grsec-mute-warnings.patch (renamed from 3.9.8/4435_grsec-mute-warnings.patch) | 0
-rw-r--r-- | 3.9.9/4440_grsec-remove-protected-paths.patch (renamed from 3.9.8/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r-- | 3.9.9/4450_grsec-kconfig-default-gids.patch (renamed from 3.9.8/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r-- | 3.9.9/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.9.8/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r-- | 3.9.9/4470_disable-compat_vdso.patch (renamed from 3.9.8/4470_disable-compat_vdso.patch) | 0
-rw-r--r-- | 3.9.9/4475_emutramp_default_on.patch (renamed from 3.9.8/4475_emutramp_default_on.patch) | 0
15 files changed, 1210 insertions, 1012 deletions
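Most of the refreshed grsecurity hunks below are one mechanical change: every arch_get_unmapped_area()/hugetlb allocator that used to test `!vma || addr + len <= vma->vm_start` now calls `check_heap_stack_gap(vma, &addr, len, offset)`, and the top-down paths decrement `addr` before `find_vma()` so the cached hint is the (possibly adjusted) address the helper hands back; the new declaration in include/linux/sched.h takes `unsigned long *addr`, presumably so the helper can move the candidate past a stack guard gap in place. The snippet below is only a minimal userspace model of that interface, not the grsecurity implementation: the struct layout, the guard-gap constant, and the retry behaviour are assumptions for illustration.

```c
/*
 * Illustrative model of the check_heap_stack_gap(vma, &addr, ...) pattern.
 * Simplified assumptions, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;			/* bit 0 stands in for VM_GROWSDOWN */
};

#define SIMPLE_VM_GROWSDOWN	0x1UL
#define SIMPLE_STACK_GUARD_GAP	(64UL * 1024)	/* assumed gap, not sysctl_heap_stack_gap */

/*
 * Return true if [*addr, *addr + len) fits below vma while leaving a guard
 * gap under a growing-down stack; on failure, advance *addr past the vma so
 * the caller can retry (and cache) the adjusted hint.
 */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long *addr, unsigned long len,
				 unsigned long offset)
{
	unsigned long gap = 0;

	if (!vma)				/* nothing mapped above: fits */
		return true;
	if (vma->vm_flags & SIMPLE_VM_GROWSDOWN)
		gap = SIMPLE_STACK_GUARD_GAP;

	if (*addr + len + gap <= vma->vm_start)
		return true;

	*addr = vma->vm_end + offset;		/* retry hint for the caller */
	return false;
}

int main(void)
{
	struct vm_area_struct stack = {
		.vm_start = 0x7f0000000000UL,
		.vm_end   = 0x7f0000100000UL,
		.vm_flags = SIMPLE_VM_GROWSDOWN,
	};
	unsigned long addr = 0x7effffff8000UL;	/* too close to the stack */

	if (!check_heap_stack_gap(&stack, &addr, 0x10000, 0))
		printf("rejected, retry hint: %#lx\n", addr);
	return 0;
}
```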
diff --git a/2.6.32/0000_README b/2.6.32/0000_README index 7480e7a..51f219a 100644 --- a/2.6.32/0000_README +++ b/2.6.32/0000_README @@ -38,7 +38,7 @@ Patch: 1060_linux-2.6.32.61.patch From: http://www.kernel.org Desc: Linux 2.6.32.61 -Patch: 4420_grsecurity-2.9.1-2.6.32.61-201306302051.patch +Patch: 4420_grsecurity-2.9.1-2.6.32.61-201307050015.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201306302051.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307050015.patch index d3ad7ec..9f43159 100644 --- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201306302051.patch +++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307050015.patch @@ -628,7 +628,7 @@ index ebc3c89..20cfa63 100644 for (i = 0; i < n; i++) { diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c -index a94e49c..ad84d0e 100644 +index a94e49c..491c166 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1163,16 +1163,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) @@ -648,7 +648,7 @@ index a94e49c..ad84d0e 100644 if (limit - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) return addr; addr = vma->vm_end; vma = vma->vm_next; @@ -1832,7 +1832,7 @@ index 3191cd6..68bd2d7 100644 return; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c -index f5abc51..5f5262a 100644 +index f5abc51..aac904f 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -30,6 +30,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, @@ -1860,7 +1860,7 @@ index f5abc51..5f5262a 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (len > mm->cached_hole_size) { @@ -1888,7 +1888,7 @@ index f5abc51..5f5262a 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* * Remember the place where we stopped the search: */ @@ -2151,7 +2151,7 @@ index f8e16b2..c73ff79 100644 }; diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c -index 385fd30..3aaf4fe 100644 +index 385fd30..27cf8ba 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi @@ -2168,7 +2168,7 @@ index 385fd30..3aaf4fe 100644 vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) goto success; } @@ -2177,7 +2177,7 @@ index 385fd30..3aaf4fe 100644 if (addr > limit) break; - if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) goto success; addr = vma->vm_end; } @@ -2186,7 +2186,7 @@ index 385fd30..3aaf4fe 100644 if (addr > limit) break; - if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) goto success; addr = vma->vm_end; } @@ -2761,7 +2761,7 @@ index 
f178270..2dcff27 100644 vma_pages(vma)); up_write(&task->mm->mmap_sem); diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c -index 609d500..acd0429 100644 +index 609d500..254a3d7 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len @@ -2800,7 +2800,7 @@ index 609d500..acd0429 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* Remember the address where we stopped this search: */ mm->free_area_cache = addr + len; return addr; @@ -2884,7 +2884,7 @@ index 19261a9..1611b7a 100644 /* * If for any reason at all we couldn't handle the fault, make diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c -index b0f6157..f83c84f 100644 +index b0f6157..0a3ade5 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -150,6 +150,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u @@ -2900,7 +2900,7 @@ index b0f6157..f83c84f 100644 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) return -ENOMEM; - if (!vmm || (addr + len) <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len, offset)) ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) return addr; addr = ALIGN(vmm->vm_end, HPAGE_SIZE); } @@ -3736,7 +3736,7 @@ index cb71f3d..306f0c0 100644 #ifdef CONFIG_BLK_DEV_INITRD if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index 9147391..d09f456 100644 +index 9147391..1c99b82 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -33,9 +33,11 @@ @@ -3757,7 +3757,7 @@ index 9147391..d09f456 100644 if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) return addr; addr = vma->vm_end; } @@ -3781,7 +3781,7 @@ index 9147391..d09f456 100644 if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, rand_offset)) ++ if (check_heap_stack_gap(vma, &addr, len, rand_offset)) return addr; addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; if (addr < vma->vm_end) /* handle wraparound */ @@ -5283,7 +5283,7 @@ index 0d957a4..eae383e 100644 mm->unmap_area = arch_unmap_area_topdown; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c -index ba51948..0e45275 100644 +index ba51948..9190915 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, @@ -5291,7 +5291,7 @@ index ba51948..0e45275 100644 return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); -+ return check_heap_stack_gap(vma, addr, len, 0); ++ return check_heap_stack_gap(vma, &addr, len, 0); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) @@ -5300,7 +5300,7 @@ index ba51948..0e45275 100644 continue; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, 0)) { ++ if (check_heap_stack_gap(vma, &addr, len, 0)) { /* * Remember the place where we stopped the search: */ @@ -5327,7 +5327,7 @@ index ba51948..0e45275 100644 */ vma = find_vma(mm, addr); - if (!vma || (addr + len) <= vma->vm_start) { -+ if 
(check_heap_stack_gap(vma, addr, len, 0)) { ++ if (check_heap_stack_gap(vma, &addr, len, 0)) { /* remember the address as a hint for next time */ if (use_cache) mm->free_area_cache = addr; @@ -5894,7 +5894,7 @@ index 3e532d0..9faa306 100644 #ifdef CONFIG_CPU_LITTLE_ENDIAN .gdb_bpt_instr = { 0x3c, 0xc3 }, diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c -index afeb710..e8366ef 100644 +index afeb710..8da5c79 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, @@ -5911,7 +5911,7 @@ index afeb710..e8366ef 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } @@ -5920,7 +5920,7 @@ index afeb710..e8366ef 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -5938,20 +5938,25 @@ index afeb710..e8366ef 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } -@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -178,28 +178,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -5970,11 +5975,11 @@ index afeb710..e8366ef 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -209,10 +210,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -6899,7 +6904,7 @@ index 6edc4e5..06a69b4 100644 #include <asm/sigcontext.h> #include <asm/fpumacro.h> diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c -index 3a82e65..ad9761e 100644 +index 3a82e65..c066b9b 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -40,6 +40,7 @@ asmlinkage unsigned long sys_getpagesize(void) @@ -6924,12 +6929,12 @@ index 3a82e65..ad9761e 100644 if (TASK_SIZE - PAGE_SIZE - len < addr) return -ENOMEM; 
- if (!vmm || addr + len <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len, offset)) ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c -index cfa0e19..23de658 100644 +index cfa0e19..fd81c1b 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -120,12 +120,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi @@ -6964,7 +6969,7 @@ index cfa0e19..23de658 100644 vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } @@ -6991,7 +6996,7 @@ index cfa0e19..23de658 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -7018,20 +7023,25 @@ index cfa0e19..23de658 100644 vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } -@@ -259,7 +263,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -258,28 +262,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -268,18 +272,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -7050,11 +7060,11 @@ index cfa0e19..23de658 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -289,10 +293,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -289,10 +294,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -7067,7 +7077,7 @@ index cfa0e19..23de658 100644 bottomup: /* -@@ -366,6 +368,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -366,6 +369,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; @@ -7078,7 +7088,7 @@ index cfa0e19..23de658 100644 if (current->flags & PF_RANDOMIZE) { random_factor = get_random_int(); if (test_thread_flag(TIF_32BIT)) -@@ -384,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -384,6 +391,12 @@ void arch_pick_mmap_layout(struct 
mm_struct *mm) current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; @@ -7091,7 +7101,7 @@ index cfa0e19..23de658 100644 mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { -@@ -398,6 +410,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -398,6 +411,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); @@ -8648,7 +8658,7 @@ index 43b0da9..f9f9985 100644 * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c -index f27d103..d4fd7ba 100644 +index f27d103..7688136 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -36,6 +36,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, @@ -8664,7 +8674,7 @@ index f27d103..d4fd7ba 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -8676,16 +8686,21 @@ index f27d103..d4fd7ba 100644 /* This should only ever run for 32-bit processes. */ BUG_ON(!test_thread_flag(TIF_32BIT)); -@@ -108,7 +110,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -107,26 +109,28 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -117,16 +119,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -8701,11 +8716,11 @@ index f27d103..d4fd7ba 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -136,8 +139,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -136,8 +140,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -8716,7 +8731,7 @@ index f27d103..d4fd7ba 100644 bottomup: /* -@@ -165,6 +168,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -165,6 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; @@ -8724,13 +8739,13 @@ index f27d103..d4fd7ba 100644 if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; -@@ -183,8 +187,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -183,8 +188,7 @@ 
hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) @@ -19995,7 +20010,7 @@ index 34c3308..162120a 100644 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S -index 780cd92..758b2a6 100644 +index 780cd92..0e071b9 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -19,6 +19,8 @@ @@ -20020,7 +20035,7 @@ index 780cd92..758b2a6 100644 .text __HEAD -@@ -85,35 +93,23 @@ startup_64: +@@ -85,35 +93,22 @@ startup_64: */ addq %rbp, init_level4_pgt + 0(%rip) addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) @@ -20037,12 +20052,11 @@ index 780cd92..758b2a6 100644 - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) -+ + +- addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) -- - /* Add an Identity mapping if I am above 1G */ - leaq _text(%rip), %rdi - andq $PMD_PAGE_MASK, %rdi @@ -20067,7 +20081,7 @@ index 780cd92..758b2a6 100644 /* * Fixup the kernel text+data virtual addresses. Note that -@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64) +@@ -161,8 +156,8 @@ ENTRY(secondary_startup_64) * after the boot processor executes this code. */ @@ -20078,7 +20092,7 @@ index 780cd92..758b2a6 100644 movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ -@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64) +@@ -184,9 +179,16 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ @@ -20096,7 +20110,7 @@ index 780cd92..758b2a6 100644 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64) +@@ -249,6 +251,7 @@ ENTRY(secondary_startup_64) * jump. In addition we need to ensure %cs is set so we make this * a far return. 
*/ @@ -20104,7 +20118,7 @@ index 780cd92..758b2a6 100644 movq initial_code(%rip),%rax pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs -@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64) +@@ -262,16 +265,16 @@ ENTRY(secondary_startup_64) .quad x86_64_start_kernel ENTRY(initial_gs) .quad INIT_PER_CPU_VAR(irq_stack_union) @@ -20123,7 +20137,7 @@ index 780cd92..758b2a6 100644 #ifdef CONFIG_EARLY_PRINTK .globl early_idt_handlers early_idt_handlers: -@@ -316,18 +320,23 @@ ENTRY(early_idt_handler) +@@ -316,18 +319,23 @@ ENTRY(early_idt_handler) #endif /* EARLY_PRINTK */ 1: hlt jmp 1b @@ -20148,7 +20162,7 @@ index 780cd92..758b2a6 100644 #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ ENTRY(name) -@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt) +@@ -350,13 +358,41 @@ NEXT_PAGE(init_level4_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE @@ -20190,7 +20204,7 @@ index 780cd92..758b2a6 100644 NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 -@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt) +@@ -364,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE @@ -20222,7 +20236,7 @@ index 780cd92..758b2a6 100644 NEXT_PAGE(level2_kernel_pgt) /* -@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt) +@@ -390,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt) * If you want to increase this then increase MODULES_VADDR * too.) */ @@ -22252,7 +22266,7 @@ index 3149032..14f1053 100644 return 0; /* 64-bit mode: REX prefix */ diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c -index dee1ff7..585a36b 100644 +index dee1ff7..59c4a25 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c @@ -24,6 +24,22 @@ @@ -22278,7 +22292,7 @@ index dee1ff7..585a36b 100644 /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. 
Linux/i386 didn't use to be able to handle more than -@@ -58,6 +74,214 @@ out: +@@ -58,6 +74,215 @@ out: return err; } @@ -22312,7 +22326,7 @@ index dee1ff7..585a36b 100644 + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } + } @@ -22354,7 +22368,7 @@ index dee1ff7..585a36b 100644 + } + return -ENOMEM; + } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; @@ -22411,7 +22425,7 @@ index dee1ff7..585a36b 100644 + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } + } @@ -22427,10 +22441,11 @@ index dee1ff7..585a36b 100644 + + /* make sure it can fit in the remaining address space */ + if (addr > len) { -+ vma = find_vma(mm, addr-len); -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + /* remember the address as a hint for next time */ -+ return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); + } + + if (mm->mmap_base < len) @@ -22445,7 +22460,7 @@ index dee1ff7..585a36b 100644 + * return with success: + */ + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + @@ -22493,7 +22508,7 @@ index dee1ff7..585a36b 100644 struct sel_arg_struct { unsigned long n; -@@ -93,7 +317,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, +@@ -93,7 +318,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL); case SEMTIMEDOP: return sys_semtimedop(first, (struct sembuf __user *)ptr, second, @@ -22502,7 +22517,7 @@ index dee1ff7..585a36b 100644 case SEMGET: return sys_semget(first, second, third); -@@ -140,7 +364,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, +@@ -140,7 +365,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, ret = do_shmat(first, (char __user *) ptr, second, &raddr); if (ret) return ret; @@ -22511,7 +22526,7 @@ index dee1ff7..585a36b 100644 } case 1: /* iBCS2 emulator entry point */ if (!segment_eq(get_fs(), get_ds())) -@@ -207,17 +431,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name) +@@ -207,17 +432,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name) return error; } @@ -22530,7 +22545,7 @@ index dee1ff7..585a36b 100644 - return __res; -} diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 8aa2057..4db7318 100644 +index 8aa2057..489c52d 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -32,8 +32,8 @@ out: @@ -22577,7 +22592,7 @@ index 8aa2057..4db7318 100644 vma = find_vma(mm, addr); - if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (end - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) @@ -22586,7 +22601,7 @@ index 
8aa2057..4db7318 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* * Remember the place where we stopped the search: */ @@ -22617,31 +22632,37 @@ index 8aa2057..4db7318 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } /* check if free_area_cache is useful for us */ -@@ -162,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -161,10 +171,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (addr > len) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ - return mm->free_area_cache = addr-len; +- return mm->free_area_cache = addr-len; ++ return mm->free_area_cache = addr; } -@@ -179,7 +189,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + if (mm->mmap_base < len) +@@ -179,7 +190,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ return mm->free_area_cache = addr; -@@ -188,8 +198,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -188,8 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -22652,7 +22673,7 @@ index 8aa2057..4db7318 100644 bottomup: /* -@@ -198,13 +208,21 @@ bottomup: +@@ -198,13 +209,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. 
*/ @@ -27538,7 +27559,7 @@ index 63a6ba6..79abd7a 100644 return (void *)vaddr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c -index f46c3407..4984c26 100644 +index f46c3407..e840ce6 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -267,13 +267,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, @@ -27590,7 +27611,7 @@ index f46c3407..4984c26 100644 - mm->free_area_cache = addr + len; - return addr; - } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; @@ -27646,7 +27667,7 @@ index f46c3407..4984c26 100644 */ - if (addr + len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - mm->cached_hole_size = largest_hole; - return (mm->free_area_cache = addr); @@ -27755,12 +27776,12 @@ index f46c3407..4984c26 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 73ffd55..e88dff5 100644 +index 73ffd55..628bc4e 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -1,6 +1,7 @@ @@ -27801,7 +27822,22 @@ index 73ffd55..e88dff5 100644 printk(KERN_INFO "NX (Execute Disable) protection: active\n"); /* Enable PSE if available */ -@@ -329,10 +327,35 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, +@@ -283,7 +281,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + + #ifdef CONFIG_X86_32 + early_ioremap_page_table_range_init(); ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ load_cr3(get_cpu_pgd(0)); ++#elif defined(CONFIG_X86_32) + load_cr3(swapper_pg_dir); + #endif + +@@ -329,10 +334,35 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. 
*/ @@ -27837,7 +27873,7 @@ index 73ffd55..e88dff5 100644 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) -@@ -377,8 +400,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) +@@ -377,8 +407,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) #endif } @@ -27956,7 +27992,7 @@ index 73ffd55..e88dff5 100644 (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 30938c1..bda3d5d 100644 +index 30938c1..60601c8 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void) @@ -28202,20 +28238,7 @@ index 30938c1..bda3d5d 100644 PAGE_SIZE); if (bootmap == -1L) panic("Cannot find bootmem map of size %ld\n", bootmap_size); -@@ -864,6 +863,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - #ifdef CONFIG_FLATMEM - BUG_ON(!mem_map); - #endif -@@ -881,7 +886,7 @@ void __init mem_init(void) +@@ -881,7 +880,7 @@ void __init mem_init(void) set_highmem_pages_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; @@ -28224,7 +28247,7 @@ index 30938c1..bda3d5d 100644 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " -@@ -923,10 +928,10 @@ void __init mem_init(void) +@@ -923,10 +922,10 @@ void __init mem_init(void) ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, @@ -28238,7 +28261,7 @@ index 30938c1..bda3d5d 100644 ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* -@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void) +@@ -1007,6 +1006,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; @@ -28246,7 +28269,7 @@ index 30938c1..bda3d5d 100644 pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); -@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void) +@@ -1021,6 +1021,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; @@ -28254,7 +28277,7 @@ index 30938c1..bda3d5d 100644 pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); -@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void) +@@ -1032,6 +1033,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; @@ -28263,7 +28286,7 @@ index 30938c1..bda3d5d 100644 printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index ccbc61b..704b879 100644 +index ccbc61b..751fd75 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -123,7 +123,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) @@ -28329,20 +28352,7 @@ index ccbc61b..704b879 100644 spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); -@@ -675,6 +675,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - /* clear_bss() already clear the empty_zero_page */ - - reservedpages = 0; -@@ -864,8 +870,8 @@ int kern_addr_valid(unsigned long addr) +@@ -864,8 +864,8 @@ int kern_addr_valid(unsigned long addr) static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + 
(VSYSCALL_MAPPED_PAGES * PAGE_SIZE), @@ -28353,7 +28363,7 @@ index ccbc61b..704b879 100644 }; struct vm_area_struct *get_gate_vma(struct task_struct *tsk) -@@ -899,7 +905,7 @@ int in_gate_area_no_task(unsigned long addr) +@@ -899,7 +899,7 @@ int in_gate_area_no_task(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { @@ -77538,10 +77548,20 @@ index a5bf577..6d19845 100644 return hit; } diff --git a/fs/compat.c b/fs/compat.c -index 46b93d1..191dbaa 100644 +index 46b93d1..06d6d11 100644 --- a/fs/compat.c +++ b/fs/compat.c -@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _ +@@ -71,6 +71,9 @@ int compat_printk(const char *fmt, ...) + return ret; + } + ++extern int gr_process_kernel_exec_ban(void); ++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm); ++ + #include "read_write.h" + + /* +@@ -133,8 +136,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) { compat_ino_t ino = stat->ino; @@ -77552,7 +77572,7 @@ index 46b93d1..191dbaa 100644 int err; SET_UID(uid, stat->uid); -@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) +@@ -533,7 +536,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) set_fs(KERNEL_DS); /* The __user pointer cast is valid because of the set_fs() */ @@ -77561,7 +77581,7 @@ index 46b93d1..191dbaa 100644 set_fs(oldfs); /* truncating is ok because it's a user address */ if (!ret) -@@ -830,6 +830,7 @@ struct compat_old_linux_dirent { +@@ -830,6 +833,7 @@ struct compat_old_linux_dirent { struct compat_readdir_callback { struct compat_old_linux_dirent __user *dirent; @@ -77569,7 +77589,7 @@ index 46b93d1..191dbaa 100644 int result; }; -@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, +@@ -847,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, buf->result = -EOVERFLOW; return -EOVERFLOW; } @@ -77580,7 +77600,7 @@ index 46b93d1..191dbaa 100644 buf->result++; dirent = buf->dirent; if (!access_ok(VERIFY_WRITE, dirent, -@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, +@@ -879,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, buf.result = 0; buf.dirent = dirent; @@ -77588,7 +77608,7 @@ index 46b93d1..191dbaa 100644 error = vfs_readdir(file, compat_fillonedir, &buf); if (buf.result) -@@ -899,6 +905,7 @@ struct compat_linux_dirent { +@@ -899,6 +908,7 @@ struct compat_linux_dirent { struct compat_getdents_callback { struct compat_linux_dirent __user *current_dir; struct compat_linux_dirent __user *previous; @@ -77596,7 +77616,7 @@ index 46b93d1..191dbaa 100644 int count; int error; }; -@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, +@@ -919,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, buf->error = -EOVERFLOW; return -EOVERFLOW; } @@ -77607,7 +77627,7 @@ index 46b93d1..191dbaa 100644 dirent = buf->previous; if (dirent) { if (__put_user(offset, &dirent->d_off)) -@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, +@@ -966,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, buf.previous = NULL; buf.count = count; buf.error = 0; @@ -77615,7 +77635,7 @@ index 46b93d1..191dbaa 100644 error = vfs_readdir(file, compat_filldir, &buf); if (error >= 0) -@@ -987,6 +999,7 @@ out: +@@ -987,6 +1002,7 @@ out: struct 
compat_getdents_callback64 { struct linux_dirent64 __user *current_dir; struct linux_dirent64 __user *previous; @@ -77623,7 +77643,7 @@ index 46b93d1..191dbaa 100644 int count; int error; }; -@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t +@@ -1003,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; @@ -77634,7 +77654,7 @@ index 46b93d1..191dbaa 100644 dirent = buf->previous; if (dirent) { -@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, +@@ -1054,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, buf.previous = NULL; buf.count = count; buf.error = 0; @@ -77650,7 +77670,7 @@ index 46b93d1..191dbaa 100644 if (__put_user_unaligned(d_off, &lastdirent->d_off)) error = -EFAULT; else -@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, +@@ -1098,7 +1119,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, * verify all the pointers */ ret = -EINVAL; @@ -77659,7 +77679,7 @@ index 46b93d1..191dbaa 100644 goto out; if (!file->f_op) goto out; -@@ -1460,6 +1478,9 @@ out: +@@ -1460,6 +1481,9 @@ out: return ret; } @@ -77669,7 +77689,7 @@ index 46b93d1..191dbaa 100644 /* * compat_do_execve() is mostly a copy of do_execve(), with the exception * that it processes 32 bit argv and envp pointers. -@@ -1469,11 +1490,35 @@ int compat_do_execve(char * filename, +@@ -1469,11 +1493,35 @@ int compat_do_execve(char * filename, compat_uptr_t __user *envp, struct pt_regs * regs) { @@ -77705,7 +77725,7 @@ index 46b93d1..191dbaa 100644 retval = unshare_files(&displaced); if (retval) -@@ -1499,12 +1544,26 @@ int compat_do_execve(char * filename, +@@ -1499,12 +1547,21 @@ int compat_do_execve(char * filename, if (IS_ERR(file)) goto out_unmark; @@ -77720,11 +77740,6 @@ index 46b93d1..191dbaa 100644 bprm->filename = filename; bprm->interp = filename; -+ if (gr_process_user_ban()) { -+ retval = -EPERM; -+ goto out_file; -+ } -+ + retval = -EACCES; + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) + goto out_file; @@ -77732,7 +77747,7 @@ index 46b93d1..191dbaa 100644 retval = bprm_mm_init(bprm); if (retval) goto out_file; -@@ -1521,24 +1580,63 @@ int compat_do_execve(char * filename, +@@ -1521,24 +1578,68 @@ int compat_do_execve(char * filename, if (retval < 0) goto out; @@ -77751,6 +77766,11 @@ index 46b93d1..191dbaa 100644 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; +#endif + ++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) { ++ retval = -EPERM; ++ goto out_fail; ++ } ++ + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out_fail; @@ -77800,7 +77820,7 @@ index 46b93d1..191dbaa 100644 current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); -@@ -1547,6 +1645,14 @@ int compat_do_execve(char * filename, +@@ -1547,6 +1648,14 @@ int compat_do_execve(char * filename, put_files_struct(displaced); return retval; @@ -77815,7 +77835,7 @@ index 46b93d1..191dbaa 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1717,6 +1823,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, +@@ -1717,6 +1826,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, struct fdtable *fdt; long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; @@ -77824,7 +77844,7 @@ index 46b93d1..191dbaa 100644 if (n < 0) goto out_nofds; -@@ -2157,7 +2265,7 @@ asmlinkage 
long compat_sys_nfsservctl(int cmd, +@@ -2157,7 +2268,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd, oldfs = get_fs(); set_fs(KERNEL_DS); /* The __user pointer casts are valid because of the set_fs() */ @@ -78102,7 +78122,7 @@ index 83fbd64..8353dce 100644 out_free_fd: diff --git a/fs/exec.c b/fs/exec.c -index feb2435..04123c5 100644 +index feb2435..0d2ad2b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -56,12 +56,34 @@ @@ -78140,7 +78160,17 @@ index feb2435..04123c5 100644 int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; unsigned int core_pipe_limit; -@@ -178,18 +200,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -72,6 +94,9 @@ int suid_dumpable = 0; + static LIST_HEAD(formats); + static DEFINE_RWLOCK(binfmt_lock); + ++extern int gr_process_kernel_exec_ban(void); ++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm); ++ + int __register_binfmt(struct linux_binfmt * fmt, int insert) + { + if (!fmt) +@@ -178,18 +203,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; @@ -78162,7 +78192,7 @@ index feb2435..04123c5 100644 return NULL; if (write) { -@@ -205,6 +219,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -205,6 +222,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (size <= ARG_MAX) return page; @@ -78180,7 +78210,7 @@ index feb2435..04123c5 100644 /* * Limit to 1/4-th the stack size for the argv+env strings. * This ensures that: -@@ -263,6 +288,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -263,6 +291,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS; @@ -78192,7 +78222,7 @@ index feb2435..04123c5 100644 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); -@@ -276,6 +306,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -276,6 +309,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -78205,7 +78235,7 @@ index feb2435..04123c5 100644 return 0; err: up_write(&mm->mmap_sem); -@@ -400,8 +436,9 @@ static int count(char __user * __user * argv, int max) +@@ -400,8 +439,9 @@ static int count(char __user * __user * argv, int max) if (!p) break; argv++; @@ -78216,7 +78246,7 @@ index feb2435..04123c5 100644 if (fatal_signal_pending(current)) return -ERESTARTNOHAND; -@@ -510,7 +547,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm) +@@ -510,7 +550,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm) int r; mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); @@ -78225,7 +78255,7 @@ index feb2435..04123c5 100644 set_fs(oldfs); return r; } -@@ -540,7 +577,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -540,7 +580,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) unsigned long new_end = old_end - shift; struct mmu_gather *tlb; @@ -78235,7 +78265,7 @@ index feb2435..04123c5 100644 /* * ensure there are no vmas between where we want to go -@@ -549,6 +587,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -549,6 +590,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) if (vma != find_vma(mm, new_start)) return -EFAULT; @@ -78246,7 
+78276,7 @@ index feb2435..04123c5 100644 /* * cover the whole range: [new_start, old_end) */ -@@ -630,10 +672,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -630,10 +675,6 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); @@ -78257,7 +78287,7 @@ index feb2435..04123c5 100644 stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; -@@ -645,6 +683,14 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -645,6 +686,14 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->exec -= stack_shift; down_write(&mm->mmap_sem); @@ -78272,7 +78302,7 @@ index feb2435..04123c5 100644 vm_flags = VM_STACK_FLAGS; /* -@@ -658,19 +704,24 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -658,19 +707,24 @@ int setup_arg_pages(struct linux_binprm *bprm, vm_flags &= ~VM_EXEC; vm_flags |= mm->def_flags; @@ -78304,7 +78334,7 @@ index feb2435..04123c5 100644 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; stack_size = vma->vm_end - vma->vm_start; /* -@@ -690,6 +741,27 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -690,6 +744,27 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_base = vma->vm_start - stack_expand; #endif ret = expand_stack(vma, stack_base); @@ -78332,7 +78362,7 @@ index feb2435..04123c5 100644 if (ret) ret = -EFAULT; -@@ -721,6 +793,8 @@ struct file *open_exec(const char *name) +@@ -721,6 +796,8 @@ struct file *open_exec(const char *name) fsnotify_open(file->f_path.dentry); @@ -78341,7 +78371,7 @@ index feb2435..04123c5 100644 err = deny_write_access(file); if (err) goto exit; -@@ -744,7 +818,7 @@ int kernel_read(struct file *file, loff_t offset, +@@ -744,7 +821,7 @@ int kernel_read(struct file *file, loff_t offset, old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ @@ -78350,7 +78380,7 @@ index feb2435..04123c5 100644 set_fs(old_fs); return result; } -@@ -985,6 +1059,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) +@@ -985,6 +1062,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) perf_event_comm(tsk); } @@ -78372,7 +78402,7 @@ index feb2435..04123c5 100644 int flush_old_exec(struct linux_binprm * bprm) { int retval; -@@ -999,6 +1088,7 @@ int flush_old_exec(struct linux_binprm * bprm) +@@ -999,6 +1091,7 @@ int flush_old_exec(struct linux_binprm * bprm) set_mm_exe_file(bprm->mm, bprm->file); @@ -78380,7 +78410,7 @@ index feb2435..04123c5 100644 /* * Release all of the old mmap stuff */ -@@ -1023,10 +1113,6 @@ EXPORT_SYMBOL(flush_old_exec); +@@ -1023,10 +1116,6 @@ EXPORT_SYMBOL(flush_old_exec); void setup_new_exec(struct linux_binprm * bprm) { @@ -78391,7 +78421,7 @@ index feb2435..04123c5 100644 arch_pick_mmap_layout(current->mm); /* This is the point of no return */ -@@ -1037,18 +1123,7 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1037,18 +1126,7 @@ void setup_new_exec(struct linux_binprm * bprm) else set_dumpable(current->mm, suid_dumpable); @@ -78411,7 +78441,7 @@ index feb2435..04123c5 100644 /* Set the new mm task size. We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on -@@ -1065,13 +1140,6 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1065,13 +1143,6 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } @@ -78425,7 +78455,7 @@ index feb2435..04123c5 100644 /* An exec changes our domain. 
We are no longer part of the thread group */ -@@ -1090,14 +1158,14 @@ EXPORT_SYMBOL(setup_new_exec); +@@ -1090,14 +1161,14 @@ EXPORT_SYMBOL(setup_new_exec); */ int prepare_bprm_creds(struct linux_binprm *bprm) { @@ -78442,7 +78472,7 @@ index feb2435..04123c5 100644 return -ENOMEM; } -@@ -1105,7 +1173,7 @@ void free_bprm(struct linux_binprm *bprm) +@@ -1105,7 +1176,7 @@ void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { @@ -78451,7 +78481,7 @@ index feb2435..04123c5 100644 abort_creds(bprm->cred); } /* If a binfmt changed the interp, free it. */ -@@ -1135,19 +1203,28 @@ void install_exec_creds(struct linux_binprm *bprm) +@@ -1135,19 +1206,28 @@ void install_exec_creds(struct linux_binprm *bprm) commit_creds(bprm->cred); bprm->cred = NULL; @@ -78482,7 +78512,7 @@ index feb2435..04123c5 100644 * PTRACE_ATTACH */ int check_unsafe_exec(struct linux_binprm *bprm) -@@ -1167,7 +1244,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) +@@ -1167,7 +1247,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) } rcu_read_unlock(); @@ -78491,7 +78521,7 @@ index feb2435..04123c5 100644 bprm->unsafe |= LSM_UNSAFE_SHARE; } else { res = -EAGAIN; -@@ -1354,6 +1431,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) +@@ -1354,6 +1434,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) EXPORT_SYMBOL(search_binary_handler); @@ -78513,7 +78543,7 @@ index feb2435..04123c5 100644 /* * sys_execve() executes a new program. */ -@@ -1362,11 +1454,35 @@ int do_execve(char * filename, +@@ -1362,11 +1457,35 @@ int do_execve(char * filename, char __user *__user *envp, struct pt_regs * regs) { @@ -78549,7 +78579,7 @@ index feb2435..04123c5 100644 retval = unshare_files(&displaced); if (retval) -@@ -1392,12 +1508,27 @@ int do_execve(char * filename, +@@ -1392,12 +1511,22 @@ int do_execve(char * filename, if (IS_ERR(file)) goto out_unmark; @@ -78564,11 +78594,6 @@ index feb2435..04123c5 100644 bprm->filename = filename; bprm->interp = filename; -+ if (gr_process_user_ban()) { -+ retval = -EPERM; -+ goto out_file; -+ } -+ + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + retval = -EACCES; + goto out_file; @@ -78577,7 +78602,7 @@ index feb2435..04123c5 100644 retval = bprm_mm_init(bprm); if (retval) goto out_file; -@@ -1414,25 +1545,66 @@ int do_execve(char * filename, +@@ -1414,25 +1543,71 @@ int do_execve(char * filename, if (retval < 0) goto out; @@ -78597,6 +78622,11 @@ index feb2435..04123c5 100644 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; +#endif + ++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) { ++ retval = -EPERM; ++ goto out_fail; ++ } ++ + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out_fail; @@ -78648,7 +78678,7 @@ index feb2435..04123c5 100644 current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); -@@ -1441,6 +1613,14 @@ int do_execve(char * filename, +@@ -1441,6 +1616,14 @@ int do_execve(char * filename, put_files_struct(displaced); return retval; @@ -78663,7 +78693,7 @@ index feb2435..04123c5 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1606,6 +1786,251 @@ out: +@@ -1606,6 +1789,251 @@ out: return ispipe; } @@ -78915,7 +78945,7 @@ index feb2435..04123c5 100644 static int zap_process(struct task_struct *start) { struct task_struct *t; -@@ -1808,17 +2233,17 @@ static void wait_for_dump_helpers(struct file *file) +@@ -1808,17 +2236,17 @@ static void wait_for_dump_helpers(struct file *file) pipe = 
file->f_path.dentry->d_inode->i_pipe; pipe_lock(pipe); @@ -78938,7 +78968,7 @@ index feb2435..04123c5 100644 pipe_unlock(pipe); } -@@ -1841,10 +2266,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -1841,10 +2269,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) char **helper_argv = NULL; int helper_argc = 0; int dump_count = 0; @@ -78953,7 +78983,7 @@ index feb2435..04123c5 100644 binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; -@@ -1889,6 +2317,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -1889,6 +2320,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) */ clear_thread_flag(TIF_SIGPENDING); @@ -78962,7 +78992,7 @@ index feb2435..04123c5 100644 /* * lock_kernel() because format_corename() is controlled by sysctl, which * uses lock_kernel() -@@ -1923,7 +2353,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -1923,7 +2356,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) goto fail_unlock; } @@ -78971,7 +79001,7 @@ index feb2435..04123c5 100644 if (core_pipe_limit && (core_pipe_limit < dump_count)) { printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", task_tgid_vnr(current), current->comm); -@@ -1987,7 +2417,7 @@ close_fail: +@@ -1987,7 +2420,7 @@ close_fail: filp_close(file, NULL); fail_dropcount: if (dump_count) @@ -81068,7 +81098,7 @@ index 43022f3..7298079 100644 if (!sbi) return -ENOMEM; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c -index 2179de8..2410bd6 100644 +index 2179de8..78ccc77 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -134,6 +134,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, @@ -81099,7 +81129,7 @@ index 2179de8..2410bd6 100644 full_search: addr = ALIGN(start_addr, huge_page_size(h)); -@@ -169,14 +174,14 @@ full_search: +@@ -169,15 +174,17 @@ full_search: * Start a new search - just in case we missed * some holes. */ @@ -81113,11 +81143,14 @@ index 2179de8..2410bd6 100644 } - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { ++ mm->free_area_cache = addr + len; return addr; ++ } addr = ALIGN(vma->vm_end, huge_page_size(h)); } -@@ -897,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = { + } +@@ -897,7 +904,7 @@ static struct file_system_type hugetlbfs_fs_type = { .kill_sb = kill_litter_super, }; @@ -85442,10 +85475,10 @@ index e89734e..5e84d8d 100644 return 0; diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 -index 0000000..9351296 +index 0000000..2147ad0 --- /dev/null +++ b/grsecurity/Kconfig -@@ -0,0 +1,1013 @@ +@@ -0,0 +1,1014 @@ +# +# grecurity configuration +# @@ -85566,8 +85599,9 @@ index 0000000..9351296 + fork until the administrator is able to assess the situation and + restart the daemon. + In the suid/sgid case, the attempt is logged, the user has all their -+ processes terminated, and they are prevented from executing any further -+ processes for 15 minutes. ++ existing instances of the suid/sgid binary terminated and will ++ be unable to execute any suid/sgid binaries for 15 minutes. ++ + It is recommended that you also enable signal logging in the auditing + section so that logs are generated when a process triggers a suspicious + signal. 
@@ -86505,7 +86539,7 @@ index 0000000..1b9afa9 +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 -index 0000000..6c8c298 +index 0000000..56a4704 --- /dev/null +++ b/grsecurity/gracl.c @@ -0,0 +1,4203 @@ @@ -88822,7 +88856,7 @@ index 0000000..6c8c298 + return; +} + -+extern int __gr_process_user_ban(struct user_struct *user); ++extern int gr_process_kernel_setuid_ban(struct user_struct *user); + +int +gr_check_user_change(int real, int effective, int fs) @@ -88835,7 +88869,7 @@ index 0000000..6c8c298 + int effectiveok = 0; + int fsok = 0; + -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) + struct user_struct *user; + + if (real == -1) @@ -88845,7 +88879,7 @@ index 0000000..6c8c298 + if (user == NULL) + goto skipit; + -+ if (__gr_process_user_ban(user)) { ++ if (gr_process_kernel_setuid_ban(user)) { + /* for find_user */ + free_uid(user); + return 1; @@ -92050,7 +92084,7 @@ index 0000000..70b2179 +} diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c new file mode 100644 -index 0000000..f755034 +index 0000000..bfbe602 --- /dev/null +++ b/grsecurity/gracl_segv.c @@ -0,0 +1,301 @@ @@ -92288,7 +92322,7 @@ index 0000000..f755034 + if (likely(tsk != task)) { + // if this thread has the same subject as the one that triggered + // RES_CRASH and it's the same binary, kill it -+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) ++ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } + } while_each_thread(tsk2, tsk); @@ -94413,10 +94447,10 @@ index 0000000..78f8733 +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..d450a74 +index 0000000..d9d6bac --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,219 @@ +@@ -0,0 +1,243 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/delay.h> @@ -94518,7 +94552,7 @@ index 0000000..d450a74 + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) { + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; + daemon = 1; @@ -94535,14 +94569,15 @@ index 0000000..d450a74 + user = find_user(uid); + if (user == NULL) + goto unlock; -+ user->banned = 1; -+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME; -+ if (user->ban_expires == ~0UL) -+ user->ban_expires--; ++ user->suid_banned = 1; ++ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME; ++ if (user->suid_ban_expires == ~0UL) ++ user->suid_ban_expires--; + ++ /* only kill other threads of the same binary, from the same user */ + do_each_thread(tsk2, tsk) { + cred2 = __task_cred(tsk); -+ if (tsk != p && cred2->uid == uid) ++ if (tsk != p && cred2->uid == uid && gr_is_same_file(tsk->exec_file, p->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + } @@ -94553,7 +94588,7 @@ index 0000000..d450a74 + rcu_read_unlock(); + + if (uid) -+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, uid, GR_USER_BAN_TIME / 60); + else if (daemon) + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); +#endif @@ -94598,11 +94633,10 
@@ index 0000000..d450a74 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid); + /* we intentionally leak this ref */ + user = get_uid(current->cred->user); -+ if (user) { -+ user->banned = 1; -+ user->ban_expires = ~0UL; -+ } ++ if (user) ++ user->kernel_banned = 1; + ++ /* kill all processes of this user */ + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + cred = __task_cred(tsk); @@ -94614,25 +94648,49 @@ index 0000000..d450a74 +#endif +} + -+int __gr_process_user_ban(struct user_struct *user) ++#ifdef CONFIG_GRKERNSEC_BRUTE ++static bool suid_ban_expired(struct user_struct *user) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ if (unlikely(user->banned)) { -+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) { -+ user->banned = 0; -+ user->ban_expires = 0; -+ free_uid(user); -+ } else -+ return -EPERM; ++ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) { ++ user->suid_banned = 0; ++ user->suid_ban_expires = 0; ++ free_uid(user); ++ return true; + } ++ ++ return false; ++} ++#endif ++ ++int gr_process_kernel_exec_ban(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(current->cred->user->kernel_banned)) ++ return -EPERM; ++#endif ++ return 0; ++} ++ ++int gr_process_kernel_setuid_ban(struct user_struct *user) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(user->kernel_banned)) ++ gr_fake_force_sig(SIGKILL, current); +#endif + return 0; +} + -+int gr_process_user_ban(void) ++int gr_process_suid_exec_ban(const struct linux_binprm *bprm) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ return __gr_process_user_ban(current->cred->user); ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct user_struct *user = current->cred->user; ++ if (unlikely(user->suid_banned)) { ++ if (suid_ban_expired(user)) ++ return 0; ++ /* disallow execution of suid binaries only */ ++ else if (bprm->cred->euid != current->cred->uid) ++ return -EPERM; ++ } +#endif + return 0; +} @@ -98275,10 +98333,10 @@ index 0000000..0b166f4 +#endif diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h new file mode 100644 -index 0000000..3322652 +index 0000000..0159022 --- /dev/null +++ b/include/linux/grinternal.h -@@ -0,0 +1,221 @@ +@@ -0,0 +1,233 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + @@ -98399,6 +98457,18 @@ index 0000000..3322652 + (pcred)->uid, (pcred)->euid, \ + (pcred)->gid, (pcred)->egid + ++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2) ++{ ++ if (file1 && file2) { ++ const struct inode *inode1 = file1->f_path.dentry->d_inode; ++ const struct inode *inode2 = file2->f_path.dentry->d_inode; ++ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev) ++ return true; ++ } ++ ++ return false; ++} ++ +#define GR_CHROOT_CAPS {{ \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ @@ -98502,10 +98572,10 @@ index 0000000..3322652 +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..18863d1 +index 0000000..607de0d --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,111 @@ +@@ -0,0 +1,112 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 
TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -98617,12 +98687,13 @@ index 0000000..18863d1 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " ++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..9ced8a0 +index 0000000..71812fd --- /dev/null +++ b/include/linux/grsecurity.h -@@ -0,0 +1,222 @@ +@@ -0,0 +1,221 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H +#include <linux/fs.h> @@ -98647,7 +98718,6 @@ index 0000000..9ced8a0 +void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); -+int gr_process_user_ban(void); + +char gr_roletype_to_char(void); + @@ -100311,7 +100381,7 @@ index 14a86bc..17d0700 100644 /* * CONFIG_RELAY kernel API, kernel/relay.c diff --git a/include/linux/sched.h b/include/linux/sched.h -index 73c3b9b..9dc8027 100644 +index 73c3b9b..a320221 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio; @@ -100348,7 +100418,7 @@ index 73c3b9b..9dc8027 100644 +} +#endif + -+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset); ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset); +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, @@ -100374,19 +100444,22 @@ index 73c3b9b..9dc8027 100644 }; /* Context switch must be unlocked if interrupts are to be enabled */ -@@ -723,6 +749,11 @@ struct user_struct { +@@ -723,6 +749,14 @@ struct user_struct { struct key *session_keyring; /* UID's default session keyring */ #endif -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ unsigned int banned; -+ unsigned long ban_expires; ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ unsigned char kernel_banned; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ unsigned char suid_banned; ++ unsigned long suid_ban_expires; +#endif + /* Hash table maintenance information */ struct hlist_node uidhash_node; uid_t uid; -@@ -1328,8 +1359,8 @@ struct task_struct { +@@ -1328,8 +1362,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ @@ -100397,7 +100470,7 @@ index 73c3b9b..9dc8027 100644 cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; -@@ -1343,16 +1374,6 @@ struct task_struct { +@@ -1343,16 +1377,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -100414,7 +100487,7 @@ index 73c3b9b..9dc8027 100644 char comm[TASK_COMM_LEN]; /* executable name excluding path - access 
with [gs]et_task_comm (which lock it with task_lock()) -@@ -1369,6 +1390,10 @@ struct task_struct { +@@ -1369,6 +1393,10 @@ struct task_struct { #endif /* CPU-specific state of this task */ struct thread_struct thread; @@ -100425,7 +100498,7 @@ index 73c3b9b..9dc8027 100644 /* filesystem information */ struct fs_struct *fs; /* open file information */ -@@ -1436,6 +1461,12 @@ struct task_struct { +@@ -1436,6 +1464,12 @@ struct task_struct { int hardirq_context; int softirq_context; #endif @@ -100438,7 +100511,7 @@ index 73c3b9b..9dc8027 100644 #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL u64 curr_chain_key; -@@ -1456,6 +1487,9 @@ struct task_struct { +@@ -1456,6 +1490,9 @@ struct task_struct { struct backing_dev_info *backing_dev_info; @@ -100448,7 +100521,7 @@ index 73c3b9b..9dc8027 100644 struct io_context *io_context; unsigned long ptrace_message; -@@ -1519,6 +1553,28 @@ struct task_struct { +@@ -1519,6 +1556,28 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -100477,7 +100550,7 @@ index 73c3b9b..9dc8027 100644 #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored adress in ret_stack */ int curr_ret_stack; -@@ -1542,6 +1598,56 @@ struct task_struct { +@@ -1542,6 +1601,56 @@ struct task_struct { #endif /* CONFIG_TRACING */ }; @@ -100534,7 +100607,7 @@ index 73c3b9b..9dc8027 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) -@@ -1740,7 +1846,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * +@@ -1740,7 +1849,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ #define PF_MEMALLOC 0x00000800 /* Allocating memory */ @@ -100543,7 +100616,7 @@ index 73c3b9b..9dc8027 100644 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ #define PF_FREEZING 0x00004000 /* freeze in progress. 
do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -@@ -1978,7 +2084,9 @@ void yield(void); +@@ -1978,7 +2087,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -100553,7 +100626,7 @@ index 73c3b9b..9dc8027 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2011,6 +2119,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2011,6 +2122,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -100561,7 +100634,7 @@ index 73c3b9b..9dc8027 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2155,7 +2264,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2155,7 +2267,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -100570,7 +100643,7 @@ index 73c3b9b..9dc8027 100644 extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2284,9 +2393,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2284,9 +2396,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -100582,7 +100655,7 @@ index 73c3b9b..9dc8027 100644 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } -@@ -2625,6 +2734,23 @@ static inline unsigned long rlimit_max(unsigned int limit) +@@ -2625,6 +2737,23 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } @@ -110812,7 +110885,7 @@ index 2d846cf..ca1e492 100644 capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); diff --git a/mm/mmap.c b/mm/mmap.c -index 4b80cbf..89afb9e 100644 +index 4b80cbf..5121f9e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -29,6 +29,7 @@ @@ -111248,7 +111321,7 @@ index 4b80cbf..89afb9e 100644 kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) -@@ -1255,6 +1429,62 @@ unacct_error: +@@ -1255,6 +1429,73 @@ unacct_error: return error; } @@ -111256,18 +111329,18 @@ index 4b80cbf..89afb9e 100644 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) +{ + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK)) -+ return (random32() & 0xFF) << PAGE_SHIFT; ++ return ((random32() & 0xFF) + 1) << PAGE_SHIFT; + + return 0; +} +#endif + -+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset) ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset) +{ + if (!vma) { +#ifdef CONFIG_STACK_GROWSUP -+ if (addr > sysctl_heap_stack_gap) -+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ if (*addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, *addr - sysctl_heap_stack_gap); + else + vma = find_vma(current->mm, 0); + if (vma && (vma->vm_flags & VM_GROWSUP)) @@ -111276,17 +111349,28 @@ index 4b80cbf..89afb9e 100644 + return true; + } + -+ if (addr + len > vma->vm_start) ++ if (*addr + len > vma->vm_start) + return false; + -+ if (vma->vm_flags & VM_GROWSDOWN) -+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++ if (offset) { ++ if (vma->vm_prev && *addr == vma->vm_prev->vm_end && (vma->vm_start - len - vma->vm_prev->vm_end >= offset)) { ++ *addr = vma->vm_prev->vm_end + offset; ++ return true; ++ } ++ return offset <= vma->vm_start - *addr - len; ++ } else if (vma->vm_flags & VM_GROWSDOWN) ++ return 
sysctl_heap_stack_gap <= vma->vm_start - *addr - len; +#ifdef CONFIG_STACK_GROWSUP -+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) -+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) { ++ if (*addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) ++ return true; ++ if (vma->vm_start - len - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) { ++ *addr = vma->vm_start - len; ++ return true; ++ } ++ return false; ++ } +#endif -+ else if (offset) -+ return offset <= vma->vm_start - addr - len; + + return true; +} @@ -111311,7 +111395,7 @@ index 4b80cbf..89afb9e 100644 /* Get an address range which is currently unmapped. * For shmat() with addr=0. * -@@ -1274,6 +1504,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, +@@ -1274,6 +1515,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; @@ -111319,7 +111403,7 @@ index 4b80cbf..89afb9e 100644 if (len > TASK_SIZE) return -ENOMEM; -@@ -1281,18 +1512,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, +@@ -1281,18 +1523,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; @@ -111335,7 +111419,7 @@ index 4b80cbf..89afb9e 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } @@ -111350,7 +111434,7 @@ index 4b80cbf..89afb9e 100644 } full_search: -@@ -1303,34 +1539,40 @@ full_search: +@@ -1303,34 +1550,40 @@ full_search: * Start a new search - just in case we missed * some holes. */ @@ -111371,7 +111455,7 @@ index 4b80cbf..89afb9e 100644 - mm->free_area_cache = addr + len; - return addr; - } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; @@ -111402,7 +111486,7 @@ index 4b80cbf..89afb9e 100644 mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } -@@ -1348,7 +1590,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1348,7 +1601,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -111412,7 +111496,7 @@ index 4b80cbf..89afb9e 100644 /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -1357,13 +1600,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1357,13 +1611,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (flags & MAP_FIXED) return addr; @@ -111429,31 +111513,37 @@ index 4b80cbf..89afb9e 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } /* check if free_area_cache is useful for us */ -@@ -1378,7 +1626,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1377,10 +1636,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (addr > len) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) -+ if 
(check_heap_stack_gap(vma, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -1395,7 +1643,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + if (mm->mmap_base < len) +@@ -1395,7 +1655,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); -@@ -1404,8 +1652,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1404,8 +1664,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -111464,7 +111554,7 @@ index 4b80cbf..89afb9e 100644 bottomup: /* -@@ -1414,13 +1662,21 @@ bottomup: +@@ -1414,13 +1674,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ @@ -111488,7 +111578,7 @@ index 4b80cbf..89afb9e 100644 mm->cached_hole_size = ~0UL; return addr; -@@ -1429,6 +1685,12 @@ bottomup: +@@ -1429,6 +1697,12 @@ bottomup: void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { @@ -111501,7 +111591,7 @@ index 4b80cbf..89afb9e 100644 /* * Is this a new hole at the highest possible address? */ -@@ -1436,8 +1698,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) +@@ -1436,8 +1710,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) mm->free_area_cache = addr; /* dont allow allocations above current base */ @@ -111513,7 +111603,7 @@ index 4b80cbf..89afb9e 100644 } unsigned long -@@ -1480,7 +1744,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +@@ -1480,7 +1756,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) if (mm) { /* Check the cache first. */ /* (Cache hit rate is typically around 35%.) 
*/ @@ -111522,7 +111612,7 @@ index 4b80cbf..89afb9e 100644 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { struct rb_node * rb_node; -@@ -1510,40 +1774,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +@@ -1510,40 +1786,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) EXPORT_SYMBOL(find_vma); @@ -111597,7 +111687,7 @@ index 4b80cbf..89afb9e 100644 /* * Verify that the stack growth is acceptable and -@@ -1561,6 +1834,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1561,6 +1846,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns return -ENOMEM; /* Stack limit test */ @@ -111605,7 +111695,7 @@ index 4b80cbf..89afb9e 100644 if (size > rlim[RLIMIT_STACK].rlim_cur) return -ENOMEM; -@@ -1570,6 +1844,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1570,6 +1856,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns unsigned long limit; locked = mm->locked_vm + grow; limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; @@ -111613,7 +111703,7 @@ index 4b80cbf..89afb9e 100644 if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } -@@ -1588,7 +1863,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1588,7 +1875,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns return -ENOMEM; /* Ok, everything looks good - let it rip */ @@ -111621,7 +111711,7 @@ index 4b80cbf..89afb9e 100644 if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); -@@ -1600,37 +1874,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1600,37 +1886,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. 
*/ @@ -111679,7 +111769,7 @@ index 4b80cbf..89afb9e 100644 unsigned long size, grow; size = address - vma->vm_start; -@@ -1643,6 +1928,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) +@@ -1643,6 +1940,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) vma->vm_end = address; } } @@ -111688,7 +111778,7 @@ index 4b80cbf..89afb9e 100644 anon_vma_unlock(vma); return error; } -@@ -1655,6 +1942,8 @@ static int expand_downwards(struct vm_area_struct *vma, +@@ -1655,6 +1954,8 @@ static int expand_downwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -111697,7 +111787,7 @@ index 4b80cbf..89afb9e 100644 /* * We must make sure the anon_vma is allocated -@@ -1668,6 +1957,15 @@ static int expand_downwards(struct vm_area_struct *vma, +@@ -1668,6 +1969,15 @@ static int expand_downwards(struct vm_area_struct *vma, if (error) return error; @@ -111713,7 +111803,7 @@ index 4b80cbf..89afb9e 100644 anon_vma_lock(vma); /* -@@ -1677,9 +1975,17 @@ static int expand_downwards(struct vm_area_struct *vma, +@@ -1677,9 +1987,17 @@ static int expand_downwards(struct vm_area_struct *vma, */ /* Somebody else might have raced and expanded it already */ @@ -111732,7 +111822,7 @@ index 4b80cbf..89afb9e 100644 size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; -@@ -1689,21 +1995,60 @@ static int expand_downwards(struct vm_area_struct *vma, +@@ -1689,21 +2007,60 @@ static int expand_downwards(struct vm_area_struct *vma, if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; @@ -111793,7 +111883,7 @@ index 4b80cbf..89afb9e 100644 return expand_upwards(vma, address); } -@@ -1727,6 +2072,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) +@@ -1727,6 +2084,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { @@ -111808,7 +111898,7 @@ index 4b80cbf..89afb9e 100644 return expand_downwards(vma, address); } -@@ -1768,7 +2121,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) +@@ -1768,7 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) do { long nrpages = vma_pages(vma); @@ -111823,7 +111913,7 @@ index 4b80cbf..89afb9e 100644 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); } while (vma); -@@ -1813,6 +2172,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -1813,6 +2184,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { @@ -111840,7 +111930,7 @@ index 4b80cbf..89afb9e 100644 rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; -@@ -1840,10 +2209,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1840,10 +2221,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, struct mempolicy *pol; struct vm_area_struct *new; @@ -111866,7 +111956,7 @@ index 4b80cbf..89afb9e 100644 if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; -@@ -1851,6 +2235,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1851,6 +2247,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, if (!new) return -ENOMEM; @@ -111883,7 +111973,7 @@ index 4b80cbf..89afb9e 100644 /* most fields are the same, copy all, and then fixup */ *new = *vma; -@@ -1861,8 +2255,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1861,8 +2267,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } @@ -111913,7 +112003,7 @@ index 4b80cbf..89afb9e 100644 kmem_cache_free(vm_area_cachep, new); return PTR_ERR(pol); } -@@ -1883,6 +2298,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1883,6 +2310,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, else vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -111942,7 +112032,7 @@ index 4b80cbf..89afb9e 100644 return 0; } -@@ -1891,11 +2328,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1891,11 +2340,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ @@ -111973,7 +112063,7 @@ index 4b80cbf..89afb9e 100644 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; -@@ -1959,6 +2415,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +@@ -1959,6 +2427,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) /* Fix up all other VM information */ remove_vma_list(mm, vma); @@ -111982,7 +112072,7 @@ index 4b80cbf..89afb9e 100644 return 0; } -@@ -1971,22 +2429,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +@@ -1971,22 +2441,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) profile_munmap(addr); @@ -112011,7 +112101,7 @@ index 4b80cbf..89afb9e 100644 /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some -@@ -2000,6 +2454,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2000,6 +2466,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; @@ -112019,7 +112109,7 @@ index 4b80cbf..89afb9e 100644 len = PAGE_ALIGN(len); if (!len) -@@ -2011,16 +2466,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2011,16 +2478,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -112051,7 +112141,7 @@ index 4b80cbf..89afb9e 100644 locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; -@@ -2037,22 +2506,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2037,22 +2518,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * Clear old maps. 
this also does some error checking for us */ @@ -112078,7 +112168,7 @@ index 4b80cbf..89afb9e 100644 return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ -@@ -2066,7 +2535,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2066,7 +2547,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { @@ -112087,7 +112177,7 @@ index 4b80cbf..89afb9e 100644 return -ENOMEM; } -@@ -2078,11 +2547,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2078,11 +2559,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: @@ -112102,7 +112192,7 @@ index 4b80cbf..89afb9e 100644 return addr; } -@@ -2129,8 +2599,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2129,8 +2611,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ @@ -112114,7 +112204,7 @@ index 4b80cbf..89afb9e 100644 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -@@ -2144,6 +2616,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2144,6 +2628,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -112125,7 +112215,7 @@ index 4b80cbf..89afb9e 100644 /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index -@@ -2166,7 +2642,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2166,7 +2654,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -112148,7 +112238,7 @@ index 4b80cbf..89afb9e 100644 return 0; } -@@ -2184,6 +2675,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2184,6 +2687,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, struct rb_node **rb_link, *rb_parent; struct mempolicy *pol; @@ -112157,7 +112247,7 @@ index 4b80cbf..89afb9e 100644 /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
-@@ -2227,6 +2720,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2227,6 +2732,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return new_vma; } @@ -112193,7 +112283,7 @@ index 4b80cbf..89afb9e 100644 /* * Return true if the calling process may expand its vm space by the passed * number of pages -@@ -2238,6 +2760,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) +@@ -2238,6 +2772,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; @@ -112201,7 +112291,7 @@ index 4b80cbf..89afb9e 100644 if (cur + npages > lim) return 0; return 1; -@@ -2307,6 +2830,22 @@ int install_special_mapping(struct mm_struct *mm, +@@ -2307,6 +2842,22 @@ int install_special_mapping(struct mm_struct *mm, vma->vm_start = addr; vma->vm_end = addr + len; diff --git a/3.2.48/0000_README b/3.2.48/0000_README index 910c7c3..8078d10 100644 --- a/3.2.48/0000_README +++ b/3.2.48/0000_README @@ -110,7 +110,7 @@ Patch: 1047_linux-3.2.48.patch From: http://www.kernel.org Desc: Linux 3.2.48 -Patch: 4420_grsecurity-2.9.1-3.2.48-201306302051.patch +Patch: 4420_grsecurity-2.9.1-3.2.48-201307050016.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.2.48/4420_grsecurity-2.9.1-3.2.48-201306302051.patch b/3.2.48/4420_grsecurity-2.9.1-3.2.48-201307050016.patch index 6eccde6..bddc4aa 100644 --- a/3.2.48/4420_grsecurity-2.9.1-3.2.48-201306302051.patch +++ b/3.2.48/4420_grsecurity-2.9.1-3.2.48-201307050016.patch @@ -599,7 +599,7 @@ index 2fd00b7..cfd5069 100644 for (i = 0; i < n; i++) { diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c -index 01e8715..05ce5f1 100644 +index 01e8715..6a5a03b 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1138,16 +1138,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) @@ -619,7 +619,7 @@ index 01e8715..05ce5f1 100644 if (limit - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) return addr; addr = vma->vm_end; vma = vma->vm_next; @@ -2119,7 +2119,7 @@ index 4b0bc37..d556b08 100644 return; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c -index 44b628e..3e41096 100644 +index 44b628e..be706ee 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -33,6 +33,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, @@ -2147,7 +2147,7 @@ index 44b628e..3e41096 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (len > mm->cached_hole_size) { @@ -2181,7 +2181,7 @@ index 44b628e..3e41096 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* * Remember the place where we stopped the search: */ @@ -2419,7 +2419,7 @@ index f8e16b2..c73ff79 100644 }; diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c -index 385fd30..3aaf4fe 100644 +index 385fd30..27cf8ba 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi @@ -2436,7 +2436,7 @@ index 
385fd30..3aaf4fe 100644 vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) goto success; } @@ -2445,7 +2445,7 @@ index 385fd30..3aaf4fe 100644 if (addr > limit) break; - if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) goto success; addr = vma->vm_end; } @@ -2454,7 +2454,7 @@ index 385fd30..3aaf4fe 100644 if (addr > limit) break; - if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) goto success; addr = vma->vm_end; } @@ -2862,7 +2862,7 @@ index 79802e5..1a89ec5 100644 .notifier_call = salinfo_cpu_callback, .priority = 0, diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c -index 609d500..acd0429 100644 +index 609d500..254a3d7 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len @@ -2901,7 +2901,7 @@ index 609d500..acd0429 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* Remember the address where we stopped this search: */ mm->free_area_cache = addr + len; return addr; @@ -2985,7 +2985,7 @@ index 20b3593..1ce77f0 100644 * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c -index 5ca674b..127c3cb 100644 +index 5ca674b..0d1395a 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u @@ -3001,7 +3001,7 @@ index 5ca674b..127c3cb 100644 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) return -ENOMEM; - if (!vmm || (addr + len) <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len, offset)) ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) return addr; addr = ALIGN(vmm->vm_end, HPAGE_SIZE); } @@ -3417,7 +3417,7 @@ index 937cf33..adb39bb 100644 * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c -index 302d779..6459dc0 100644 +index 302d779..3845a09 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, @@ -3446,7 +3446,7 @@ index 302d779..6459dc0 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, &addr, len, offset)) return addr; } @@ -3455,20 +3455,26 @@ index 302d779..6459dc0 100644 if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len, offset)) ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) return addr; addr = vma->vm_end; if (do_color_align) -@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, +@@ -144,10 +149,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr - len); +- vma = find_vma(mm, addr - len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vmm, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) /* cache the address as a hint for next time */ - return mm->free_area_cache = addr - len; +- return mm->free_area_cache = addr - len; ++ return (mm->free_area_cache = addr); } -@@ -155,17 +160,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + } + +@@ -155,17 +161,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, goto bottomup; addr = mm->mmap_base - len; @@ -3485,11 +3491,11 @@ index 302d779..6459dc0 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (check_heap_stack_gap(vmm, addr, len, offset)) { ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) { /* cache the address as a hint for next time */ return mm->free_area_cache = addr; } -@@ -175,10 +180,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, +@@ -175,10 +181,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -3502,7 +3508,7 @@ index 302d779..6459dc0 100644 bottomup: /* -@@ -223,6 +226,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -223,6 +227,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; @@ -3513,7 +3519,7 @@ index 302d779..6459dc0 100644 if (current->flags & PF_RANDOMIZE) { random_factor = get_random_int(); random_factor = random_factor << PAGE_SHIFT; -@@ -234,38 +241,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -234,38 +242,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; @@ -3995,7 +4001,7 @@ index a3328c2..3b812eb 100644 #ifdef CONFIG_BLK_DEV_INITRD if (boot_args[2] != 0) /* did palo pass us a ramdisk? 
*/ diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index 7ea75d1..38ca97d 100644 +index 7ea75d1..5075226 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -33,9 +33,11 @@ @@ -4016,7 +4022,7 @@ index 7ea75d1..38ca97d 100644 if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) return addr; addr = vma->vm_end; } @@ -4040,7 +4046,7 @@ index 7ea75d1..38ca97d 100644 if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, rand_offset)) ++ if (check_heap_stack_gap(vma, &addr, len, rand_offset)) return addr; addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; if (addr < vma->vm_end) /* handle wraparound */ @@ -5259,7 +5265,7 @@ index 24523dc..7205007 100644 .priority = 1 /* Must run before sched domains notifier. */ }; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c -index 73709f7..63db0f7 100644 +index 73709f7..8e825a8 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, @@ -5267,7 +5273,7 @@ index 73709f7..63db0f7 100644 return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); -+ return check_heap_stack_gap(vma, addr, len, 0); ++ return check_heap_stack_gap(vma, &addr, len, 0); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) @@ -5276,7 +5282,7 @@ index 73709f7..63db0f7 100644 continue; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, 0)) { ++ if (check_heap_stack_gap(vma, &addr, len, 0)) { /* * Remember the place where we stopped the search: */ @@ -5303,7 +5309,7 @@ index 73709f7..63db0f7 100644 */ vma = find_vma(mm, addr); - if (!vma || (addr + len) <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, 0)) { ++ if (check_heap_stack_gap(vma, &addr, len, 0)) { /* remember the address as a hint for next time */ if (use_cache) mm->free_area_cache = addr; @@ -5779,7 +5785,7 @@ index 03f2b55..b0270327 100644 }; diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c -index afeb710..e8366ef 100644 +index afeb710..8da5c79 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, @@ -5796,7 +5802,7 @@ index afeb710..e8366ef 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } @@ -5805,7 +5811,7 @@ index afeb710..e8366ef 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -5823,20 +5829,25 @@ index afeb710..e8366ef 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } -@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long 
addr0, +@@ -178,28 +178,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -5855,11 +5866,11 @@ index afeb710..e8366ef 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -209,10 +210,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -6679,7 +6690,7 @@ index 96ee50a..68ce124 100644 if (unlikely(current->audit_context)) { unsigned long tstate = regs->tstate; diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c -index 42b282f..89c1f61 100644 +index 42b282f..408977c 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void) @@ -6704,12 +6715,12 @@ index 42b282f..89c1f61 100644 if (TASK_SIZE - PAGE_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len, offset)) ++ if (check_heap_stack_gap(vmm, &addr, len, offset)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c -index 5e4252b..5820092 100644 +index 5e4252b..05942dd 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -119,12 +119,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi @@ -6744,7 +6755,7 @@ index 5e4252b..5820092 100644 vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } @@ -6771,7 +6782,7 @@ index 5e4252b..5820092 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -6798,20 +6809,25 @@ index 5e4252b..5820092 100644 vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } -@@ -258,7 +262,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 
+@@ -257,28 +261,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -267,18 +271,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -6830,11 +6846,11 @@ index 5e4252b..5820092 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -288,10 +292,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -288,10 +293,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -6847,7 +6863,7 @@ index 5e4252b..5820092 100644 bottomup: /* -@@ -365,6 +367,10 @@ static unsigned long mmap_rnd(void) +@@ -365,6 +368,10 @@ static unsigned long mmap_rnd(void) { unsigned long rnd = 0UL; @@ -6858,7 +6874,7 @@ index 5e4252b..5820092 100644 if (current->flags & PF_RANDOMIZE) { unsigned long val = get_random_int(); if (test_thread_flag(TIF_32BIT)) -@@ -390,6 +396,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -390,6 +397,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; @@ -6871,7 +6887,7 @@ index 5e4252b..5820092 100644 mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { -@@ -402,6 +414,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) +@@ -402,6 +415,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); @@ -8358,7 +8374,7 @@ index 504c062..a383267 100644 * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c -index 07e1453..6364e54 100644 +index 07e1453..2ec39cd 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, @@ -8374,7 +8390,7 @@ index 07e1453..6364e54 100644 return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* * Remember the place where we stopped the search: */ @@ -8386,16 +8402,21 @@ index 07e1453..6364e54 100644 /* This should only ever run for 32-bit processes. 
*/ BUG_ON(!test_thread_flag(TIF_32BIT)); -@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -105,26 +107,28 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) { ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + if (unlikely(mm->mmap_base < len)) goto bottomup; @@ -8411,11 +8432,11 @@ index 07e1453..6364e54 100644 */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) { ++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } -@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -134,8 +138,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -8426,7 +8447,7 @@ index 07e1453..6364e54 100644 bottomup: /* -@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -163,6 +167,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; @@ -8434,13 +8455,13 @@ index 07e1453..6364e54 100644 if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; -@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -181,8 +186,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) @@ -18677,7 +18698,7 @@ index ce0be7c..1252d68 100644 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S -index e11e394..b1c65cc 100644 +index e11e394..599d09a 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -19,6 +19,8 @@ @@ -18702,7 +18723,7 @@ index e11e394..b1c65cc 100644 .text __HEAD -@@ -85,35 +93,23 @@ startup_64: +@@ -85,35 +93,22 @@ startup_64: */ addq %rbp, init_level4_pgt + 0(%rip) addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) @@ -18719,12 +18740,11 @@ index e11e394..b1c65cc 100644 - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) -+ + +- addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) -- - /* 
Add an Identity mapping if I am above 1G */ - leaq _text(%rip), %rdi - andq $PMD_PAGE_MASK, %rdi @@ -18749,7 +18769,7 @@ index e11e394..b1c65cc 100644 /* * Fixup the kernel text+data virtual addresses. Note that -@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64) +@@ -160,8 +155,8 @@ ENTRY(secondary_startup_64) * after the boot processor executes this code. */ @@ -18760,7 +18780,7 @@ index e11e394..b1c65cc 100644 movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ -@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64) +@@ -183,9 +178,17 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ @@ -18779,7 +18799,7 @@ index e11e394..b1c65cc 100644 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64) +@@ -247,6 +250,7 @@ ENTRY(secondary_startup_64) * jump. In addition we need to ensure %cs is set so we make this * a far return. */ @@ -18787,7 +18807,7 @@ index e11e394..b1c65cc 100644 movq initial_code(%rip),%rax pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs -@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64) +@@ -269,7 +273,7 @@ ENTRY(secondary_startup_64) bad_address: jmp bad_address @@ -18796,7 +18816,7 @@ index e11e394..b1c65cc 100644 #ifdef CONFIG_EARLY_PRINTK .globl early_idt_handlers early_idt_handlers: -@@ -314,18 +319,23 @@ ENTRY(early_idt_handler) +@@ -314,18 +318,23 @@ ENTRY(early_idt_handler) #endif /* EARLY_PRINTK */ 1: hlt jmp 1b @@ -18821,7 +18841,7 @@ index e11e394..b1c65cc 100644 #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ ENTRY(name) -@@ -338,7 +348,6 @@ ENTRY(name) +@@ -338,7 +347,6 @@ ENTRY(name) i = i + 1 ; \ .endr @@ -18829,7 +18849,7 @@ index e11e394..b1c65cc 100644 /* * This default setting generates an ident mapping at address 0x100000 * and a mapping for the kernel that precisely maps virtual address -@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt) +@@ -349,13 +357,41 @@ NEXT_PAGE(init_level4_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE @@ -18871,7 +18891,7 @@ index e11e394..b1c65cc 100644 NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 -@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt) +@@ -363,20 +399,23 @@ NEXT_PAGE(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE @@ -18903,7 +18923,7 @@ index e11e394..b1c65cc 100644 NEXT_PAGE(level2_kernel_pgt) /* -@@ -389,35 +429,56 @@ NEXT_PAGE(level2_kernel_pgt) +@@ -389,35 +428,56 @@ NEXT_PAGE(level2_kernel_pgt) * If you want to increase this then increase MODULES_VADDR * too.) 
*/ @@ -21097,10 +21117,10 @@ index d4f278e..86c58c0 100644 for (i = 0; i < copied; i++) { switch (opcode[i]) { diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c -index 0b0cb5f..207bec6 100644 +index 0b0cb5f..560d0df 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c -@@ -24,17 +24,227 @@ +@@ -24,17 +24,228 @@ #include <asm/syscalls.h> @@ -21162,7 +21182,7 @@ index 0b0cb5f..207bec6 100644 + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } + } @@ -21204,7 +21224,7 @@ index 0b0cb5f..207bec6 100644 + } + return -ENOMEM; + } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; @@ -21261,7 +21281,7 @@ index 0b0cb5f..207bec6 100644 + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } + } @@ -21277,10 +21297,11 @@ index 0b0cb5f..207bec6 100644 + + /* make sure it can fit in the remaining address space */ + if (addr > len) { -+ vma = find_vma(mm, addr-len); -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + /* remember the address as a hint for next time */ -+ return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); + } + + if (mm->mmap_base < len) @@ -21295,7 +21316,7 @@ index 0b0cb5f..207bec6 100644 + * return with success: + */ + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + @@ -21341,7 +21362,7 @@ index 0b0cb5f..207bec6 100644 + return addr; } diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 0514890..37204bc 100644 +index 0514890..8efa56b 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -95,8 +95,8 @@ out: @@ -21388,7 +21409,7 @@ index 0514890..37204bc 100644 vma = find_vma(mm, addr); - if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (end - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) @@ -21397,7 +21418,7 @@ index 0514890..37204bc 100644 return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* * Remember the place where we stopped the search: */ @@ -21428,7 +21449,7 @@ index 0514890..37204bc 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } @@ -21439,7 +21460,7 @@ index 0514890..37204bc 100644 vma = find_vma(mm, tmp_addr); - if (!vma || tmp_addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, tmp_addr, len, offset)) ++ if (check_heap_stack_gap(vma, &tmp_addr, len, offset)) /* remember the address as a hint for next time */ 
return mm->free_area_cache = tmp_addr; } @@ -21448,7 +21469,7 @@ index 0514890..37204bc 100644 */ vma = find_vma(mm, addr); - if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ return mm->free_area_cache = addr; @@ -26376,7 +26397,7 @@ index f4f29b1..5cac4fb 100644 return (void *)vaddr; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c -index df7d12c..7bbdfc3 100644 +index df7d12c..93fae8e 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -277,13 +277,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, @@ -26428,7 +26449,7 @@ index df7d12c..7bbdfc3 100644 - mm->free_area_cache = addr + len; - return addr; - } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; @@ -26484,7 +26505,7 @@ index df7d12c..7bbdfc3 100644 */ - if (addr + len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) { -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { /* remember the address as a hint for next time */ - mm->cached_hole_size = largest_hole; - return (mm->free_area_cache = addr); @@ -26593,12 +26614,12 @@ index df7d12c..7bbdfc3 100644 vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) -+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index a4cca06..e2ccdf7 100644 +index a4cca06..9e00106 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -3,6 +3,7 @@ @@ -26627,7 +26648,22 @@ index a4cca06..e2ccdf7 100644 unsigned long pgd_extra = 0; phys_addr_t base; -@@ -324,10 +327,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, +@@ -282,7 +285,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + + #ifdef CONFIG_X86_32 + early_ioremap_page_table_range_init(); ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ load_cr3(get_cpu_pgd(0)); ++#elif defined(CONFIG_X86_32) + load_cr3(swapper_pg_dir); + #endif + +@@ -324,10 +334,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. 
*/ @@ -26669,7 +26705,7 @@ index a4cca06..e2ccdf7 100644 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) -@@ -384,8 +417,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) +@@ -384,8 +424,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) #endif } @@ -26788,7 +26824,7 @@ index a4cca06..e2ccdf7 100644 (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 29f7c6d..b46b35b 100644 +index 29f7c6d..7500c2f 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void) @@ -27005,20 +27041,7 @@ index 29f7c6d..b46b35b 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ -@@ -757,6 +756,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - #ifdef CONFIG_FLATMEM - BUG_ON(!mem_map); - #endif -@@ -774,7 +779,7 @@ void __init mem_init(void) +@@ -774,7 +773,7 @@ void __init mem_init(void) set_highmem_pages_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; @@ -27027,7 +27050,7 @@ index 29f7c6d..b46b35b 100644 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " -@@ -815,10 +820,10 @@ void __init mem_init(void) +@@ -815,10 +814,10 @@ void __init mem_init(void) ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, @@ -27041,7 +27064,7 @@ index 29f7c6d..b46b35b 100644 ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* -@@ -896,6 +901,7 @@ void set_kernel_text_rw(void) +@@ -896,6 +895,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; @@ -27049,7 +27072,7 @@ index 29f7c6d..b46b35b 100644 pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); -@@ -910,6 +916,7 @@ void set_kernel_text_ro(void) +@@ -910,6 +910,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; @@ -27057,7 +27080,7 @@ index 29f7c6d..b46b35b 100644 pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); -@@ -938,6 +945,7 @@ void mark_rodata_ro(void) +@@ -938,6 +939,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; @@ -27066,7 +27089,7 @@ index 29f7c6d..b46b35b 100644 printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index 44b93da..b5cb517 100644 +index 44b93da..5a0b3ee 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on); @@ -27201,20 +27224,7 @@ index 44b93da..b5cb517 100644 spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } -@@ -693,6 +707,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - /* clear_bss() already clear the empty_zero_page */ - - reservedpages = 0; -@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr) +@@ -856,8 +870,8 @@ int kern_addr_valid(unsigned long addr) static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), @@ 
-27225,7 +27235,7 @@ index 44b93da..b5cb517 100644 }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) -@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr) +@@ -891,7 +905,7 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { @@ -28899,7 +28909,7 @@ index db0e9a5..0372c14 100644 } EXPORT_SYMBOL(pcibios_set_irq_routing); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c -index 40e4469..1ab536e 100644 +index 40e4469..0592924 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void) @@ -28925,7 +28935,7 @@ index 40e4469..1ab536e 100644 gdt_descr.address = __pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); -@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void) +@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void) { struct desc_ptr gdt_descr; @@ -28940,6 +28950,44 @@ index 40e4469..1ab536e 100644 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id())); ++#else + load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + + local_irq_restore(efi_rt_eflags); +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 0fba86d..3642981 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -75,6 +75,11 @@ void __init efi_call_phys_prelog(void) + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + } + +@@ -88,6 +93,11 @@ void __init efi_call_phys_epilog(void) + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id())); ++#endif ++ + __flush_tlb_all(); + local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index fbe66e6..eae5e38 100644 --- a/arch/x86/platform/efi/efi_stub_32.S @@ -50146,7 +50194,7 @@ index 451b9b8..12e5a03 100644 out_free_fd: diff --git a/fs/exec.c b/fs/exec.c -index 312e297..6367442 100644 +index 312e297..4df82cf 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,12 +55,35 @@ @@ -50185,7 +50233,7 @@ index 312e297..6367442 100644 int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; unsigned int core_pipe_limit; -@@ -70,7 +93,7 @@ struct core_name { +@@ -70,20 +93,23 @@ struct core_name { char *corename; int used, size; }; @@ -50194,7 +50242,14 @@ index 312e297..6367442 100644 /* The maximal length of core_pattern is also specified in sysctl.c */ -@@ -82,8 +105,8 @@ int __register_binfmt(struct linux_binfmt * fmt, int insert) + static LIST_HEAD(formats); + static DEFINE_RWLOCK(binfmt_lock); + ++extern int gr_process_kernel_exec_ban(void); ++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm); ++ + int __register_binfmt(struct linux_binfmt * fmt, int insert) + { if (!fmt) return -EINVAL; write_lock(&binfmt_lock); @@ -50205,7 +50260,7 @@ index 312e297..6367442 100644 write_unlock(&binfmt_lock); return 0; } -@@ -93,7 +116,7 @@ EXPORT_SYMBOL(__register_binfmt); +@@ -93,7 +119,7 @@ EXPORT_SYMBOL(__register_binfmt); void unregister_binfmt(struct linux_binfmt * fmt) { 
write_lock(&binfmt_lock); @@ -50214,7 +50269,7 @@ index 312e297..6367442 100644 write_unlock(&binfmt_lock); } -@@ -188,18 +211,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -188,18 +214,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; @@ -50236,7 +50291,7 @@ index 312e297..6367442 100644 return NULL; if (write) { -@@ -215,6 +230,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -215,6 +233,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (size <= ARG_MAX) return page; @@ -50254,7 +50309,7 @@ index 312e297..6367442 100644 /* * Limit to 1/4-th the stack size for the argv+env strings. * This ensures that: -@@ -274,6 +300,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -274,6 +303,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; @@ -50266,7 +50321,7 @@ index 312e297..6367442 100644 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); -@@ -288,6 +319,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -288,6 +322,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -50279,7 +50334,7 @@ index 312e297..6367442 100644 return 0; err: up_write(&mm->mmap_sem); -@@ -403,12 +440,12 @@ struct user_arg_ptr { +@@ -403,12 +443,12 @@ struct user_arg_ptr { union { const char __user *const __user *native; #ifdef CONFIG_COMPAT @@ -50294,7 +50349,7 @@ index 312e297..6367442 100644 { const char __user *native; -@@ -417,14 +454,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) +@@ -417,14 +457,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) compat_uptr_t compat; if (get_user(compat, argv.ptr.compat + nr)) @@ -50311,7 +50366,7 @@ index 312e297..6367442 100644 return native; } -@@ -443,11 +480,12 @@ static int count(struct user_arg_ptr argv, int max) +@@ -443,11 +483,12 @@ static int count(struct user_arg_ptr argv, int max) if (!p) break; @@ -50326,7 +50381,7 @@ index 312e297..6367442 100644 if (fatal_signal_pending(current)) return -ERESTARTNOHAND; -@@ -477,7 +515,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, +@@ -477,7 +518,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, ret = -EFAULT; str = get_user_arg_ptr(argv, argc); @@ -50335,7 +50390,7 @@ index 312e297..6367442 100644 goto out; len = strnlen_user(str, MAX_ARG_STRLEN); -@@ -559,7 +597,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, +@@ -559,7 +600,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, int r; mm_segment_t oldfs = get_fs(); struct user_arg_ptr argv = { @@ -50344,7 +50399,7 @@ index 312e297..6367442 100644 }; set_fs(KERNEL_DS); -@@ -594,7 +632,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -594,7 +635,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) unsigned long new_end = old_end - shift; struct mmu_gather tlb; @@ -50354,7 +50409,7 @@ index 312e297..6367442 100644 /* * ensure there are no vmas between where we want to go -@@ -603,6 +642,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -603,6 +645,10 @@ static int 
shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) if (vma != find_vma(mm, new_start)) return -EFAULT; @@ -50365,7 +50420,7 @@ index 312e297..6367442 100644 /* * cover the whole range: [new_start, old_end) */ -@@ -683,10 +726,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -683,10 +729,6 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); @@ -50376,7 +50431,7 @@ index 312e297..6367442 100644 stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; -@@ -698,8 +737,28 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -698,8 +740,28 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->exec -= stack_shift; down_write(&mm->mmap_sem); @@ -50405,7 +50460,7 @@ index 312e297..6367442 100644 /* * Adjust stack execute permissions; explicitly enable for * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone -@@ -718,13 +777,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -718,13 +780,6 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; BUG_ON(prev != vma); @@ -50419,7 +50474,7 @@ index 312e297..6367442 100644 /* mprotect_fixup is overkill to remove the temporary stack flags */ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; -@@ -748,6 +800,27 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -748,6 +803,27 @@ int setup_arg_pages(struct linux_binprm *bprm, #endif current->mm->start_stack = bprm->p; ret = expand_stack(vma, stack_base); @@ -50447,7 +50502,7 @@ index 312e297..6367442 100644 if (ret) ret = -EFAULT; -@@ -782,6 +855,8 @@ struct file *open_exec(const char *name) +@@ -782,6 +858,8 @@ struct file *open_exec(const char *name) fsnotify_open(file); @@ -50456,7 +50511,7 @@ index 312e297..6367442 100644 err = deny_write_access(file); if (err) goto exit; -@@ -805,7 +880,7 @@ int kernel_read(struct file *file, loff_t offset, +@@ -805,7 +883,7 @@ int kernel_read(struct file *file, loff_t offset, old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ @@ -50465,7 +50520,7 @@ index 312e297..6367442 100644 set_fs(old_fs); return result; } -@@ -1070,6 +1145,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) +@@ -1070,6 +1148,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) perf_event_comm(tsk); } @@ -50487,7 +50542,7 @@ index 312e297..6367442 100644 int flush_old_exec(struct linux_binprm * bprm) { int retval; -@@ -1084,6 +1174,7 @@ int flush_old_exec(struct linux_binprm * bprm) +@@ -1084,6 +1177,7 @@ int flush_old_exec(struct linux_binprm * bprm) set_mm_exe_file(bprm->mm, bprm->file); @@ -50495,7 +50550,7 @@ index 312e297..6367442 100644 /* * Release all of the old mmap stuff */ -@@ -1116,10 +1207,6 @@ EXPORT_SYMBOL(would_dump); +@@ -1116,10 +1210,6 @@ EXPORT_SYMBOL(would_dump); void setup_new_exec(struct linux_binprm * bprm) { @@ -50506,7 +50561,7 @@ index 312e297..6367442 100644 arch_pick_mmap_layout(current->mm); /* This is the point of no return */ -@@ -1130,18 +1217,7 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1130,18 +1220,7 @@ void setup_new_exec(struct linux_binprm * bprm) else set_dumpable(current->mm, suid_dumpable); @@ -50526,7 +50581,7 @@ index 312e297..6367442 100644 /* Set the new mm task size. 
We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on -@@ -1159,13 +1235,6 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1159,13 +1238,6 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } @@ -50540,7 +50595,7 @@ index 312e297..6367442 100644 /* An exec changes our domain. We are no longer part of the thread group */ -@@ -1229,6 +1298,15 @@ void install_exec_creds(struct linux_binprm *bprm) +@@ -1229,6 +1301,15 @@ void install_exec_creds(struct linux_binprm *bprm) commit_creds(bprm->cred); bprm->cred = NULL; @@ -50556,7 +50611,7 @@ index 312e297..6367442 100644 /* * cred_guard_mutex must be held at least to this point to prevent * ptrace_attach() from altering our determination of the task's -@@ -1266,7 +1344,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) +@@ -1266,7 +1347,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) } rcu_read_unlock(); @@ -50565,7 +50620,7 @@ index 312e297..6367442 100644 bprm->unsafe |= LSM_UNSAFE_SHARE; } else { res = -EAGAIN; -@@ -1461,6 +1539,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) +@@ -1461,6 +1542,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) EXPORT_SYMBOL(search_binary_handler); @@ -50597,7 +50652,7 @@ index 312e297..6367442 100644 /* * sys_execve() executes a new program. */ -@@ -1469,6 +1572,11 @@ static int do_execve_common(const char *filename, +@@ -1469,6 +1575,11 @@ static int do_execve_common(const char *filename, struct user_arg_ptr envp, struct pt_regs *regs) { @@ -50609,7 +50664,7 @@ index 312e297..6367442 100644 struct linux_binprm *bprm; struct file *file; struct files_struct *displaced; -@@ -1476,6 +1584,8 @@ static int do_execve_common(const char *filename, +@@ -1476,6 +1587,8 @@ static int do_execve_common(const char *filename, int retval; const struct cred *cred = current_cred(); @@ -50618,7 +50673,7 @@ index 312e297..6367442 100644 /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs -@@ -1516,12 +1626,27 @@ static int do_execve_common(const char *filename, +@@ -1516,12 +1629,22 @@ static int do_execve_common(const char *filename, if (IS_ERR(file)) goto out_unmark; @@ -50633,11 +50688,6 @@ index 312e297..6367442 100644 bprm->filename = filename; bprm->interp = filename; -+ if (gr_process_user_ban()) { -+ retval = -EPERM; -+ goto out_file; -+ } -+ + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + retval = -EACCES; + goto out_file; @@ -50646,7 +50696,7 @@ index 312e297..6367442 100644 retval = bprm_mm_init(bprm); if (retval) goto out_file; -@@ -1538,24 +1663,65 @@ static int do_execve_common(const char *filename, +@@ -1538,24 +1661,70 @@ static int do_execve_common(const char *filename, if (retval < 0) goto out; @@ -50666,6 +50716,11 @@ index 312e297..6367442 100644 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; +#endif + ++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) { ++ retval = -EPERM; ++ goto out_fail; ++ } ++ + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out_fail; @@ -50716,7 +50771,7 @@ index 312e297..6367442 100644 current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); -@@ -1564,6 +1730,14 @@ static int do_execve_common(const char *filename, +@@ -1564,6 +1733,14 @@ static int do_execve_common(const char *filename, put_files_struct(displaced); return retval; @@ -50731,7 +50786,7 @@ 
index 312e297..6367442 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1637,7 +1811,7 @@ static int expand_corename(struct core_name *cn) +@@ -1637,7 +1814,7 @@ static int expand_corename(struct core_name *cn) { char *old_corename = cn->corename; @@ -50740,7 +50795,7 @@ index 312e297..6367442 100644 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); if (!cn->corename) { -@@ -1734,7 +1908,7 @@ static int format_corename(struct core_name *cn, long signr) +@@ -1734,7 +1911,7 @@ static int format_corename(struct core_name *cn, long signr) int pid_in_pattern = 0; int err = 0; @@ -50749,7 +50804,7 @@ index 312e297..6367442 100644 cn->corename = kmalloc(cn->size, GFP_KERNEL); cn->used = 0; -@@ -1831,6 +2005,280 @@ out: +@@ -1831,6 +2008,280 @@ out: return ispipe; } @@ -51030,7 +51085,7 @@ index 312e297..6367442 100644 static int zap_process(struct task_struct *start, int exit_code) { struct task_struct *t; -@@ -2004,17 +2452,17 @@ static void coredump_finish(struct mm_struct *mm) +@@ -2004,17 +2455,17 @@ static void coredump_finish(struct mm_struct *mm) void set_dumpable(struct mm_struct *mm, int value) { switch (value) { @@ -51051,7 +51106,7 @@ index 312e297..6367442 100644 set_bit(MMF_DUMP_SECURELY, &mm->flags); smp_wmb(); set_bit(MMF_DUMPABLE, &mm->flags); -@@ -2027,7 +2475,7 @@ static int __get_dumpable(unsigned long mm_flags) +@@ -2027,7 +2478,7 @@ static int __get_dumpable(unsigned long mm_flags) int ret; ret = mm_flags & MMF_DUMPABLE_MASK; @@ -51060,7 +51115,7 @@ index 312e297..6367442 100644 } int get_dumpable(struct mm_struct *mm) -@@ -2042,17 +2490,17 @@ static void wait_for_dump_helpers(struct file *file) +@@ -2042,17 +2493,17 @@ static void wait_for_dump_helpers(struct file *file) pipe = file->f_path.dentry->d_inode->i_pipe; pipe_lock(pipe); @@ -51083,7 +51138,7 @@ index 312e297..6367442 100644 pipe_unlock(pipe); } -@@ -2113,7 +2561,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2113,7 +2564,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) int retval = 0; int flag = 0; int ispipe; @@ -51093,7 +51148,7 @@ index 312e297..6367442 100644 struct coredump_params cprm = { .signr = signr, .regs = regs, -@@ -2128,6 +2577,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2128,6 +2580,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) audit_core_dumps(signr); @@ -51103,7 +51158,7 @@ index 312e297..6367442 100644 binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; -@@ -2138,14 +2590,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2138,14 +2593,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) if (!cred) goto fail; /* @@ -51124,7 +51179,7 @@ index 312e297..6367442 100644 } retval = coredump_wait(exit_code, &core_state); -@@ -2195,7 +2649,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2195,7 +2652,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) } cprm.limit = RLIM_INFINITY; @@ -51133,7 +51188,7 @@ index 312e297..6367442 100644 if (core_pipe_limit && (core_pipe_limit < dump_count)) { printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", task_tgid_vnr(current), current->comm); -@@ -2222,9 +2676,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2222,9 +2679,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) } else { struct inode *inode; @@ -51153,7 +51208,7 @@ index 312e297..6367442 100644 cprm.file = 
filp_open(cn.corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600); -@@ -2265,7 +2729,7 @@ close_fail: +@@ -2265,7 +2732,7 @@ close_fail: filp_close(cprm.file, NULL); fail_dropcount: if (ispipe) @@ -51162,7 +51217,7 @@ index 312e297..6367442 100644 fail_unlock: kfree(cn.corename); fail_corename: -@@ -2284,7 +2748,7 @@ fail: +@@ -2284,7 +2751,7 @@ fail: */ int dump_write(struct file *file, const void *addr, int nr) { @@ -53277,7 +53332,7 @@ index f590b11..414cf4b 100644 static int __init init_hppfs(void) { diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c -index 0aa424a..332097d8 100644 +index 0aa424a..c5563a6 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -134,6 +134,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, @@ -53327,7 +53382,7 @@ index 0aa424a..332097d8 100644 } - if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) { ++ if (check_heap_stack_gap(vma, &addr, len, offset)) { + mm->free_area_cache = addr + len; return addr; + } @@ -57647,10 +57702,10 @@ index 8a89949..6776861 100644 xfs_init_zones(void) diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 -index 0000000..15aaf25 +index 0000000..dc33dcd --- /dev/null +++ b/grsecurity/Kconfig -@@ -0,0 +1,1053 @@ +@@ -0,0 +1,1054 @@ +# +# grecurity configuration +# @@ -57807,8 +57862,9 @@ index 0000000..15aaf25 + fork until the administrator is able to assess the situation and + restart the daemon. + In the suid/sgid case, the attempt is logged, the user has all their -+ processes terminated, and they are prevented from executing any further -+ processes for 15 minutes. ++ existing instances of the suid/sgid binary terminated and will ++ be unable to execute any suid/sgid binaries for 15 minutes. ++ + It is recommended that you also enable signal logging in the auditing + section so that logs are generated when a process triggers a suspicious + signal. 
@@ -58750,7 +58806,7 @@ index 0000000..1b9afa9 +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 -index 0000000..59ef60a +index 0000000..c8f4c9f --- /dev/null +++ b/grsecurity/gracl.c @@ -0,0 +1,4218 @@ @@ -61083,7 +61139,7 @@ index 0000000..59ef60a + return; +} + -+extern int __gr_process_user_ban(struct user_struct *user); ++extern int gr_process_kernel_setuid_ban(struct user_struct *user); + +int +gr_check_user_change(int real, int effective, int fs) @@ -61096,7 +61152,7 @@ index 0000000..59ef60a + int effectiveok = 0; + int fsok = 0; + -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) + struct user_struct *user; + + if (real == -1) @@ -61106,7 +61162,7 @@ index 0000000..59ef60a + if (user == NULL) + goto skipit; + -+ if (__gr_process_user_ban(user)) { ++ if (gr_process_kernel_setuid_ban(user)) { + /* for find_user */ + free_uid(user); + return 1; @@ -64309,7 +64365,7 @@ index 0000000..39645c9 +} diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c new file mode 100644 -index 0000000..42b6af4 +index 0000000..1117026 --- /dev/null +++ b/grsecurity/gracl_segv.c @@ -0,0 +1,301 @@ @@ -64547,7 +64603,7 @@ index 0000000..42b6af4 + if (likely(tsk != task)) { + // if this thread has the same subject as the one that triggered + // RES_CRASH and it's the same binary, kill it -+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) ++ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } + } while_each_thread(tsk2, tsk); @@ -66591,10 +66647,10 @@ index 0000000..f7f29aa +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..1080a03 +index 0000000..3752208 --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,220 @@ +@@ -0,0 +1,244 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/delay.h> @@ -66696,7 +66752,7 @@ index 0000000..1080a03 + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) { + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; + daemon = 1; @@ -66713,14 +66769,15 @@ index 0000000..1080a03 + user = find_user(uid); + if (user == NULL) + goto unlock; -+ user->banned = 1; -+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME; -+ if (user->ban_expires == ~0UL) -+ user->ban_expires--; ++ user->suid_banned = 1; ++ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME; ++ if (user->suid_ban_expires == ~0UL) ++ user->suid_ban_expires--; + ++ /* only kill other threads of the same binary, from the same user */ + do_each_thread(tsk2, tsk) { + cred2 = __task_cred(tsk); -+ if (tsk != p && cred2->uid == uid) ++ if (tsk != p && cred2->uid == uid && gr_is_same_file(tsk->exec_file, p->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + } @@ -66731,7 +66788,7 @@ index 0000000..1080a03 + rcu_read_unlock(); + + if (uid) -+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, uid, GR_USER_BAN_TIME / 60); + else if (daemon) + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); + @@ -66777,11 +66834,10 @@ 
index 0000000..1080a03 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid); + /* we intentionally leak this ref */ + user = get_uid(current->cred->user); -+ if (user) { -+ user->banned = 1; -+ user->ban_expires = ~0UL; -+ } ++ if (user) ++ user->kernel_banned = 1; + ++ /* kill all processes of this user */ + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + cred = __task_cred(tsk); @@ -66793,25 +66849,49 @@ index 0000000..1080a03 +#endif +} + -+int __gr_process_user_ban(struct user_struct *user) ++#ifdef CONFIG_GRKERNSEC_BRUTE ++static bool suid_ban_expired(struct user_struct *user) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ if (unlikely(user->banned)) { -+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) { -+ user->banned = 0; -+ user->ban_expires = 0; -+ free_uid(user); -+ } else -+ return -EPERM; ++ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) { ++ user->suid_banned = 0; ++ user->suid_ban_expires = 0; ++ free_uid(user); ++ return true; + } ++ ++ return false; ++} ++#endif ++ ++int gr_process_kernel_exec_ban(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(current->cred->user->kernel_banned)) ++ return -EPERM; +#endif + return 0; +} + -+int gr_process_user_ban(void) ++int gr_process_kernel_setuid_ban(struct user_struct *user) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ return __gr_process_user_ban(current->cred->user); ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(user->kernel_banned)) ++ gr_fake_force_sig(SIGKILL, current); ++#endif ++ return 0; ++} ++ ++int gr_process_suid_exec_ban(const struct linux_binprm *bprm) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct user_struct *user = current->cred->user; ++ if (unlikely(user->suid_banned)) { ++ if (suid_ban_expired(user)) ++ return 0; ++ /* disallow execution of suid binaries only */ ++ else if (bprm->cred->euid != current->cred->uid) ++ return -EPERM; ++ } +#endif + return 0; +} @@ -69960,10 +70040,10 @@ index 0000000..be66033 +#endif diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h new file mode 100644 -index 0000000..c9292f7 +index 0000000..99019db --- /dev/null +++ b/include/linux/grinternal.h -@@ -0,0 +1,223 @@ +@@ -0,0 +1,235 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + @@ -70085,6 +70165,18 @@ index 0000000..c9292f7 + (pcred)->uid, (pcred)->euid, \ + (pcred)->gid, (pcred)->egid + ++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2) ++{ ++ if (file1 && file2) { ++ const struct inode *inode1 = file1->f_path.dentry->d_inode; ++ const struct inode *inode2 = file2->f_path.dentry->d_inode; ++ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev) ++ return true; ++ } ++ ++ return false; ++} ++ +#define GR_CHROOT_CAPS {{ \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ @@ -70189,10 +70281,10 @@ index 0000000..c9292f7 +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..2bd4c8d +index 0000000..2f159b5 --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,111 @@ +@@ -0,0 +1,112 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 
TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -70304,12 +70396,13 @@ index 0000000..2bd4c8d +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " ++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..14100e6 +index 0000000..4af9529 --- /dev/null +++ b/include/linux/grsecurity.h -@@ -0,0 +1,221 @@ +@@ -0,0 +1,220 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H +#include <linux/fs.h> @@ -70334,7 +70427,6 @@ index 0000000..14100e6 +void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); -+int gr_process_user_ban(void); + +char gr_roletype_to_char(void); + @@ -72251,7 +72343,7 @@ index 2148b12..519b820 100644 static inline void anon_vma_merge(struct vm_area_struct *vma, diff --git a/include/linux/sched.h b/include/linux/sched.h -index 8204898..7e2119f 100644 +index 8204898..b268075 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio_list; @@ -72290,7 +72382,7 @@ index 8204898..7e2119f 100644 +} +#endif + -+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset); ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset); +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset); extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long @@ -72325,19 +72417,22 @@ index 8204898..7e2119f 100644 #ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; -@@ -711,6 +741,11 @@ struct user_struct { +@@ -711,6 +741,14 @@ struct user_struct { struct key *session_keyring; /* UID's default session keyring */ #endif -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ unsigned int banned; -+ unsigned long ban_expires; ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ unsigned char kernel_banned; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ unsigned char suid_banned; ++ unsigned long suid_ban_expires; +#endif + /* Hash table maintenance information */ struct hlist_node uidhash_node; uid_t uid; -@@ -1125,7 +1160,7 @@ struct sched_class { +@@ -1125,7 +1163,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_move_group) (struct task_struct *p, int on_rq); #endif @@ -72346,7 +72441,7 @@ index 8204898..7e2119f 100644 struct load_weight { unsigned long weight, inv_weight; -@@ -1341,8 +1376,8 @@ struct task_struct { +@@ -1341,8 +1379,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ @@ -72357,7 +72452,7 @@ index 8204898..7e2119f 100644 cputime_t utime, stime, utimescaled, stimescaled; 
cputime_t gtime; -@@ -1358,13 +1393,6 @@ struct task_struct { +@@ -1358,13 +1396,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -72371,7 +72466,7 @@ index 8204898..7e2119f 100644 char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) -@@ -1381,8 +1409,16 @@ struct task_struct { +@@ -1381,8 +1412,16 @@ struct task_struct { #endif /* CPU-specific state of this task */ struct thread_struct thread; @@ -72388,7 +72483,7 @@ index 8204898..7e2119f 100644 /* open file information */ struct files_struct *files; /* namespaces */ -@@ -1429,6 +1465,11 @@ struct task_struct { +@@ -1429,6 +1468,11 @@ struct task_struct { struct rt_mutex_waiter *pi_blocked_on; #endif @@ -72400,7 +72495,7 @@ index 8204898..7e2119f 100644 #ifdef CONFIG_DEBUG_MUTEXES /* mutex deadlock detection */ struct mutex_waiter *blocked_on; -@@ -1544,6 +1585,28 @@ struct task_struct { +@@ -1544,6 +1588,28 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -72429,7 +72524,7 @@ index 8204898..7e2119f 100644 #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack */ int curr_ret_stack; -@@ -1578,6 +1641,50 @@ struct task_struct { +@@ -1578,6 +1644,50 @@ struct task_struct { #endif }; @@ -72480,7 +72575,7 @@ index 8204898..7e2119f 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -@@ -2093,7 +2200,9 @@ void yield(void); +@@ -2093,7 +2203,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -72490,7 +72585,7 @@ index 8204898..7e2119f 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2126,6 +2235,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2126,6 +2238,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -72498,7 +72593,7 @@ index 8204898..7e2119f 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2247,6 +2357,12 @@ static inline void mmdrop(struct mm_struct * mm) +@@ -2247,6 +2360,12 @@ static inline void mmdrop(struct mm_struct * mm) extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); @@ -72511,7 +72606,7 @@ index 8204898..7e2119f 100644 /* Remove the current tasks stale references to the old mm_struct */ extern void mm_release(struct task_struct *, struct mm_struct *); /* Allocate a new mm structure and copy contents from tsk->mm */ -@@ -2263,7 +2379,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2263,7 +2382,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -72520,7 +72615,7 @@ index 8204898..7e2119f 100644 extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2428,9 +2544,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2428,9 +2547,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -83013,7 +83108,7 @@ index 4f4f53b..02d443a 100644 capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); diff --git a/mm/mmap.c b/mm/mmap.c -index dff37a6..d2e3afd 100644 +index dff37a6..0e57094 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -30,6 +30,7 @@ @@ -83447,7 +83542,7 @@ index dff37a6..d2e3afd 100644 
kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) -@@ -1348,6 +1522,62 @@ unacct_error: +@@ -1348,6 +1522,73 @@ unacct_error: return error; } @@ -83455,18 +83550,18 @@ index dff37a6..d2e3afd 100644 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) +{ + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK)) -+ return (random32() & 0xFF) << PAGE_SHIFT; ++ return ((random32() & 0xFF) + 1) << PAGE_SHIFT; + + return 0; +} +#endif + -+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset) ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset) +{ + if (!vma) { +#ifdef CONFIG_STACK_GROWSUP -+ if (addr > sysctl_heap_stack_gap) -+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ if (*addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, *addr - sysctl_heap_stack_gap); + else + vma = find_vma(current->mm, 0); + if (vma && (vma->vm_flags & VM_GROWSUP)) @@ -83475,17 +83570,28 @@ index dff37a6..d2e3afd 100644 + return true; + } + -+ if (addr + len > vma->vm_start) ++ if (*addr + len > vma->vm_start) + return false; + -+ if (vma->vm_flags & VM_GROWSDOWN) -+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++ if (offset) { ++ if (vma->vm_prev && *addr == vma->vm_prev->vm_end && (vma->vm_start - len - vma->vm_prev->vm_end >= offset)) { ++ *addr = vma->vm_prev->vm_end + offset; ++ return true; ++ } ++ return offset <= vma->vm_start - *addr - len; ++ } else if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - *addr - len; +#ifdef CONFIG_STACK_GROWSUP -+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) -+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) { ++ if (*addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) ++ return true; ++ if (vma->vm_start - len - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) { ++ *addr = vma->vm_start - len; ++ return true; ++ } ++ return false; ++ } +#endif -+ else if (offset) -+ return offset <= vma->vm_start - addr - len; + + return true; +} @@ -83510,7 +83616,7 @@ index dff37a6..d2e3afd 100644 /* Get an address range which is currently unmapped. * For shmat() with addr=0. * -@@ -1367,6 +1597,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, +@@ -1367,6 +1608,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; @@ -83518,7 +83624,7 @@ index dff37a6..d2e3afd 100644 if (len > TASK_SIZE) return -ENOMEM; -@@ -1374,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, +@@ -1374,18 +1616,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; @@ -83534,7 +83640,7 @@ index dff37a6..d2e3afd 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } @@ -83549,7 +83655,7 @@ index dff37a6..d2e3afd 100644 } full_search: -@@ -1396,34 +1632,40 @@ full_search: +@@ -1396,34 +1643,40 @@ full_search: * Start a new search - just in case we missed * some holes. 
*/ @@ -83570,7 +83676,7 @@ index dff37a6..d2e3afd 100644 - mm->free_area_cache = addr + len; - return addr; - } -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; @@ -83601,7 +83707,7 @@ index dff37a6..d2e3afd 100644 mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } -@@ -1441,7 +1683,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1441,7 +1694,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -83611,7 +83717,7 @@ index dff37a6..d2e3afd 100644 /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -1450,13 +1693,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1450,13 +1704,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (flags & MAP_FIXED) return addr; @@ -83628,31 +83734,37 @@ index dff37a6..d2e3afd 100644 - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) + return addr; + } } /* check if free_area_cache is useful for us */ -@@ -1471,7 +1719,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1470,10 +1729,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ if (addr > len) { - vma = find_vma(mm, addr-len); +- vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) ++ addr -= len; ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); +- return (mm->free_area_cache = addr-len); ++ return (mm->free_area_cache = addr); } -@@ -1488,7 +1736,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + if (mm->mmap_base < len) +@@ -1488,7 +1748,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len, offset)) ++ if (check_heap_stack_gap(vma, &addr, len, offset)) /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); -@@ -1497,8 +1745,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, +@@ -1497,8 +1757,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ @@ -83663,7 +83775,7 @@ index dff37a6..d2e3afd 100644 bottomup: /* -@@ -1507,13 +1755,21 @@ bottomup: +@@ -1507,13 +1767,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ @@ -83687,7 +83799,7 @@ index dff37a6..d2e3afd 100644 mm->cached_hole_size = ~0UL; return addr; -@@ -1522,6 +1778,12 @@ bottomup: +@@ -1522,6 +1790,12 @@ bottomup: void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { @@ -83700,7 +83812,7 @@ index dff37a6..d2e3afd 100644 /* * Is this a new hole at the highest possible address? 
*/ -@@ -1529,8 +1791,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) +@@ -1529,8 +1803,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) mm->free_area_cache = addr; /* dont allow allocations above current base */ @@ -83712,7 +83824,7 @@ index dff37a6..d2e3afd 100644 } unsigned long -@@ -1603,40 +1867,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +@@ -1603,40 +1879,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) EXPORT_SYMBOL(find_vma); @@ -83788,7 +83900,7 @@ index dff37a6..d2e3afd 100644 /* * Verify that the stack growth is acceptable and -@@ -1654,6 +1928,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1654,6 +1940,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns return -ENOMEM; /* Stack limit test */ @@ -83796,7 +83908,7 @@ index dff37a6..d2e3afd 100644 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; -@@ -1664,6 +1939,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1664,6 +1951,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns locked = mm->locked_vm + grow; limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); limit >>= PAGE_SHIFT; @@ -83804,7 +83916,7 @@ index dff37a6..d2e3afd 100644 if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } -@@ -1682,7 +1958,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1682,7 +1970,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns return -ENOMEM; /* Ok, everything looks good - let it rip */ @@ -83812,7 +83924,7 @@ index dff37a6..d2e3afd 100644 if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); -@@ -1694,37 +1969,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1694,37 +1981,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. 
*/ @@ -83870,7 +83982,7 @@ index dff37a6..d2e3afd 100644 unsigned long size, grow; size = address - vma->vm_start; -@@ -1739,6 +2025,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) +@@ -1739,6 +2037,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } } } @@ -83879,7 +83991,7 @@ index dff37a6..d2e3afd 100644 vma_unlock_anon_vma(vma); khugepaged_enter_vma_merge(vma); return error; -@@ -1752,6 +2040,8 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1752,6 +2052,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -83888,7 +84000,7 @@ index dff37a6..d2e3afd 100644 /* * We must make sure the anon_vma is allocated -@@ -1765,6 +2055,15 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1765,6 +2067,15 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; @@ -83904,7 +84016,7 @@ index dff37a6..d2e3afd 100644 vma_lock_anon_vma(vma); /* -@@ -1774,9 +2073,17 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1774,9 +2085,17 @@ int expand_downwards(struct vm_area_struct *vma, */ /* Somebody else might have raced and expanded it already */ @@ -83923,7 +84035,7 @@ index dff37a6..d2e3afd 100644 size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; -@@ -1786,18 +2093,48 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1786,18 +2105,48 @@ int expand_downwards(struct vm_area_struct *vma, if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; @@ -83972,7 +84084,7 @@ index dff37a6..d2e3afd 100644 return expand_upwards(vma, address); } -@@ -1820,6 +2157,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) +@@ -1820,6 +2169,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { @@ -83987,7 +84099,7 @@ index dff37a6..d2e3afd 100644 return expand_downwards(vma, address); } -@@ -1860,7 +2205,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) +@@ -1860,7 +2217,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) do { long nrpages = vma_pages(vma); @@ -84002,7 +84114,7 @@ index dff37a6..d2e3afd 100644 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); } while (vma); -@@ -1905,6 +2256,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -1905,6 +2268,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { @@ -84019,7 +84131,7 @@ index dff37a6..d2e3afd 100644 rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; -@@ -1933,14 +2294,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1933,14 +2306,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct *new; int err = -ENOMEM; @@ -84053,7 +84165,7 @@ index dff37a6..d2e3afd 100644 /* most fields are the same, copy all, and then fixup */ *new = *vma; -@@ -1953,6 +2333,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1953,6 +2345,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } @@ -84076,7 +84188,7 @@ index dff37a6..d2e3afd 100644 pol = mpol_dup(vma_policy(vma)); if (IS_ERR(pol)) { err = PTR_ERR(pol); -@@ -1978,6 +2374,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1978,6 +2386,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -84119,7 +84231,7 @@ index dff37a6..d2e3afd 100644 /* Success. */ if (!err) return 0; -@@ -1990,10 +2422,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1990,10 +2434,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, removed_exe_file_vma(mm); fput(new->vm_file); } @@ -84139,7 +84251,7 @@ index dff37a6..d2e3afd 100644 kmem_cache_free(vm_area_cachep, new); out_err: return err; -@@ -2006,6 +2446,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -2006,6 +2458,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { @@ -84155,7 +84267,7 @@ index dff37a6..d2e3afd 100644 if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; -@@ -2017,11 +2466,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2017,11 +2478,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ @@ -84186,7 +84298,7 @@ index dff37a6..d2e3afd 100644 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; -@@ -2096,6 +2564,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +@@ -2096,6 +2576,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) /* Fix up all other VM information */ remove_vma_list(mm, vma); @@ -84195,7 +84307,7 @@ index dff37a6..d2e3afd 100644 return 0; } -@@ -2108,22 +2578,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +@@ -2108,22 +2590,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) profile_munmap(addr); @@ -84224,7 +84336,7 @@ index dff37a6..d2e3afd 100644 /* * this is really a simplified "do_mmap". it only handles * anonymous maps. 
eventually we may be able to do some -@@ -2137,6 +2603,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2137,6 +2615,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; @@ -84232,7 +84344,7 @@ index dff37a6..d2e3afd 100644 len = PAGE_ALIGN(len); if (!len) -@@ -2148,16 +2615,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2148,16 +2627,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -84264,7 +84376,7 @@ index dff37a6..d2e3afd 100644 locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; -@@ -2174,22 +2655,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2174,22 +2667,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * Clear old maps. this also does some error checking for us */ @@ -84291,7 +84403,7 @@ index dff37a6..d2e3afd 100644 return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ -@@ -2203,7 +2684,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2203,7 +2696,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { @@ -84300,7 +84412,7 @@ index dff37a6..d2e3afd 100644 return -ENOMEM; } -@@ -2217,11 +2698,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2217,11 +2710,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); @@ -84315,7 +84427,7 @@ index dff37a6..d2e3afd 100644 return addr; } -@@ -2268,8 +2750,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2268,8 +2762,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ @@ -84327,7 +84439,7 @@ index dff37a6..d2e3afd 100644 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -@@ -2283,6 +2767,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2283,6 +2779,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -84341,7 +84453,7 @@ index dff37a6..d2e3afd 100644 /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index -@@ -2305,7 +2796,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2305,7 +2808,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -84364,7 +84476,7 @@ index dff37a6..d2e3afd 100644 return 0; } -@@ -2323,6 +2829,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2323,6 +2841,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, struct rb_node **rb_link, *rb_parent; struct mempolicy *pol; @@ -84373,7 +84485,7 @@ index dff37a6..d2e3afd 100644 /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
-@@ -2373,6 +2881,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2373,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return NULL; } @@ -84413,7 +84525,7 @@ index dff37a6..d2e3afd 100644 /* * Return true if the calling process may expand its vm space by the passed * number of pages -@@ -2384,6 +2925,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) +@@ -2384,6 +2937,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; @@ -84421,7 +84533,7 @@ index dff37a6..d2e3afd 100644 if (cur + npages > lim) return 0; return 1; -@@ -2454,6 +2996,22 @@ int install_special_mapping(struct mm_struct *mm, +@@ -2454,6 +3008,22 @@ int install_special_mapping(struct mm_struct *mm, vma->vm_start = addr; vma->vm_end = addr + len; diff --git a/3.9.8/0000_README b/3.9.9/0000_README index 1e4e620..be794cc 100644 --- a/3.9.8/0000_README +++ b/3.9.9/0000_README @@ -2,7 +2,7 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-2.9.1-3.9.8-201306302052.patch +Patch: 4420_grsecurity-2.9.1-3.9.9-201307050017.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.9.8/4420_grsecurity-2.9.1-3.9.8-201306302052.patch b/3.9.9/4420_grsecurity-2.9.1-3.9.9-201307050017.patch index 9c80933..1ae3c82 100644 --- a/3.9.8/4420_grsecurity-2.9.1-3.9.8-201306302052.patch +++ b/3.9.9/4420_grsecurity-2.9.1-3.9.9-201307050017.patch @@ -263,7 +263,7 @@ index 8ccbf27..afffeb4 100644 pcd. [PARIDE] diff --git a/Makefile b/Makefile -index b013cbe..4ca639b 100644 +index 9591325..1457ef3 100644 --- a/Makefile +++ b/Makefile @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -1475,7 +1475,7 @@ index 75fe66b..ba3dee4 100644 #endif diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h -index e1489c5..d418304 100644 +index 738fcba..7a43500 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,7 +116,7 @@ struct cpu_cache_fns { @@ -2102,7 +2102,7 @@ index cddda1f..ff357f7 100644 /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h -index 7e1f760..752fcb7 100644 +index 7e1f760..510061e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -18,6 +18,7 @@ @@ -2113,15 +2113,21 @@ index 7e1f760..752fcb7 100644 #define VERIFY_READ 0 #define VERIFY_WRITE 1 -@@ -60,10 +61,34 @@ extern int __put_user_bad(void); - #define USER_DS TASK_SIZE - #define get_fs() (current_thread_info()->addr_limit) +@@ -63,11 +64,35 @@ extern int __put_user_bad(void); + static inline void set_fs(mm_segment_t fs) + { + current_thread_info()->addr_limit = fs; +- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); ++ modify_domain(DOMAIN_KERNEL, fs ? 
DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); + } + + #define segment_eq(a,b) ((a) == (b)) +static inline void pax_open_userland(void) +{ + +#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (get_fs() == USER_DS) { ++ if (segment_eq(get_fs(), USER_DS) { + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); + modify_domain(DOMAIN_USER, DOMAIN_UDEREF); + } @@ -2133,7 +2139,7 @@ index 7e1f760..752fcb7 100644 +{ + +#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (get_fs() == USER_DS) { ++ if (segment_eq(get_fs(), USER_DS) { + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); + } @@ -2141,14 +2147,9 @@ index 7e1f760..752fcb7 100644 + +} + - static inline void set_fs(mm_segment_t fs) - { - current_thread_info()->addr_limit = fs; -- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); -+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); - } - - #define segment_eq(a,b) ((a) == (b)) + #define __addr_ok(addr) ({ \ + unsigned long flag; \ + __asm__("cmp %2, %0; movlo %0, #0" \ @@ -143,8 +168,12 @@ extern int __get_user_4(void *); #define get_user(x,p) \ @@ -2295,9 +2296,18 @@ index 96ee092..37f1844 100644 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c -index 60d3b73..d27ee09 100644 +index 60d3b73..e5a0f22 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c +@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops); + + /* networking */ + EXPORT_SYMBOL(csum_partial); +-EXPORT_SYMBOL(csum_partial_copy_from_user); ++EXPORT_SYMBOL(__csum_partial_copy_from_user); + EXPORT_SYMBOL(csum_partial_copy_nocheck); + EXPORT_SYMBOL(__csum_ipv6_magic); + @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero); #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); @@ -3453,7 +3463,7 @@ index bddce2b..3eb04e2 100644 extern void ux500_cpu_die(unsigned int cpu); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig -index 4045c49..4e26c79 100644 +index 4045c49..0263c07 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -425,7 +425,7 @@ config CPU_32v5 @@ -3461,7 +3471,7 @@ index 4045c49..4e26c79 100644 config CPU_32v6 bool - select CPU_USE_DOMAINS if CPU_V6 && MMU -+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC ++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF select TLS_REG_EMUL if !CPU_32v6K && !MMU config CPU_32v6K @@ -11545,7 +11555,7 @@ index cf1a471..3bc4cf8 100644 err |= copy_siginfo_to_user32(&frame->info, &ksig->info); diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S -index 474dc1b..be7bff5 100644 +index 474dc1b..24aaa3e 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -15,8 +15,10 @@ @@ -11583,11 +11593,11 @@ index 474dc1b..be7bff5 100644 +#endif + .endm + -+.macro pax_erase_kstack ++ .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif -+.endm ++ .endm + /* * 32bit SYSENTER instruction entry. 
@@ -14091,6 +14101,18 @@ index c0fa356..07a498a 100644 void unregister_nmi_handler(unsigned int, const char *); +diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h +index c878924..21f4889 100644 +--- a/arch/x86/include/asm/page.h ++++ b/arch/x86/include/asm/page.h +@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x))) + + #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) ++#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base)) + + #define __boot_va(x) __va(x) + #define __boot_pa(x) __pa(x) diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 0f1ddee..e2fc3d1 100644 --- a/arch/x86/include/asm/page_64.h @@ -18228,7 +18250,7 @@ index 9b9f18b..9fcaa04 100644 #include <asm/processor.h> #include <asm/fcntl.h> diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S -index 8f3e2de..934870f 100644 +index 8f3e2de..caecc4e 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -177,13 +177,153 @@ @@ -18326,11 +18348,11 @@ index 8f3e2de..934870f 100644 +ENDPROC(pax_exit_kernel) +#endif + -+.macro pax_erase_kstack ++ .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif -+.endm ++ .endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +/* @@ -18988,7 +19010,7 @@ index 8f3e2de..934870f 100644 /* diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S -index c1d01e6..7f633850 100644 +index c1d01e6..a88cf02 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -59,6 +59,8 @@ @@ -19326,11 +19348,11 @@ index c1d01e6..7f633850 100644 +#endif + .endm + -+.macro pax_erase_kstack ++ .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif -+.endm ++ .endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +ENTRY(pax_erase_kstack) @@ -19900,9 +19922,12 @@ index c1d01e6..7f633850 100644 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ xen_hvm_callback_vector xen_evtchn_do_upcall -@@ -1498,16 +1907,31 @@ ENTRY(paranoid_exit) +@@ -1496,18 +1905,33 @@ ENTRY(paranoid_exit) + DEFAULT_FRAME + DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF_DEBUG - testl %ebx,%ebx /* swapgs needed? */ +- testl %ebx,%ebx /* swapgs needed? */ ++ testl $1,%ebx /* swapgs needed? */ jnz paranoid_restore - testl $3,CS(%rsp) + testb $3,CS(%rsp) @@ -19966,6 +19991,15 @@ index c1d01e6..7f633850 100644 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ +@@ -1606,7 +2031,7 @@ ENTRY(error_exit) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + GET_THREAD_INFO(%rcx) +- testl %eax,%eax ++ testl $1,%eax + jne retint_kernel + LOCKDEP_SYS_EXIT_IRQ + movl TI_flags(%rcx),%edx @@ -1615,7 +2040,7 @@ ENTRY(error_exit) jnz retint_careful jmp retint_swapgs @@ -20118,9 +20152,50 @@ index 42a392a..fbbd930 100644 return -EFAULT; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c -index 8f3201d..aa860bf 100644 +index 8f3201d..6898c0c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c +@@ -67,12 +67,12 @@ again: + pgd = *pgd_p; + + /* +- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is +- * critical -- __PAGE_OFFSET would point us back into the dynamic ++ * The use of __early_va rather than __va here is critical: ++ * __va would point us back into the dynamic + * range and we might end up looping forever... 
+ */ + if (pgd) +- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); ++ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK)); + else { + if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { + reset_early_page_tables(); +@@ -82,13 +82,13 @@ again: + pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; + for (i = 0; i < PTRS_PER_PUD; i++) + pud_p[i] = 0; +- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; ++ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE; + } + pud_p += pud_index(address); + pud = *pud_p; + + if (pud) +- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); ++ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK)); + else { + if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { + reset_early_page_tables(); +@@ -98,7 +98,7 @@ again: + pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; + for (i = 0; i < PTRS_PER_PMD; i++) + pmd_p[i] = 0; +- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; ++ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE; + } + pmd = (physaddr & PMD_MASK) + early_pmd_flags; + pmd_p[pmd_index(address)] = pmd; @@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data) if (console_loglevel == 10) early_printk("Kernel alive\n"); @@ -20562,7 +20637,7 @@ index 73afd11..d1670f5 100644 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S -index 321d65e..7830f05 100644 +index 321d65e..ad8817d 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -20,6 +20,8 @@ @@ -20587,23 +20662,34 @@ index 321d65e..7830f05 100644 .text __HEAD -@@ -89,11 +97,15 @@ startup_64: +@@ -89,11 +97,23 @@ startup_64: * Fixup the physical addresses in the page table */ addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) +- addq %rbp, level3_kernel_pgt + (510*8)(%rip) +- addq %rbp, level3_kernel_pgt + (511*8)(%rip) ++ addq %rbp, level3_ident_pgt + (0*8)(%rip) ++#ifndef CONFIG_XEN ++ addq %rbp, level3_ident_pgt + (1*8)(%rip) ++#endif - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) +- addq %rbp, level2_fixmap_pgt + (506*8)(%rip) ++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) ++ ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) ++ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip) ++ + addq %rbp, level2_fixmap_pgt + (507*8)(%rip) /* * Set up the identity mapping for the switchover. These -@@ -177,8 +189,8 @@ ENTRY(secondary_startup_64) +@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64) movq $(init_level4_pgt - __START_KERNEL_map), %rax 1: @@ -20614,7 +20700,7 @@ index 321d65e..7830f05 100644 movq %rcx, %cr4 /* Setup early boot stage 4 level pagetables. 
*/ -@@ -199,10 +211,18 @@ ENTRY(secondary_startup_64) +@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ @@ -20634,7 +20720,7 @@ index 321d65e..7830f05 100644 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -@@ -282,6 +302,7 @@ ENTRY(secondary_startup_64) +@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64) * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, * address given in m16:64. */ @@ -20642,7 +20728,7 @@ index 321d65e..7830f05 100644 movq initial_code(%rip),%rax pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs -@@ -388,7 +409,7 @@ ENTRY(early_idt_handler) +@@ -388,7 +417,7 @@ ENTRY(early_idt_handler) call dump_stack #ifdef CONFIG_KALLSYMS leaq early_idt_ripmsg(%rip),%rdi @@ -20651,7 +20737,7 @@ index 321d65e..7830f05 100644 call __print_symbol #endif #endif /* EARLY_PRINTK */ -@@ -416,6 +437,7 @@ ENDPROC(early_idt_handler) +@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler) early_recursion_flag: .long 0 @@ -20659,9 +20745,12 @@ index 321d65e..7830f05 100644 #ifdef CONFIG_EARLY_PRINTK early_idt_msg: .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" -@@ -445,27 +467,50 @@ NEXT_PAGE(early_dynamic_pgts) +@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt) + NEXT_PAGE(early_dynamic_pgts) + .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 - .data +- .data ++ .section .rodata,"a",@progbits -#ifndef CONFIG_XEN NEXT_PAGE(init_level4_pgt) @@ -20718,7 +20807,7 @@ index 321d65e..7830f05 100644 NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 -@@ -473,6 +518,9 @@ NEXT_PAGE(level3_kernel_pgt) +@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE @@ -20728,7 +20817,7 @@ index 321d65e..7830f05 100644 NEXT_PAGE(level2_kernel_pgt) /* * 512 MB kernel mapping. We spend a full page on this pagetable -@@ -488,39 +536,64 @@ NEXT_PAGE(level2_kernel_pgt) +@@ -488,39 +544,64 @@ NEXT_PAGE(level2_kernel_pgt) KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) @@ -28081,7 +28170,7 @@ index ae1aa71..d9bea75 100644 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 0c13708..689fe7f 100644 +index 0c13708..ca05f23 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -4,6 +4,7 @@ @@ -28101,7 +28190,23 @@ index 0c13708..689fe7f 100644 #include "mm_internal.h" -@@ -464,10 +467,40 @@ void __init init_mem_mapping(void) +@@ -448,7 +451,15 @@ void __init init_mem_mapping(void) + early_ioremap_page_table_range_init(); + #endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ load_cr3(get_cpu_pgd(0)); ++#else + load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + + early_memtest(0, max_pfn_mapped << PAGE_SHIFT); +@@ -464,10 +475,40 @@ void __init init_mem_mapping(void) * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. 
*/ @@ -28143,7 +28248,7 @@ index 0c13708..689fe7f 100644 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) -@@ -524,8 +557,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) +@@ -524,8 +565,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) #endif } @@ -28262,7 +28367,7 @@ index 0c13708..689fe7f 100644 (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 2d19001..6a1046c 100644 +index 2d19001..e549d98 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void); @@ -28476,20 +28581,7 @@ index 2d19001..6a1046c 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ -@@ -752,6 +754,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - #ifdef CONFIG_FLATMEM - BUG_ON(!mem_map); - #endif -@@ -780,7 +788,7 @@ void __init mem_init(void) +@@ -780,7 +782,7 @@ void __init mem_init(void) after_bootmem = 1; codesize = (unsigned long) &_etext - (unsigned long) &_text; @@ -28498,7 +28590,7 @@ index 2d19001..6a1046c 100644 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " -@@ -821,10 +829,10 @@ void __init mem_init(void) +@@ -821,10 +823,10 @@ void __init mem_init(void) ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, @@ -28512,7 +28604,7 @@ index 2d19001..6a1046c 100644 ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* -@@ -914,6 +922,7 @@ void set_kernel_text_rw(void) +@@ -914,6 +916,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; @@ -28520,7 +28612,7 @@ index 2d19001..6a1046c 100644 pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); -@@ -928,6 +937,7 @@ void set_kernel_text_ro(void) +@@ -928,6 +931,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; @@ -28528,7 +28620,7 @@ index 2d19001..6a1046c 100644 pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); -@@ -956,6 +966,7 @@ void mark_rodata_ro(void) +@@ -956,6 +960,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; @@ -28537,7 +28629,7 @@ index 2d19001..6a1046c 100644 printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index 474e28f..647dd12 100644 +index 474e28f..f016b6e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on); @@ -28654,20 +28746,7 @@ index 474e28f..647dd12 100644 spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } -@@ -1065,6 +1079,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - /* clear_bss() already clear the empty_zero_page */ - - reservedpages = 0; -@@ -1224,8 +1244,8 @@ int kern_addr_valid(unsigned long addr) +@@ -1224,8 +1238,8 @@ int kern_addr_valid(unsigned long addr) static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), @@ 
-28678,7 +28757,7 @@ index 474e28f..647dd12 100644 }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) -@@ -1259,7 +1279,7 @@ int in_gate_area_no_mm(unsigned long addr) +@@ -1259,7 +1273,7 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { @@ -30390,7 +30469,7 @@ index c77b24a..c979855 100644 } EXPORT_SYMBOL(pcibios_set_irq_routing); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c -index 40e4469..1ab536e 100644 +index 40e4469..0592924 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void) @@ -30416,7 +30495,7 @@ index 40e4469..1ab536e 100644 gdt_descr.address = __pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); -@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void) +@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void) { struct desc_ptr gdt_descr; @@ -30431,6 +30510,44 @@ index 40e4469..1ab536e 100644 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id())); ++#else + load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + + local_irq_restore(efi_rt_eflags); +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 2b20038..eaf558f 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -75,6 +75,11 @@ void __init efi_call_phys_prelog(void) + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + } + +@@ -88,6 +93,11 @@ void __init efi_call_phys_epilog(void) + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id())); ++#endif ++ + __flush_tlb_all(); + local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index fbe66e6..eae5e38 100644 --- a/arch/x86/platform/efi/efi_stub_32.S @@ -31988,7 +32105,7 @@ index 34c8216..f56c828 100644 unsigned long timeout_msec) { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index cf15aee..e0b7078 100644 +index 8038ee3..a19a6e6 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) @@ -37624,10 +37741,10 @@ index b972d43..8943713 100644 /** diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c -index 7c11ff3..a2a0457 100644 +index dcfea4e..f4226b2 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c -@@ -348,7 +348,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) +@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) void panic_if_irq_remap(const char *msg) { if (irq_remapping_enabled) @@ -37636,7 +37753,7 @@ index 7c11ff3..a2a0457 100644 } static void ir_ack_apic_edge(struct irq_data *data) -@@ -369,10 +369,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p) +@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p) void irq_remap_modify_chip_defaults(struct irq_chip *chip) { @@ -40388,62 +40505,6 @@ index a4fe5f1..6c9e77f 100644 .kind = 
"vxlan", .maxtype = IFLA_VXLAN_MAX, .policy = vxlan_policy, -diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c -index 147614e..6a8a382 100644 ---- a/drivers/net/wan/dlci.c -+++ b/drivers/net/wan/dlci.c -@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci) - struct frad_local *flp; - struct net_device *master, *slave; - int err; -+ bool found = false; -+ -+ rtnl_lock(); - - /* validate slave device */ - master = __dev_get_by_name(&init_net, dlci->devname); -- if (!master) -- return -ENODEV; -+ if (!master) { -+ err = -ENODEV; -+ goto out; -+ } -+ -+ list_for_each_entry(dlp, &dlci_devs, list) { -+ if (dlp->master == master) { -+ found = true; -+ break; -+ } -+ } -+ if (!found) { -+ err = -ENODEV; -+ goto out; -+ } - - if (netif_running(master)) { -- return -EBUSY; -+ err = -EBUSY; -+ goto out; - } - - dlp = netdev_priv(master); - slave = dlp->slave; - flp = netdev_priv(slave); - -- rtnl_lock(); - err = (*flp->deassoc)(slave, master); - if (!err) { - list_del(&dlp->list); -@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci) - - dev_put(slave); - } -+out: - rtnl_unlock(); -- - return err; - } - diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 5ac5f7a..5f82012 100644 --- a/drivers/net/wireless/at76c50x-usb.c @@ -50560,7 +50621,7 @@ index 6a16053..2155147 100644 return rc; } diff --git a/fs/exec.c b/fs/exec.c -index 6d56ff2..f65b4ca 100644 +index 0d5c76f..3d4585e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,8 +55,20 @@ @@ -50584,7 +50645,7 @@ index 6d56ff2..f65b4ca 100644 #include <asm/mmu_context.h> #include <asm/tlb.h> -@@ -66,6 +78,18 @@ +@@ -66,17 +78,32 @@ #include <trace/events/sched.h> @@ -50603,7 +50664,12 @@ index 6d56ff2..f65b4ca 100644 int suid_dumpable = 0; static LIST_HEAD(formats); -@@ -75,8 +99,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert) + static DEFINE_RWLOCK(binfmt_lock); + ++extern int gr_process_kernel_exec_ban(void); ++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm); ++ + void __register_binfmt(struct linux_binfmt * fmt, int insert) { BUG_ON(!fmt); write_lock(&binfmt_lock); @@ -50614,7 +50680,7 @@ index 6d56ff2..f65b4ca 100644 write_unlock(&binfmt_lock); } -@@ -85,7 +109,7 @@ EXPORT_SYMBOL(__register_binfmt); +@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt); void unregister_binfmt(struct linux_binfmt * fmt) { write_lock(&binfmt_lock); @@ -50623,7 +50689,7 @@ index 6d56ff2..f65b4ca 100644 write_unlock(&binfmt_lock); } -@@ -180,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; @@ -50645,7 +50711,7 @@ index 6d56ff2..f65b4ca 100644 return NULL; if (write) { -@@ -207,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (size <= ARG_MAX) return page; @@ -50663,7 +50729,7 @@ index 6d56ff2..f65b4ca 100644 /* * Limit to 1/4-th the stack size for the argv+env strings. 
* This ensures that: -@@ -266,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; @@ -50675,7 +50741,7 @@ index 6d56ff2..f65b4ca 100644 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); -@@ -276,6 +308,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -50688,7 +50754,7 @@ index 6d56ff2..f65b4ca 100644 return 0; err: up_write(&mm->mmap_sem); -@@ -396,7 +434,7 @@ struct user_arg_ptr { +@@ -396,7 +437,7 @@ struct user_arg_ptr { } ptr; }; @@ -50697,7 +50763,7 @@ index 6d56ff2..f65b4ca 100644 { const char __user *native; -@@ -405,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) +@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) compat_uptr_t compat; if (get_user(compat, argv.ptr.compat + nr)) @@ -50714,7 +50780,7 @@ index 6d56ff2..f65b4ca 100644 return native; } -@@ -431,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max) +@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max) if (!p) break; @@ -50723,7 +50789,7 @@ index 6d56ff2..f65b4ca 100644 return -EFAULT; if (i >= max) -@@ -466,7 +504,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, +@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, ret = -EFAULT; str = get_user_arg_ptr(argv, argc); @@ -50732,7 +50798,7 @@ index 6d56ff2..f65b4ca 100644 goto out; len = strnlen_user(str, MAX_ARG_STRLEN); -@@ -548,7 +586,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, +@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, int r; mm_segment_t oldfs = get_fs(); struct user_arg_ptr argv = { @@ -50741,7 +50807,7 @@ index 6d56ff2..f65b4ca 100644 }; set_fs(KERNEL_DS); -@@ -583,7 +621,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) unsigned long new_end = old_end - shift; struct mmu_gather tlb; @@ -50751,7 +50817,7 @@ index 6d56ff2..f65b4ca 100644 /* * ensure there are no vmas between where we want to go -@@ -592,6 +631,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) if (vma != find_vma(mm, new_start)) return -EFAULT; @@ -50762,7 +50828,7 @@ index 6d56ff2..f65b4ca 100644 /* * cover the whole range: [new_start, old_end) */ -@@ -672,10 +715,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); @@ -50773,7 +50839,7 @@ index 6d56ff2..f65b4ca 100644 stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; -@@ -687,8 +726,28 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->exec -= stack_shift; down_write(&mm->mmap_sem); @@ -50802,7 +50868,7 @@ index 6d56ff2..f65b4ca 100644 /* * Adjust stack execute permissions; explicitly enable for * 
EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone -@@ -707,13 +766,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; BUG_ON(prev != vma); @@ -50816,7 +50882,7 @@ index 6d56ff2..f65b4ca 100644 /* mprotect_fixup is overkill to remove the temporary stack flags */ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; -@@ -737,6 +789,27 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm, #endif current->mm->start_stack = bprm->p; ret = expand_stack(vma, stack_base); @@ -50844,7 +50910,7 @@ index 6d56ff2..f65b4ca 100644 if (ret) ret = -EFAULT; -@@ -772,6 +845,8 @@ struct file *open_exec(const char *name) +@@ -772,6 +848,8 @@ struct file *open_exec(const char *name) fsnotify_open(file); @@ -50853,7 +50919,7 @@ index 6d56ff2..f65b4ca 100644 err = deny_write_access(file); if (err) goto exit; -@@ -795,7 +870,7 @@ int kernel_read(struct file *file, loff_t offset, +@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset, old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ @@ -50862,37 +50928,7 @@ index 6d56ff2..f65b4ca 100644 set_fs(old_fs); return result; } -@@ -1136,13 +1211,6 @@ void setup_new_exec(struct linux_binprm * bprm) - set_dumpable(current->mm, suid_dumpable); - } - -- /* -- * Flush performance counters when crossing a -- * security domain: -- */ -- if (!get_dumpable(current->mm)) -- perf_event_exit_task(current); -- - /* An exec changes our domain. We are no longer part of the thread - group */ - -@@ -1206,6 +1274,15 @@ void install_exec_creds(struct linux_binprm *bprm) - - commit_creds(bprm->cred); - bprm->cred = NULL; -+ -+ /* -+ * Disable monitoring for regular users -+ * when executing setuid binaries. Must -+ * wait until new credentials are committed -+ * by commit_creds() above -+ */ -+ if (get_dumpable(current->mm) != SUID_DUMP_USER) -+ perf_event_exit_task(current); - /* - * cred_guard_mutex must be held at least to this point to prevent - * ptrace_attach() from altering our determination of the task's -@@ -1250,7 +1327,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm) +@@ -1252,7 +1330,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm) } rcu_read_unlock(); @@ -50901,7 +50937,7 @@ index 6d56ff2..f65b4ca 100644 bprm->unsafe |= LSM_UNSAFE_SHARE; } else { res = -EAGAIN; -@@ -1450,6 +1527,31 @@ int search_binary_handler(struct linux_binprm *bprm) +@@ -1452,6 +1530,31 @@ int search_binary_handler(struct linux_binprm *bprm) EXPORT_SYMBOL(search_binary_handler); @@ -50933,7 +50969,7 @@ index 6d56ff2..f65b4ca 100644 /* * sys_execve() executes a new program. 
*/ -@@ -1457,6 +1559,11 @@ static int do_execve_common(const char *filename, +@@ -1459,6 +1562,11 @@ static int do_execve_common(const char *filename, struct user_arg_ptr argv, struct user_arg_ptr envp) { @@ -50945,7 +50981,7 @@ index 6d56ff2..f65b4ca 100644 struct linux_binprm *bprm; struct file *file; struct files_struct *displaced; -@@ -1464,6 +1571,8 @@ static int do_execve_common(const char *filename, +@@ -1466,6 +1574,8 @@ static int do_execve_common(const char *filename, int retval; const struct cred *cred = current_cred(); @@ -50954,7 +50990,7 @@ index 6d56ff2..f65b4ca 100644 /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs -@@ -1504,12 +1613,27 @@ static int do_execve_common(const char *filename, +@@ -1506,12 +1616,22 @@ static int do_execve_common(const char *filename, if (IS_ERR(file)) goto out_unmark; @@ -50969,11 +51005,6 @@ index 6d56ff2..f65b4ca 100644 bprm->filename = filename; bprm->interp = filename; -+ if (gr_process_user_ban()) { -+ retval = -EPERM; -+ goto out_file; -+ } -+ + if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) { + retval = -EACCES; + goto out_file; @@ -50982,7 +51013,7 @@ index 6d56ff2..f65b4ca 100644 retval = bprm_mm_init(bprm); if (retval) goto out_file; -@@ -1526,24 +1650,65 @@ static int do_execve_common(const char *filename, +@@ -1528,24 +1648,70 @@ static int do_execve_common(const char *filename, if (retval < 0) goto out; @@ -51002,6 +51033,11 @@ index 6d56ff2..f65b4ca 100644 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; +#endif + ++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) { ++ retval = -EPERM; ++ goto out_fail; ++ } ++ + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out_fail; @@ -51052,7 +51088,7 @@ index 6d56ff2..f65b4ca 100644 current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); -@@ -1552,6 +1717,14 @@ static int do_execve_common(const char *filename, +@@ -1554,6 +1720,14 @@ static int do_execve_common(const char *filename, put_files_struct(displaced); return retval; @@ -51067,7 +51103,7 @@ index 6d56ff2..f65b4ca 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1700,3 +1873,283 @@ asmlinkage long compat_sys_execve(const char __user * filename, +@@ -1702,3 +1876,283 @@ asmlinkage long compat_sys_execve(const char __user * filename, return error; } #endif @@ -56788,67 +56824,6 @@ index 69d4889..a810bd4 100644 { if (sbi->s_bytesex == BYTESEX_PDP) return PDP_swab((__force __u32)n); -diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c -index de08c92f..732cd63 100644 ---- a/fs/ubifs/dir.c -+++ b/fs/ubifs/dir.c -@@ -364,6 +364,24 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) - */ - return 0; - -+ if (file->f_version == 0) { -+ /* -+ * The file was seek'ed, which means that @file->private_data -+ * is now invalid. This may also be just the first -+ * 'ubifs_readdir()' invocation, in which case -+ * @file->private_data is NULL, and the below code is -+ * basically a no-op. -+ */ -+ kfree(file->private_data); -+ file->private_data = NULL; -+ } -+ -+ /* -+ * 'generic_file_llseek()' unconditionally sets @file->f_version to -+ * zero, and we use this for detecting whether the file was seek'ed. -+ */ -+ file->f_version = 1; -+ - /* File positions 0 and 1 correspond to "." and ".." 
*/ - if (file->f_pos == 0) { - ubifs_assert(!file->private_data); -@@ -438,6 +456,14 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) - file->f_pos = key_hash_flash(c, &dent->key); - file->private_data = dent; - cond_resched(); -+ -+ if (file->f_version == 0) -+ /* -+ * The file was seek'ed meanwhile, lets return and start -+ * reading direntries from the new position on the next -+ * invocation. -+ */ -+ return 0; - } - - out: -@@ -448,15 +474,13 @@ out: - - kfree(file->private_data); - file->private_data = NULL; -+ /* 2 is a special value indicating that there are no more direntries */ - file->f_pos = 2; - return 0; - } - --/* If a directory is seeked, we have to free saved readdir() state */ - static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) - { -- kfree(file->private_data); -- file->private_data = NULL; - return generic_file_llseek(file, offset, whence); - } - diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index e18b988..f1d4ad0f 100644 --- a/fs/ubifs/io.c @@ -57091,10 +57066,10 @@ index ca9ecaa..60100c7 100644 kfree(s); diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 -index 0000000..4fb1dde +index 0000000..c9c4ac3 --- /dev/null +++ b/grsecurity/Kconfig -@@ -0,0 +1,1053 @@ +@@ -0,0 +1,1054 @@ +# +# grecurity configuration +# @@ -57251,8 +57226,9 @@ index 0000000..4fb1dde + fork until the administrator is able to assess the situation and + restart the daemon. + In the suid/sgid case, the attempt is logged, the user has all their -+ processes terminated, and they are prevented from executing any further -+ processes for 15 minutes. ++ existing instances of the suid/sgid binary terminated and will ++ be unable to execute any suid/sgid binaries for 15 minutes. ++ + It is recommended that you also enable signal logging in the auditing + section so that logs are generated when a process triggers a suspicious + signal. 
@@ -58194,7 +58170,7 @@ index 0000000..1b9afa9 +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 -index 0000000..1248ee0 +index 0000000..0d5c602 --- /dev/null +++ b/grsecurity/gracl.c @@ -0,0 +1,4073 @@ @@ -60545,7 +60521,7 @@ index 0000000..1248ee0 + return; +} + -+extern int __gr_process_user_ban(struct user_struct *user); ++extern int gr_process_kernel_setuid_ban(struct user_struct *user); + +int +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs) @@ -60559,7 +60535,7 @@ index 0000000..1248ee0 + int fsok = 0; + uid_t globalreal, globaleffective, globalfs; + -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) + struct user_struct *user; + + if (!uid_valid(real)) @@ -60573,7 +60549,7 @@ index 0000000..1248ee0 + if (user == NULL) + goto skipit; + -+ if (__gr_process_user_ban(user)) { ++ if (gr_process_kernel_setuid_ban(user)) { + /* for find_user */ + free_uid(user); + return 1; @@ -63617,7 +63593,7 @@ index 0000000..39645c9 +} diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c new file mode 100644 -index 0000000..4dcc92a +index 0000000..3c38bfe --- /dev/null +++ b/grsecurity/gracl_segv.c @@ -0,0 +1,305 @@ @@ -63859,7 +63835,7 @@ index 0000000..4dcc92a + if (likely(tsk != task)) { + // if this thread has the same subject as the one that triggered + // RES_CRASH and it's the same binary, kill it -+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) ++ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } + } while_each_thread(tsk2, tsk); @@ -65944,12 +65920,13 @@ index 0000000..f7f29aa +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..e09715a +index 0000000..4e29cc7 --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,222 @@ +@@ -0,0 +1,246 @@ +#include <linux/kernel.h> +#include <linux/sched.h> ++#include <linux/fs.h> +#include <linux/delay.h> +#include <linux/grsecurity.h> +#include <linux/grinternal.h> @@ -66049,7 +66026,7 @@ index 0000000..e09715a + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) { + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; + daemon = 1; @@ -66066,14 +66043,15 @@ index 0000000..e09715a + user = find_user(uid); + if (user == NULL) + goto unlock; -+ user->banned = 1; -+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME; -+ if (user->ban_expires == ~0UL) -+ user->ban_expires--; ++ user->suid_banned = 1; ++ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME; ++ if (user->suid_ban_expires == ~0UL) ++ user->suid_ban_expires--; + ++ /* only kill other threads of the same binary, from the same user */ + do_each_thread(tsk2, tsk) { + cred2 = __task_cred(tsk); -+ if (tsk != p && uid_eq(cred2->uid, uid)) ++ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + } @@ -66084,8 +66062,7 @@ index 0000000..e09715a + rcu_read_unlock(); + + if (gr_is_global_nonroot(uid)) -+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", -+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60); ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, 
p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60); + else if (daemon) + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); + @@ -66132,11 +66109,10 @@ index 0000000..e09715a + GR_GLOBAL_UID(uid)); + /* we intentionally leak this ref */ + user = get_uid(current->cred->user); -+ if (user) { -+ user->banned = 1; -+ user->ban_expires = ~0UL; -+ } ++ if (user) ++ user->kernel_banned = 1; + ++ /* kill all processes of this user */ + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + cred = __task_cred(tsk); @@ -66148,25 +66124,49 @@ index 0000000..e09715a +#endif +} + -+int __gr_process_user_ban(struct user_struct *user) ++#ifdef CONFIG_GRKERNSEC_BRUTE ++static bool suid_ban_expired(struct user_struct *user) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ if (unlikely(user->banned)) { -+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) { -+ user->banned = 0; -+ user->ban_expires = 0; -+ free_uid(user); -+ } else -+ return -EPERM; ++ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) { ++ user->suid_banned = 0; ++ user->suid_ban_expires = 0; ++ free_uid(user); ++ return true; + } ++ ++ return false; ++} ++#endif ++ ++int gr_process_kernel_exec_ban(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(current->cred->user->kernel_banned)) ++ return -EPERM; ++#endif ++ return 0; ++} ++ ++int gr_process_kernel_setuid_ban(struct user_struct *user) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(user->kernel_banned)) ++ gr_fake_force_sig(SIGKILL, current); +#endif + return 0; +} + -+int gr_process_user_ban(void) ++int gr_process_suid_exec_ban(const struct linux_binprm *bprm) +{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ return __gr_process_user_ban(current->cred->user); ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct user_struct *user = current->cred->user; ++ if (unlikely(user->suid_banned)) { ++ if (suid_ban_expired(user)) ++ return 0; ++ /* disallow execution of suid binaries only */ ++ else if (!uid_eq(bprm->cred->euid, current->cred->uid)) ++ return -EPERM; ++ } +#endif + return 0; +} @@ -69201,10 +69201,10 @@ index 0000000..be66033 +#endif diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h new file mode 100644 -index 0000000..5402bce +index 0000000..12994b5 --- /dev/null +++ b/include/linux/grinternal.h -@@ -0,0 +1,215 @@ +@@ -0,0 +1,227 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + @@ -69318,6 +69318,18 @@ index 0000000..5402bce + +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) + ++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2) ++{ ++ if (file1 && file2) { ++ const struct inode *inode1 = file1->f_path.dentry->d_inode; ++ const struct inode *inode2 = file2->f_path.dentry->d_inode; ++ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev) ++ return true; ++ } ++ ++ return false; ++} ++ +#define GR_CHROOT_CAPS {{ \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ @@ -69422,10 +69434,10 @@ index 0000000..5402bce +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..2bd4c8d +index 0000000..2f159b5 --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,111 @@ +@@ -0,0 +1,112 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] 
uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -69537,12 +69549,13 @@ index 0000000..2bd4c8d +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " ++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..d7ef0ac +index 0000000..d957f6d --- /dev/null +++ b/include/linux/grsecurity.h -@@ -0,0 +1,242 @@ +@@ -0,0 +1,241 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H +#include <linux/fs.h> @@ -69567,7 +69580,6 @@ index 0000000..d7ef0ac +void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); -+int gr_process_user_ban(void); + +char gr_roletype_to_char(void); + @@ -71444,7 +71456,7 @@ index 6dacb93..6174423 100644 static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) diff --git a/include/linux/sched.h b/include/linux/sched.h -index be4e742..7f9d593 100644 +index be4e742..01f1387 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -62,6 +62,7 @@ struct bio_list; @@ -71501,19 +71513,22 @@ index be4e742..7f9d593 100644 #ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; -@@ -683,6 +707,11 @@ struct user_struct { +@@ -683,6 +707,14 @@ struct user_struct { struct key *session_keyring; /* UID's default session keyring */ #endif -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ unsigned int banned; -+ unsigned long ban_expires; ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ unsigned char kernel_banned; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ unsigned char suid_banned; ++ unsigned long suid_ban_expires; +#endif + /* Hash table maintenance information */ struct hlist_node uidhash_node; kuid_t uid; -@@ -1082,7 +1111,7 @@ struct sched_class { +@@ -1082,7 +1114,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_move_group) (struct task_struct *p, int on_rq); #endif @@ -71522,7 +71537,7 @@ index be4e742..7f9d593 100644 struct load_weight { unsigned long weight, inv_weight; -@@ -1323,8 +1352,8 @@ struct task_struct { +@@ -1323,8 +1355,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ @@ -71533,7 +71548,7 @@ index be4e742..7f9d593 100644 cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; -@@ -1349,11 +1378,6 @@ struct task_struct { +@@ -1349,11 +1381,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -71545,7 +71560,7 @@ index be4e742..7f9d593 100644 char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) 
-@@ -1370,6 +1394,10 @@ struct task_struct { +@@ -1370,6 +1397,10 @@ struct task_struct { #endif /* CPU-specific state of this task */ struct thread_struct thread; @@ -71556,7 +71571,7 @@ index be4e742..7f9d593 100644 /* filesystem information */ struct fs_struct *fs; /* open file information */ -@@ -1443,6 +1471,10 @@ struct task_struct { +@@ -1443,6 +1474,10 @@ struct task_struct { gfp_t lockdep_reclaim_gfp; #endif @@ -71567,7 +71582,7 @@ index be4e742..7f9d593 100644 /* journalling filesystem info */ void *journal_info; -@@ -1481,6 +1513,10 @@ struct task_struct { +@@ -1481,6 +1516,10 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock */ struct list_head cg_list; #endif @@ -71578,7 +71593,7 @@ index be4e742..7f9d593 100644 #ifdef CONFIG_FUTEX struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT -@@ -1577,8 +1613,74 @@ struct task_struct { +@@ -1577,8 +1616,74 @@ struct task_struct { #ifdef CONFIG_UPROBES struct uprobe_task *utask; #endif @@ -71653,7 +71668,7 @@ index be4e742..7f9d593 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -@@ -1637,7 +1739,7 @@ struct pid_namespace; +@@ -1637,7 +1742,7 @@ struct pid_namespace; pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); @@ -71662,7 +71677,7 @@ index be4e742..7f9d593 100644 { return tsk->pid; } -@@ -2073,7 +2175,9 @@ void yield(void); +@@ -2073,7 +2178,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -71672,7 +71687,7 @@ index be4e742..7f9d593 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2106,6 +2210,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2106,6 +2213,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -71680,7 +71695,7 @@ index be4e742..7f9d593 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2272,7 +2377,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2272,7 +2380,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -71689,7 +71704,7 @@ index be4e742..7f9d593 100644 extern int allow_signal(int); extern int disallow_signal(int); -@@ -2463,9 +2568,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2463,9 +2571,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -82291,7 +82306,7 @@ index 79b7cf7..9944291 100644 capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); diff --git a/mm/mmap.c b/mm/mmap.c -index 0dceed8..bfcaf45 100644 +index 0dceed8..a559c2e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -33,6 +33,7 @@ @@ -82701,7 +82716,7 @@ index 0dceed8..bfcaf45 100644 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) +{ + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK)) -+ return (random32() & 0xFF) << PAGE_SHIFT; ++ return ((random32() & 0xFF) + 1) << PAGE_SHIFT; + + return 0; +} @@ -85978,20 +85993,10 @@ index 6a93614..1415549 100644 err = -EFAULT; break; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c -index c5f9cd6..dfc8ec1 100644 +index 04b32e1..dfc8ec1 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c -@@ -2743,6 +2743,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, - BT_DBG("conn 
%p, code 0x%2.2x, ident 0x%2.2x, len %u", - conn, code, ident, dlen); - -+ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) -+ return NULL; -+ - len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; - count = min_t(unsigned int, conn->mtu, len); - -@@ -3395,8 +3398,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, +@@ -3398,8 +3398,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, break; case L2CAP_CONF_RFC: @@ -86004,15 +86009,6 @@ index c5f9cd6..dfc8ec1 100644 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && rfc.mode != chan->mode) -@@ -4221,7 +4226,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, - struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; - u16 type, result; - -- if (cmd_len != sizeof(*rsp)) -+ if (cmd_len < sizeof(*rsp)) - return -EPROTO; - - type = __le16_to_cpu(rsp->type); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1bcfb84..dad9f98 100644 --- a/net/bluetooth/l2cap_sock.c @@ -88850,7 +88846,7 @@ index 843d8c4..cb04fa1 100644 if (local->use_chanctx) *chandef = local->monitor_chandef; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h -index 5672533..6738c93 100644 +index 4e74cd6..963b8a1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -28,6 +28,7 @@ @@ -89008,7 +89004,7 @@ index c97a065..ff61928 100644 return p; diff --git a/net/mac80211/util.c b/net/mac80211/util.c -index 0f38f43..e53d4a8 100644 +index 1f4b908..c4def45 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1388,7 +1388,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) diff --git a/3.9.8/4425_grsec_remove_EI_PAX.patch b/3.9.9/4425_grsec_remove_EI_PAX.patch index 415fda5..415fda5 100644 --- a/3.9.8/4425_grsec_remove_EI_PAX.patch +++ b/3.9.9/4425_grsec_remove_EI_PAX.patch diff --git a/3.9.8/4427_force_XATTR_PAX_tmpfs.patch b/3.9.9/4427_force_XATTR_PAX_tmpfs.patch index e2a9551..e2a9551 100644 --- a/3.9.8/4427_force_XATTR_PAX_tmpfs.patch +++ b/3.9.9/4427_force_XATTR_PAX_tmpfs.patch diff --git a/3.9.8/4430_grsec-remove-localversion-grsec.patch b/3.9.9/4430_grsec-remove-localversion-grsec.patch index 31cf878..31cf878 100644 --- a/3.9.8/4430_grsec-remove-localversion-grsec.patch +++ b/3.9.9/4430_grsec-remove-localversion-grsec.patch diff --git a/3.9.8/4435_grsec-mute-warnings.patch b/3.9.9/4435_grsec-mute-warnings.patch index ed941d5..ed941d5 100644 --- a/3.9.8/4435_grsec-mute-warnings.patch +++ b/3.9.9/4435_grsec-mute-warnings.patch diff --git a/3.9.8/4440_grsec-remove-protected-paths.patch b/3.9.9/4440_grsec-remove-protected-paths.patch index 637934a..637934a 100644 --- a/3.9.8/4440_grsec-remove-protected-paths.patch +++ b/3.9.9/4440_grsec-remove-protected-paths.patch diff --git a/3.9.8/4450_grsec-kconfig-default-gids.patch b/3.9.9/4450_grsec-kconfig-default-gids.patch index f144c0e..f144c0e 100644 --- a/3.9.8/4450_grsec-kconfig-default-gids.patch +++ b/3.9.9/4450_grsec-kconfig-default-gids.patch diff --git a/3.9.8/4465_selinux-avc_audit-log-curr_ip.patch b/3.9.9/4465_selinux-avc_audit-log-curr_ip.patch index b0786d4..b0786d4 100644 --- a/3.9.8/4465_selinux-avc_audit-log-curr_ip.patch +++ b/3.9.9/4465_selinux-avc_audit-log-curr_ip.patch diff --git a/3.9.8/4470_disable-compat_vdso.patch b/3.9.9/4470_disable-compat_vdso.patch index 424d91f..424d91f 100644 --- a/3.9.8/4470_disable-compat_vdso.patch +++ b/3.9.9/4470_disable-compat_vdso.patch diff --git a/3.9.8/4475_emutramp_default_on.patch 
b/3.9.9/4475_emutramp_default_on.patch index 27bfc2d..27bfc2d 100644 --- a/3.9.8/4475_emutramp_default_on.patch +++ b/3.9.9/4475_emutramp_default_on.patch |