Commit fbe1bf14 authored by Ingo Molnar

Merge tag 'pr-20141220-x86-vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux into x86/urgent

Pull a VDSO fix from Andy Lutomirski:

  "One vdso fix for a longstanding ASLR bug that's been in the news lately.

   The vdso base address has always been randomized, and I don't think there's
   anything particularly wrong with the range over which it's randomized,
   but the implementation seems to have been buggy since the very beginning.

   This fixes the implementation to remove a large bias that caused a small
   fraction of possible vdso load addresses to be vastly more likely than
   the rest of the possible addresses."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 97bf6af1 394f56fe
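
To see where the bias came from: the old code picked one of PTRS_PER_PTE (512) page offsets uniformly, then clamped any overshoot past `end` back to `end`, so that single address absorbed the probability mass of every overshooting offset. The user-space sketch below simulates that clamp. The constants mirror x86-64 (4 KiB pages, 2 MiB PMDs), but the stack-top address, the rand()-based random source, and the trial count are illustrative assumptions, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PMD_SIZE     (1UL << 21)	/* 2 MiB on x86-64 */
#define PMD_MASK     (~(PMD_SIZE - 1))
#define PTRS_PER_PTE 512

int main(void)
{
	/* Hypothetical stack top, 300 pages into a 2 MiB (PMD) region. */
	unsigned long start = 0x7fff00000000UL + 300 * PAGE_SIZE;
	unsigned long len = 2 * PAGE_SIZE;	/* stand-in for the vdso size */
	unsigned long end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;
	long clamped = 0, trials = 1000000;

	srand(1);
	for (long i = 0; i < trials; i++) {
		/* The old selection: 512 possible offsets, overshoot clamped. */
		unsigned offset = (unsigned)rand() & (PTRS_PER_PTE - 1);
		unsigned long addr = start + ((unsigned long)offset << PAGE_SHIFT);
		if (addr >= end)	/* every overshoot collapses onto end */
			clamped++;
	}
	printf("addr == end on %.1f%% of trials; a fair share would be ~%.1f%%\n",
	       100.0 * clamped / trials, 100.0 / PTRS_PER_PTE);
	return 0;
}

With this example start address, offsets 210 through 511 all land on `end`, so roughly 59% of runs pick one single address.
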
+29 −16
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
 	unsigned long addr, end;
 	unsigned offset;
-	end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+	/*
+	 * Round up the start address.  It can start out unaligned as a result
+	 * of stack start randomization.
+	 */
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 	if (end >= TASK_SIZE_MAX)
 		end = TASK_SIZE_MAX;
 	end -= len;
-	/* This loses some more bits than a modulo, but is cheaper */
-	offset = get_random_int() & (PTRS_PER_PTE - 1);
-	addr = start + (offset << PAGE_SHIFT);
-	if (addr >= end)
-		addr = end;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
 
 	/*
-	 * page-align it here so that get_unmapped_area doesn't
-	 * align it wrongfully again to the next page. addr can come in 4K
-	 * unaligned here as a result of stack start randomization.
+	 * Forcibly align the final address in case we have a hardware
+	 * issue that requires alignment for performance reasons.
 	 */
-	addr = PAGE_ALIGN(addr);
 	addr = align_vdso_addr(addr);
 
 	return addr;
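
For contrast, a companion user-space sketch of the fixed selection, with the same illustrative constants and hypothetical start address as the earlier one: drawing the offset with a modulo over the actual number of candidate pages makes every page in [start, end] roughly equally likely, up to the negligible modulo bias of the random source.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PMD_SIZE   (1UL << 21)
#define PMD_MASK   (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long start = 0x7fff00000000UL + 300 * PAGE_SIZE;
	unsigned long len = 2 * PAGE_SIZE;
	unsigned long end, npages;
	long hits_end = 0, trials = 1000000;

	/* The fixed flow, mirrored in user space. */
	start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* PAGE_ALIGN */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	end -= len;
	npages = ((end - start) >> PAGE_SHIFT) + 1;

	srand(1);
	for (long i = 0; i < trials; i++) {
		unsigned long offset = (unsigned)rand() % npages;
		unsigned long addr = start + (offset << PAGE_SHIFT);
		if (addr == end)	/* the formerly over-represented address */
			hits_end++;
	}
	printf("addr == end on %.2f%% of trials; uniform share is %.2f%%\n",
	       100.0 * hits_end / trials, 100.0 / npages);
	return 0;
}

Here `end` is hit on about 0.47% of trials, matching the 1/211 uniform share instead of dominating the distribution.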