Commit 8737d9e0 authored by Peter Lieven's avatar Peter Lieven Committed by Kevin Wolf
Browse files

oslib-posix: add helpers for stack alloc and free



The allocated stack will be adjusted to the minimum supported stack size
by the OS and rounded up to be a multiple of the system pagesize.
Additionally an architecture dependent guard page is added to the stack
to catch stack overflows.

Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 74e1ae7c
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -60,4 +60,31 @@ int qemu_utimens(const char *path, const qemu_timespec *times);

bool is_daemonized(void);

/**
 * qemu_alloc_stack:
 * @sz: pointer to a size_t holding the requested usable stack size
 *
 * Allocate memory that can be used as a stack, for instance for
 * coroutines. If the memory cannot be allocated, this function
 * will abort (like g_malloc()). This function also inserts an
 * additional guard page to catch a potential stack overflow.
 * Note that the memory required for the guard page and alignment
 * and minimal stack size restrictions will increase the value of sz.
 *
 * On return, *sz holds the full adjusted size (usable stack plus
 * guard page); this is the size that must later be passed to
 * qemu_free_stack().
 *
 * The allocated stack must be freed with qemu_free_stack().
 *
 * Returns: pointer to (the lowest address of) the stack memory.
 */
void *qemu_alloc_stack(size_t *sz);

/**
 * qemu_free_stack:
 * @stack: stack to free, as returned by qemu_alloc_stack()
 * @sz: size of stack in bytes
 *
 * Free a stack allocated via qemu_alloc_stack(). Note that sz must
 * be exactly the adjusted stack size returned by qemu_alloc_stack
 * (i.e. the value it wrote back through its @sz argument), so that
 * the whole mapping, including the guard page, is released.
 */
void qemu_free_stack(void *stack, size_t sz);

#endif
+42 −0
Original line number Diff line number Diff line
@@ -499,3 +499,45 @@ pid_t qemu_fork(Error **errp)
    }
    return pid;
}

/*
 * Allocate a mmap()ed region usable as a stack, with one extra
 * PROT_NONE guard page to trap overflows.  On entry *sz is the
 * requested usable size; on return it is the full adjusted size
 * (page-aligned, at least _SC_THREAD_STACK_MIN, plus the guard
 * page) and must be passed unchanged to qemu_free_stack().
 * Aborts on any failure, like g_malloc().
 *
 * Returns the lowest address of the mapping.
 */
void *qemu_alloc_stack(size_t *sz)
{
    void *ptr, *guardpage;
    size_t pagesz = getpagesize();
#ifdef _SC_THREAD_STACK_MIN
    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
    long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
    /* sysconf() may return -1 when the limit is indeterminate; clamp to 0 */
    *sz = MAX(MAX(min_stack_sz, 0), *sz);
#endif
    /* adjust stack size to a multiple of the page size */
    *sz = ROUND_UP(*sz, pagesz);
    /* allocate one extra page for the guard page */
    *sz += pagesz;

    ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        abort();
    }

#if defined(HOST_IA64)
    /* separate register stack grows up from the middle of the mapping,
     * so put the guard page at the page-aligned midpoint.  The mask must
     * be ~(pagesz - 1): "& ~pagesz" would clear only a single bit and
     * could yield an unaligned address, making mprotect() fail. */
    guardpage = ptr + (((*sz - pagesz) / 2) & ~(pagesz - 1));
#elif defined(HOST_HPPA)
    /* stack grows up */
    guardpage = ptr + *sz - pagesz;
#else
    /* stack grows down */
    guardpage = ptr;
#endif
    /* POSIX requires the mprotect() address to be page-aligned */
    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
        abort();
    }

    return ptr;
}

void qemu_free_stack(void *stack, size_t sz)
{
    munmap(stack, sz);
}