/* $OpenBSD: rthread_stack.c,v 1.20 2021/09/17 15:20:21 deraadt Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/types.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static _atomic_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	u_int32_t rnd;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _thread_pagesize) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}
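
	/*
	 * No cached stack was available (or the request isn't for the
	 * defaults), so build one from scratch.
	 */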

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* The smaller the stack, the smaller the random bias */
	if (thread->attr.stack_size > _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize - 1);
	else if (thread->attr.stack_size == _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize / 16 - 1);
	else
		rnd = 0;
	rnd &= ~_STACKALIGNBYTES;
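
	/*
	 * rnd is now a small random offset for the initial stack
	 * pointer; masking with ~_STACKALIGNBYTES keeps sp on the
	 * machine's required stack alignment.
	 */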

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base + rnd;
#else
		stack->sp = base + thread->attr.stack_size - (_STACKALIGNBYTES+1) - rnd;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack()
		 */
		stack->guardsize = 1;
		return (stack);
	}
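
	/*
	 * (Illustrative sketch) an application-provided stack reaches
	 * the branch above via pthread_attr_setstack(3), e.g.:
	 *
	 *	pthread_attr_t attr;
	 *	pthread_attr_init(&attr);
	 *	pthread_attr_setstack(&attr, addr, size);
	 *	pthread_create(&tid, &attr, start_fn, arg);
	 *
	 * where addr, size, start_fn, and arg are the caller's own.
	 */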

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check for overflow */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	size += guardsize;
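
	/*
	 * From here on, size covers both the usable stack and its
	 * guard: they are carved out of the single mapping below.
	 */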

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON | MAP_STACK, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base + rnd;
#else
	guard = base;
	stack->sp = base + size - (_STACKALIGNBYTES+1) - rnd;
#endif
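
	/*
	 * Layout: on grows-down machines the guard occupies the low
	 * end of the mapping and sp starts near the top, pulled down
	 * by the random bias; on grows-up machines the guard occupies
	 * the high end and sp starts just above the base.
	 */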

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}

void
_rthread_free_stack(struct stack *stack)
{
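	/*
	 * Cache stacks whose length and guard match what
	 * _rthread_alloc_stack() builds for the default attributes;
	 * release everything else.
	 */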
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _thread_pagesize) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}