Lines matching the full-text query "+full:use +full:-guard +full:-pages" (each matched line is shown with its line number in the source file)
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
10 * Redistribution and use in source and binary forms, with or without
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * Permission to use, copy, modify and distribute this software and
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 * Pittsburgh PA 15213-3890
168 map = &curproc->p_vmspace->vm_map;
192 npages = atop(end - start);
195 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
198 curthread->td_vslock_sz += len;
214 MPASS(curthread->td_vslock_sz >= len);
215 curthread->td_vslock_sz -= len;
216 (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
272 pmap_sync_icache(map->pmap, va, sz);
293 if (error == 0 && req->newptr && oldsize != kstack_cache_size)
314 /* Allocate from the kernel arena for non-standard kstack sizes. */
323 KASSERT(atop(addr - VM_MIN_KERNEL_ADDRESS) %
361 * for holes induced by guard pages.
371 * a multiple of kernel stack pages + guard pages in size.
374 * is divisible by the total number of kstack VA pages. This is necessary to
377 * We import a multiple of KVA_KSTACK_QUANTUM-sized region from the parent
389 ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
399 rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages;
402 *addrp = *addrp + (kpages - rem) * PAGE_SIZE;
410 * be a multiple of kernel stack pages + guard pages in size.
419 ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
422 KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0,
426 * If the address is not KVA_KSTACK_QUANTUM-aligned we have to decrement
434 addr -= rem;
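The arena import/release fragments above (matched lines 361-434) turn on a single invariant: a kstack's offset from VM_MIN_KERNEL_ADDRESS must be a whole multiple of the kernel stack pages plus guard pages, so the guard-aware pindex math later in the file works out. The standalone sketch below models only that rounding arithmetic; the page size, stack page count, guard page count, and base address are assumptions chosen for illustration, not values taken from any kernel configuration.

/*
 * Standalone model of the kstack KVA alignment arithmetic suggested by the
 * matched lines above.  All constants are illustrative assumptions, not
 * values taken from a kernel configuration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096ULL			/* assumed */
#define KSTACK_PAGES		4ULL			/* assumed default */
#define KSTACK_GUARD_PAGES	1ULL			/* assumed */
#define KVA_BASE		0xffff000000000000ULL	/* stand-in for VM_MIN_KERNEL_ADDRESS */

/* Round addr up so (addr - KVA_BASE) is a multiple of stack + guard pages. */
static uint64_t
kstack_align_up(uint64_t addr)
{
	uint64_t kpages = KSTACK_PAGES + KSTACK_GUARD_PAGES;
	uint64_t rem = ((addr - KVA_BASE) / PAGE_SIZE) % kpages;

	if (rem != 0)
		addr += (kpages - rem) * PAGE_SIZE;
	return (addr);
}

int
main(void)
{
	uint64_t addr = KVA_BASE + 3 * PAGE_SIZE;
	uint64_t aligned = kstack_align_up(addr);

	assert(((aligned - KVA_BASE) / PAGE_SIZE) %
	    (KSTACK_PAGES + KSTACK_GUARD_PAGES) == 0);
	printf("%#jx -> %#jx\n", (uintmax_t)addr, (uintmax_t)aligned);
	return (0);
}

Running it shows an address three pages into the range being pushed up to the next (stack + guard) page boundary, which is the round-up step visible in the import fragment; the release fragment rounds the other way before returning the chunk to the parent arena.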
443 vm_thread_stack_create(struct domainset *ds, int pages)
452 obj = vm_thread_kstack_size_to_obj(pages);
454 obj->domain.dr_policy = ds;
460 ks = vm_thread_alloc_kstack_kva(ptoa(pages + KSTACK_GUARD_PAGES),
467 * Allocate physical pages to back the stack.
469 if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) {
470 vm_thread_free_kstack_kva(ks - ptoa(KSTACK_GUARD_PAGES),
471 ptoa(pages + KSTACK_GUARD_PAGES), domain);
475 pmap_qremove(ks - ptoa(KSTACK_GUARD_PAGES),
478 for (i = 0; i < pages; i++)
480 pmap_qenter(ks, ma, pages);
488 vm_thread_stack_dispose(vm_offset_t ks, int pages)
493 vm_object_t obj = vm_thread_kstack_size_to_obj(pages);
495 pindex = vm_kstack_pindex(ks, pages);
497 pmap_qremove(ks, pages);
499 for (i = 0; i < pages; i++) {
511 kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
512 vm_thread_free_kstack_kva(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
513 ptoa(pages + KSTACK_GUARD_PAGES), domain);
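Read together, the vm_thread_stack_create/vm_thread_stack_dispose fragments imply a fixed layout: the guard pages sit at the low end of each reservation, the returned stack base ks points just past them (teardown frees starting at ks - ptoa(KSTACK_GUARD_PAGES)), and only the pages above the guard are ever entered into the pmap. A minimal model of that layout, with an assumed 4 KiB page size, one guard page, and a made-up base address:

/*
 * Layout model implied by the create/dispose fragments: guard pages occupy
 * the bottom of each reservation and the usable stack begins at ks.
 * The base address, page size, and page counts are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096ULL	/* assumed */
#define KSTACK_GUARD_PAGES	1ULL	/* assumed */

int
main(void)
{
	uint64_t base = 0xffff00000a000000ULL;	/* hypothetical KVA reservation */
	uint64_t pages = 4;			/* assumed stack pages */
	uint64_t ks = base + KSTACK_GUARD_PAGES * PAGE_SIZE;
	uint64_t top = ks + pages * PAGE_SIZE;

	printf("guard: [%#jx, %#jx) left unmapped\n",
	    (uintmax_t)base, (uintmax_t)ks);
	printf("stack: [%#jx, %#jx) backed by %ju pages\n",
	    (uintmax_t)ks, (uintmax_t)top, (uintmax_t)pages);
	return (0);
}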
520 vm_thread_new(struct thread *td, int pages)
526 if (pages <= 1)
527 pages = kstack_pages;
528 else if (pages > KSTACK_MAX_PAGES)
529 pages = KSTACK_MAX_PAGES;
532 if (pages == kstack_pages && kstack_cache != NULL)
536 * Ensure that kstack objects can draw pages from any memory
538 * swap-in.
542 pages);
549 td->td_kstack = ks;
550 td->td_kstack_pages = pages;
551 td->td_kstack_domain = ks_domain;
562 int pages;
564 pages = td->td_kstack_pages;
565 ks = td->td_kstack;
566 td->td_kstack = 0;
567 td->td_kstack_pages = 0;
568 td->td_kstack_domain = MAXMEMDOM;
569 if (pages == kstack_pages) {
570 kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
573 vm_thread_stack_dispose(ks, pages);
580 * Uses a non-identity mapping if guard pages are
586 vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
592 * Return the linear pindex if guard pages aren't active or if we are
593 * allocating a non-standard kstack size.
599 ("%s: Attempting to calculate kstack guard page pindex", __func__));
601 return (pindex -
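The vm_kstack_pindex fragments describe a non-identity mapping from a stack's VA page index to its index in the backing kstack object; matched line 601 truncates the actual expression. The sketch below reconstructs one mapping consistent with those comments, under the assumption that every (stack pages + guard pages) sized VA chunk keeps its guard pages at the bottom and allocates no object pages for them; the constants are illustrative, not the kernel's.

/*
 * Reconstruction of a guard-aware pindex mapping consistent with the
 * vm_kstack_pindex() fragments: each (stack + guard) sized chunk of KVA
 * keeps its guard pages at the bottom and gets no object pages for them,
 * so the object pindex is the linear pindex minus every guard slot at or
 * below it.  Constants are assumptions for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KSTACK_PAGES		4ULL	/* assumed default kstack_pages */
#define KSTACK_GUARD_PAGES	1ULL	/* assumed */

/* Linear VA page index (from the KVA base) -> compressed object pindex. */
static uint64_t
kstack_obj_pindex(uint64_t linear)
{
	uint64_t chunk = KSTACK_PAGES + KSTACK_GUARD_PAGES;

	/* Guard pages themselves never get an object page. */
	assert(linear % chunk >= KSTACK_GUARD_PAGES);
	return (linear - (linear / chunk + 1) * KSTACK_GUARD_PAGES);
}

int
main(void)
{
	uint64_t p;

	/* First stack: VA pages 1..4 -> object pages 0..3. */
	for (p = 1; p <= 4; p++)
		printf("linear %ju -> object %ju\n", (uintmax_t)p,
		    (uintmax_t)kstack_obj_pindex(p));
	/* Second stack: VA pages 6..9 -> object pages 4..7. */
	for (p = 6; p <= 9; p++)
		printf("linear %ju -> object %ju\n", (uintmax_t)p,
		    (uintmax_t)kstack_obj_pindex(p));
	return (0);
}

With four stack pages and one guard page, VA page indices 1-4 and 6-9 collapse onto object indices 0-7 with no holes, which is why the comment at matched line 361 speaks of compensating for holes induced by guard pages.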
607 * Allocate physical pages, following the specified NUMA policy, to back a
626 m = n > 0 ? ma[n - 1] : vm_page_mpred(obj, pindex);
694 atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
703 atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
710 kstack_quantum -= (kstack_pages + KSTACK_GUARD_PAGES) * PAGE_SIZE;
754 stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
759 * Hardware could use a dedicated stack for interrupt handling.
761 if (stack_top <= current || current < td->td_kstack)
764 used = stack_top - current;
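The stack-check fragments (matched lines 754-764) compute how much of the kernel stack is in use: the stack grows down from stack_top, so usage is stack_top minus the current stack pointer, and a pointer outside [td_kstack, stack_top), for example one on a dedicated interrupt stack, is not counted. A small userland model of that check, with made-up addresses and an assumed 4 KiB page size:

/*
 * Model of the stack-usage check in the fragments above: the kernel stack
 * grows down from stack_top, so the bytes in use are stack_top - sp, and a
 * stack pointer outside [kstack, stack_top), e.g. one on a dedicated
 * interrupt stack, is ignored.  Addresses and sizes are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL	/* assumed */

static int64_t
kstack_used(uint64_t kstack, uint64_t pages, uint64_t sp)
{
	uint64_t stack_top = kstack + pages * PAGE_SIZE;

	if (stack_top <= sp || sp < kstack)
		return (-1);	/* sp is not on this thread's kernel stack */
	return ((int64_t)(stack_top - sp));
}

int
main(void)
{
	uint64_t kstack = 0xffff00000b000000ULL;	/* hypothetical */

	printf("used: %jd bytes\n",
	    (intmax_t)kstack_used(kstack, 4, kstack + 4 * PAGE_SIZE - 512));
	printf("used: %jd (stack pointer off-stack)\n",
	    (intmax_t)kstack_used(kstack, 4, 0xffff00000c000000ULL));
	return (0);
}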
779 * machine-dependent layer to fill those in and make the new process
787 struct proc *p1 = td->td_proc;
807 p2->p_vmspace = p1->p_vmspace;
808 refcount_acquire(&p1->p_vmspace->vm_refcnt);
810 dset = td2->td_domain.dr_policy;
811 while (vm_page_count_severe_set(&dset->ds_mask)) {
812 vm_wait_doms(&dset->ds_mask, 0);
816 p2->p_vmspace = vm2;
817 if (p1->p_vmspace->vm_shm)
838 vmspace_exitfree(p); /* and clean-out the vmspace */
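The vm_forkproc fragments at the end show the shared-address-space path: the child points at the parent's vmspace and takes a reference instead of receiving a copy. The toy below models just that refcount-versus-copy decision; the structure, field, flag, and function names are stand-ins and not the kernel's interface.

/*
 * Toy model of the share-versus-copy decision in the vm_forkproc fragments:
 * a child that shares memory takes a reference on the parent's vmspace,
 * otherwise it uses the copy supplied by the caller.  All names here are
 * stand-ins, not the kernel's interface.
 */
#include <stdbool.h>
#include <stdio.h>

struct vmspace {
	int	vm_refcnt;
};

static struct vmspace *
fork_vmspace(struct vmspace *parent, struct vmspace *precopied, bool shared)
{
	if (shared) {
		parent->vm_refcnt++;	/* both processes use the same map */
		return (parent);
	}
	return (precopied);		/* caller already forked a copy */
}

int
main(void)
{
	struct vmspace parent = { .vm_refcnt = 1 };
	struct vmspace copy = { .vm_refcnt = 1 };
	struct vmspace *child;

	child = fork_vmspace(&parent, &copy, true);
	printf("shared: parent refcnt=%d, child==parent: %d\n",
	    parent.vm_refcnt, child == &parent);
	child = fork_vmspace(&parent, &copy, false);
	printf("copied: child==copy: %d\n", child == &copy);
	return (0);
}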