Lines Matching defs:pages
361 * for holes induced by guard pages.
371 * a multiple of kernel stack pages + guard pages in size.
374 * is divisible by the total number of kstack VA pages. This is necessary to
389 ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
410 * be a multiple of kernel stack pages + guard pages in size.
419 ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
443 vm_thread_stack_create(struct domainset *ds, int pages)
452 obj = vm_thread_kstack_size_to_obj(pages);
460 ks = vm_thread_alloc_kstack_kva(ptoa(pages + KSTACK_GUARD_PAGES),
467 * Allocate physical pages to back the stack.
469 if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) {
471 ptoa(pages + KSTACK_GUARD_PAGES), domain);
478 for (i = 0; i < pages; i++)
480 pmap_qenter(ks, ma, pages);
488 vm_thread_stack_dispose(vm_offset_t ks, int pages)
493 vm_object_t obj = vm_thread_kstack_size_to_obj(pages);
495 pindex = vm_kstack_pindex(ks, pages);
497 pmap_qremove(ks, pages);
499 for (i = 0; i < pages; i++) {
511 kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
513 ptoa(pages + KSTACK_GUARD_PAGES), domain);
520 vm_thread_new(struct thread *td, int pages)
526 if (pages <= 1)
527 pages = kstack_pages;
528 else if (pages > KSTACK_MAX_PAGES)
529 pages = KSTACK_MAX_PAGES;
532 if (pages == kstack_pages && kstack_cache != NULL)
536 * Ensure that kstack objects can draw pages from any memory
542 pages);
550 td->td_kstack_pages = pages;
562 int pages;
564 pages = td->td_kstack_pages;
569 if (pages == kstack_pages) {
570 kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
573 vm_thread_stack_dispose(ks, pages);
580 * Uses a non-identity mapping if guard pages are
592 * Return the linear pindex if guard pages aren't active or if we are
607 * Allocate physical pages, following the specified NUMA policy, to back a
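The matches around lines 580 and 592 point at the most subtle part of this code: translating a kernel stack's virtual address into a page index in the backing kstack object, using a non-identity mapping when guard pages are active so the guard-page holes do not leave unused pindexes in the object. The standalone sketch below illustrates only that arithmetic, assuming the guard pages sit at the start of each stack's KVA slot; the constant values chosen for KSTACK_GUARD_PAGES and KSTACK_PAGES and the helper name kstack_pindex_sketch are illustrative assumptions, not the kernel's actual definitions.

/*
 * Standalone sketch (not the kernel code) of the pindex arithmetic hinted at
 * by the matches above: each kernel stack occupies
 * (KSTACK_PAGES + KSTACK_GUARD_PAGES) pages of KVA, but the backing object
 * only holds KSTACK_PAGES of them, so the guard-page holes are subtracted to
 * produce a dense object pindex.
 */
#include <stdio.h>

#define KSTACK_GUARD_PAGES	1	/* assumed value for illustration */
#define KSTACK_PAGES		4	/* assumed kstack_pages for illustration */

/*
 * Convert a stack's page offset within the kstack KVA arena into the pindex
 * of its first backing page in the dense kstack object.  "kva_page" is the
 * page index of the stack base, i.e. the first page after that slot's
 * leading guard pages.
 */
static unsigned long
kstack_pindex_sketch(unsigned long kva_page)
{
	unsigned long stride = KSTACK_PAGES + KSTACK_GUARD_PAGES;

	/* How many full stack-plus-guard slots precede this stack. */
	unsigned long slot = kva_page / stride;

	/* Drop the guard pages of this slot and of every earlier slot. */
	return (kva_page - (slot + 1) * KSTACK_GUARD_PAGES);
}

int
main(void)
{
	unsigned long stride = KSTACK_PAGES + KSTACK_GUARD_PAGES;

	for (unsigned long slot = 0; slot < 4; slot++) {
		unsigned long base = slot * stride + KSTACK_GUARD_PAGES;
		printf("stack %lu: KVA page %lu -> object pindex %lu\n",
		    slot, base, kstack_pindex_sketch(base));
	}
	return (0);
}

With a stride of stack-plus-guard pages, subtracting one slot's worth of guard pages for each preceding stack (plus this stack's own leading guard) yields a contiguous pindex range per stack, which is what allows standard-size stacks to share a single VM object despite the guard holes in the KVA layout; when guard pages are not active, the linear pindex can be returned unchanged, as the matched comments note.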