Lines matching defs:pp — every occurrence of the pool pointer `pp` in the kernel pool allocator, each prefixed with its line number in the source file.
93 #define pl_init(pp, pl) do { \
95 (pp)->pr_lock_ops->pl_init(pp, pl, &__lock_type); \
98 #define pl_init(pp, pl) (pp)->pr_lock_ops->pl_init(pp, pl, NULL)
102 pl_enter(struct pool *pp, union pool_lock *pl)
104 pp->pr_lock_ops->pl_enter(pl);
107 pl_enter_try(struct pool *pp, union pool_lock *pl)
109 return pp->pr_lock_ops->pl_enter_try(pl);
112 pl_leave(struct pool *pp, union pool_lock *pl)
114 pp->pr_lock_ops->pl_leave(pl);
117 pl_assert_locked(struct pool *pp, union pool_lock *pl)
119 pp->pr_lock_ops->pl_assert_locked(pl);
122 pl_assert_unlocked(struct pool *pp, union pool_lock *pl)
124 pp->pr_lock_ops->pl_assert_unlocked(pl);
127 pl_sleep(struct pool *pp, void *ident, union pool_lock *lock, int priority,
130 return pp->pr_lock_ops->pl_sleep(ident, lock, priority, wmesg);
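
The pl_* wrappers above take the pool as an extra argument only to reach pp->pr_lock_ops: they dispatch through that ops table (pool_lock_ops_mtx or pool_lock_ops_rw, whose init hooks appear at lines 2194 and 2248 below), so the rest of the allocator runs unchanged under either a mutex or an rwlock. A minimal userland model of the indirection; the demo_* names are made up, only the shape matches the kernel's union-plus-ops-table arrangement:

    #include <stdio.h>

    union demo_lock {
            int prl_mtx;            /* stands in for struct mutex */
            int prl_rwlock;         /* stands in for struct rwlock */
    };

    struct demo_lock_ops {
            void (*pl_enter)(union demo_lock *);
            void (*pl_leave)(union demo_lock *);
    };

    static void
    demo_mtx_enter(union demo_lock *pl)
    {
            printf("mtx_enter(%p)\n", (void *)&pl->prl_mtx);
    }

    static void
    demo_mtx_leave(union demo_lock *pl)
    {
            printf("mtx_leave(%p)\n", (void *)&pl->prl_mtx);
    }

    static const struct demo_lock_ops demo_lock_ops_mtx = {
            demo_mtx_enter,
            demo_mtx_leave,
    };

    struct demo_pool {
            const struct demo_lock_ops *pr_lock_ops;
            union demo_lock pr_lock;
    };

    int
    main(void)
    {
            struct demo_pool pool = { .pr_lock_ops = &demo_lock_ops_mtx };

            /* this indirect call is all pl_enter()/pl_leave() amount to */
            pool.pr_lock_ops->pl_enter(&pool.pr_lock);
            pool.pr_lock_ops->pl_leave(&pool.pr_lock);
            return 0;
    }
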
205 #define POOL_INPGHDR(pp) ((pp)->pr_phoffset != 0)
297 pr_find_pagehead(struct pool *pp, void *v)
301 if (POOL_INPGHDR(pp)) {
304 page = (caddr_t)((vaddr_t)v & pp->pr_pgmask);
306 return ((struct pool_page_header *)(page + pp->pr_phoffset));
310 ph = RBT_NFIND(phtree, &pp->pr_phtree, &key);
312 panic("%s: %s: page header missing", __func__, pp->pr_wchan);
315 if (ph->ph_page + pp->pr_pgsize <= (caddr_t)v)
316 panic("%s: %s: incorrect page", __func__, pp->pr_wchan);
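
pr_find_pagehead() has two strategies: when the pool keeps its page header inside the page (POOL_INPGHDR, i.e. pr_phoffset != 0), the header is reached by masking the item address down to its page base and adding the fixed offset; otherwise the RB-tree keyed on page addresses is searched, and the two panics catch addresses that belong to no page of this pool. A self-contained check of the masking arithmetic, with made-up values:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            uintptr_t pgsize = 4096;                         /* pr_pgsize */
            uintptr_t pgmask = ~(uintptr_t)0 ^ (pgsize - 1); /* pr_pgmask, as at line 419 */
            uintptr_t phoffset = pgsize - 64;                /* pr_phoffset: header at page tail */
            uintptr_t item = 0x12345678;                     /* an item inside some page */

            uintptr_t page = item & pgmask;                  /* page base */
            uintptr_t header = page + phoffset;              /* pool_page_header lives here */

            assert(page == (uintptr_t)0x12345000);
            assert(header == page + 4096 - 64);
            return 0;
    }
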
328 pool_init(struct pool *pp, size_t size, u_int align, int ipl, int flags,
402 memset(pp, 0, sizeof(*pp));
403 refcnt_init(&pp->pr_refcnt);
406 pp->pr_lock_ops = &pool_lock_ops_rw;
408 pp->pr_lock_ops = &pool_lock_ops_mtx;
409 TAILQ_INIT(&pp->pr_emptypages);
410 TAILQ_INIT(&pp->pr_fullpages);
411 TAILQ_INIT(&pp->pr_partpages);
412 pp->pr_curpage = NULL;
413 pp->pr_npages = 0;
414 pp->pr_minitems = 0;
415 pp->pr_minpages = 0;
416 pp->pr_maxpages = 8;
417 pp->pr_size = size;
418 pp->pr_pgsize = pgsize;
419 pp->pr_pgmask = ~0UL ^ (pgsize - 1);
420 pp->pr_phoffset = off;
421 pp->pr_itemsperpage = items;
422 pp->pr_wchan = wchan;
423 pp->pr_alloc = palloc;
424 pp->pr_nitems = 0;
425 pp->pr_nout = 0;
426 pp->pr_hardlimit = UINT_MAX;
427 pp->pr_hardlimit_warning = NULL;
428 pp->pr_hardlimit_ratecap.tv_sec = 0;
429 pp->pr_hardlimit_ratecap.tv_usec = 0;
430 pp->pr_hardlimit_warning_last.tv_sec = 0;
431 pp->pr_hardlimit_warning_last.tv_usec = 0;
432 RBT_INIT(phtree, &pp->pr_phtree);
438 space = POOL_INPGHDR(pp) ? pp->pr_phoffset : pp->pr_pgsize;
439 space -= pp->pr_itemsperpage * pp->pr_size;
440 pp->pr_align = align;
441 pp->pr_maxcolors = (space / align) + 1;
443 pp->pr_nget = 0;
444 pp->pr_nfail = 0;
445 pp->pr_nput = 0;
446 pp->pr_npagealloc = 0;
447 pp->pr_npagefree = 0;
448 pp->pr_hiwat = 0;
449 pp->pr_nidle = 0;
451 pp->pr_ipl = ipl;
452 pp->pr_flags = flags;
454 pl_init(pp, &pp->pr_lock);
455 pl_init(pp, &pp->pr_requests_lock);
456 TAILQ_INIT(&pp->pr_requests);
467 pp->pr_crange = &kp_dirty;
473 if (iter == pp)
478 pp->pr_serial = ++pool_serial;
482 SIMPLEQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
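
For reference, the consumer side of the constructor above, as a sketch: struct foo and foo_pool are hypothetical, but the call matches pool_init()'s signature (pool, item size, alignment, IPL, flags, wait channel, allocator), with 0 alignment and a NULL allocator selecting the defaults.

    #include <sys/param.h>
    #include <sys/pool.h>

    struct foo {
            int     f_id;
            char    f_name[16];
    };

    struct pool foo_pool;           /* hypothetical pool */

    void
    foo_init(void)
    {
            pool_init(&foo_pool, sizeof(struct foo), 0, IPL_NONE, 0,
                "foopl", NULL);
    }
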
491 pool_destroy(struct pool *pp)
497 if (pp->pr_nout != 0)
498 panic("%s: pool busy: still out: %u", __func__, pp->pr_nout);
504 if (pp == SIMPLEQ_FIRST(&pool_head))
509 if (iter == pp) {
520 refcnt_finalize(&pp->pr_refcnt, "pooldtor");
523 if (pp->pr_cache != NULL)
524 pool_cache_destroy(pp);
528 while ((ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL) {
529 pl_enter(pp, &pp->pr_lock);
530 pool_p_remove(pp, ph);
531 pl_leave(pp, &pp->pr_lock);
532 pool_p_free(pp, ph);
534 KASSERT(TAILQ_EMPTY(&pp->pr_fullpages));
535 KASSERT(TAILQ_EMPTY(&pp->pr_partpages));
548 pool_request(struct pool *pp, struct pool_request *pr)
550 pl_enter(pp, &pp->pr_requests_lock);
551 TAILQ_INSERT_TAIL(&pp->pr_requests, pr, pr_entry);
552 pool_runqueue(pp, PR_NOWAIT);
553 pl_leave(pp, &pp->pr_requests_lock);
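
pool_request() is the asynchronous entry point: the caller parks a pool_request on pr_requests and pool_runqueue() (below) delivers the item through a callback, immediately if memory is on hand, or later when pool_put()/pool_wakeup() re-runs the queue. A sketch, assuming the pool_request_init() helper that pairs with pool_request() in this file and the hypothetical foo_pool from the pool_init() sketch above; the callback signature matches the pr_handler invocation at line 684:

    struct pool_request foo_req;    /* hypothetical */

    void
    foo_ready(struct pool *pp, void *cookie, void *item)
    {
            /* called from pool_runqueue(); item may be NULL on failure */
    }

    void
    foo_request(void)
    {
            pool_request_init(&foo_req, foo_ready, NULL);
            pool_request(&foo_pool, &foo_req);
    }
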
565 pool_get(struct pool *pp, int flags)
571 if (pp->pr_flags & PR_RWLOCK)
575 if (pp->pr_cache != NULL) {
576 v = pool_cache_get(pp);
582 pl_enter(pp, &pp->pr_lock);
583 if (pp->pr_nout >= pp->pr_hardlimit) {
586 } else if ((v = pool_do_get(pp, flags, &slowdown)) == NULL) {
590 pl_leave(pp, &pp->pr_lock);
604 pl_init(pp, &mem.lock);
606 pool_request(pp, &pr);
608 pl_enter(pp, &mem.lock);
610 pl_sleep(pp, &mem, &mem.lock, PSWP, pp->pr_wchan);
611 pl_leave(pp, &mem.lock);
620 memset(v, 0, pp->pr_size);
622 TRACEPOINT(uvm, pool_get, pp, v, flags);
627 pp->pr_nfail++;
628 pl_leave(pp, &pp->pr_lock);
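
The synchronous path in use, continuing the hypothetical foo_pool: PR_WAITOK may sleep on the pr_wchan set at init (via the pool_request/pl_sleep dance at lines 604-611), PR_NOWAIT fails with NULL instead, and PR_ZERO asks for the memset at line 620.

    struct foo *
    foo_alloc(void)
    {
            /* process context: may sleep until an item is available */
            return (pool_get(&foo_pool, PR_WAITOK | PR_ZERO));
    }

    struct foo *
    foo_alloc_intr(void)
    {
            /* interrupt context: can return NULL, caller must check */
            return (pool_get(&foo_pool, PR_NOWAIT));
    }
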
633 pool_get_done(struct pool *pp, void *xmem, void *v)
637 pl_enter(pp, &mem->lock);
639 pl_leave(pp, &mem->lock);
645 pool_runqueue(struct pool *pp, int flags)
650 pl_assert_unlocked(pp, &pp->pr_lock);
651 pl_assert_locked(pp, &pp->pr_requests_lock);
653 if (pp->pr_requesting++)
657 pp->pr_requesting = 1;
659 TAILQ_CONCAT(&prl, &pp->pr_requests, pr_entry);
663 pl_leave(pp, &pp->pr_requests_lock);
665 pl_enter(pp, &pp->pr_lock);
670 if (pp->pr_nout >= pp->pr_hardlimit)
673 pr->pr_item = pool_do_get(pp, flags, &slowdown);
679 pl_leave(pp, &pp->pr_lock);
684 (*pr->pr_handler)(pp, pr->pr_cookie, pr->pr_item);
687 pl_enter(pp, &pp->pr_requests_lock);
688 } while (--pp->pr_requesting);
690 TAILQ_CONCAT(&pp->pr_requests, &prl, pr_entry);
694 pool_do_get(struct pool *pp, int flags, int *slowdown)
699 pl_assert_locked(pp, &pp->pr_lock);
701 splassert(pp->pr_ipl);
707 pp->pr_nout++;
709 if (pp->pr_curpage == NULL) {
710 pl_leave(pp, &pp->pr_lock);
711 ph = pool_p_alloc(pp, flags, slowdown);
712 pl_enter(pp, &pp->pr_lock);
715 pp->pr_nout--;
719 pool_p_insert(pp, ph);
722 ph = pp->pr_curpage;
725 panic("%s: %s: page empty", __func__, pp->pr_wchan);
730 __func__, pp->pr_wchan, ph->ph_page, pi,
740 if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
745 __func__, pp->pr_wchan, ph->ph_page, pi,
756 TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_entry);
757 TAILQ_INSERT_TAIL(&pp->pr_partpages, ph, ph_entry);
759 pp->pr_nidle--;
762 if (ph->ph_nmissing == pp->pr_itemsperpage) {
767 TAILQ_REMOVE(&pp->pr_partpages, ph, ph_entry);
768 TAILQ_INSERT_TAIL(&pp->pr_fullpages, ph, ph_entry);
769 pool_update_curpage(pp);
772 pp->pr_nget++;
781 pool_put(struct pool *pp, void *v)
790 TRACEPOINT(uvm, pool_put, pp, v);
793 if (pp->pr_cache != NULL && TAILQ_EMPTY(&pp->pr_requests)) {
794 pool_cache_put(pp, v);
799 pl_enter(pp, &pp->pr_lock);
801 pool_do_put(pp, v);
803 pp->pr_nout--;
804 pp->pr_nput++;
807 if (pp->pr_nidle > pp->pr_maxpages &&
808 (ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL &&
811 pool_p_remove(pp, freeph);
814 pl_leave(pp, &pp->pr_lock);
817 pool_p_free(pp, freeph);
819 pool_wakeup(pp);
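
And the matching release, again on the hypothetical foo_pool: besides handing the item back, pool_put() may clip one excess empty page (the pr_nidle > pr_maxpages test above) and then kicks any parked requests via pool_wakeup().

    void
    foo_free(struct foo *f)
    {
            pool_put(&foo_pool, f);
    }
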
823 pool_wakeup(struct pool *pp)
825 if (!TAILQ_EMPTY(&pp->pr_requests)) {
826 pl_enter(pp, &pp->pr_requests_lock);
827 pool_runqueue(pp, PR_NOWAIT);
828 pl_leave(pp, &pp->pr_requests_lock);
833 pool_do_put(struct pool *pp, void *v)
838 splassert(pp->pr_ipl);
840 ph = pr_find_pagehead(pp, v);
848 pp->pr_wchan, pi);
858 poison_mem(pi + 1, pp->pr_size - sizeof(*pi));
861 if (ph->ph_nmissing-- == pp->pr_itemsperpage) {
866 TAILQ_REMOVE(&pp->pr_fullpages, ph, ph_entry);
867 TAILQ_INSERT_TAIL(&pp->pr_partpages, ph, ph_entry);
874 pp->pr_nidle++;
877 TAILQ_REMOVE(&pp->pr_partpages, ph, ph_entry);
878 TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_entry);
879 pool_update_curpage(pp);
887 pool_prime(struct pool *pp, int n)
893 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
898 ph = pool_p_alloc(pp, PR_NOWAIT, &slowdown);
905 pl_enter(pp, &pp->pr_lock);
908 pool_p_insert(pp, ph);
910 pl_leave(pp, &pp->pr_lock);
916 pool_p_alloc(struct pool *pp, int flags, int *slowdown)
925 pl_assert_unlocked(pp, &pp->pr_lock);
926 KASSERT(pp->pr_size >= sizeof(*pi));
928 addr = pool_allocator_alloc(pp, flags, slowdown);
932 if (POOL_INPGHDR(pp))
933 ph = (struct pool_page_header *)(addr + pp->pr_phoffset);
937 pool_allocator_free(pp, addr);
944 addr += pp->pr_align * (pp->pr_npagealloc % pp->pr_maxcolors);
956 n = pp->pr_itemsperpage;
973 poison_mem(pi + 1, pp->pr_size - sizeof(*pi));
976 addr += pp->pr_size;
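
The `addr +=` at line 944 is cache coloring: each freshly allocated page starts its run of items one pr_align step further into the page, cycling through the pr_maxcolors computed from the leftover space at lines 438-441, so hot items from different pages do not all map to the same cache sets. A toy run of the arithmetic, with made-up values:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long align = 64;       /* pr_align, made up */
            unsigned long maxcolors = 4;    /* pr_maxcolors, made up */
            unsigned long npagealloc;

            for (npagealloc = 0; npagealloc < 6; npagealloc++)
                    printf("page %lu starts at color offset %lu\n",
                        npagealloc, align * (npagealloc % maxcolors));
            return 0;
    }
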
983 pool_p_free(struct pool *pp, struct pool_page_header *ph)
987 pl_assert_unlocked(pp, &pp->pr_lock);
994 __func__, pp->pr_wchan, ph->ph_page, pi,
1002 if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
1007 __func__, pp->pr_wchan, ph->ph_page, pi,
1014 pool_allocator_free(pp, ph->ph_page);
1016 if (!POOL_INPGHDR(pp))
1021 pool_p_insert(struct pool *pp, struct pool_page_header *ph)
1023 pl_assert_locked(pp, &pp->pr_lock);
1026 if (pp->pr_curpage == NULL)
1027 pp->pr_curpage = ph;
1029 TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_entry);
1030 if (!POOL_INPGHDR(pp))
1031 RBT_INSERT(phtree, &pp->pr_phtree, ph);
1033 pp->pr_nitems += pp->pr_itemsperpage;
1034 pp->pr_nidle++;
1036 pp->pr_npagealloc++;
1037 if (++pp->pr_npages > pp->pr_hiwat)
1038 pp->pr_hiwat = pp->pr_npages;
1042 pool_p_remove(struct pool *pp, struct pool_page_header *ph)
1044 pl_assert_locked(pp, &pp->pr_lock);
1046 pp->pr_npagefree++;
1047 pp->pr_npages--;
1048 pp->pr_nidle--;
1049 pp->pr_nitems -= pp->pr_itemsperpage;
1051 if (!POOL_INPGHDR(pp))
1052 RBT_REMOVE(phtree, &pp->pr_phtree, ph);
1053 TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_entry);
1055 pool_update_curpage(pp);
1059 pool_update_curpage(struct pool *pp)
1061 pp->pr_curpage = TAILQ_LAST(&pp->pr_partpages, pool_pagelist);
1062 if (pp->pr_curpage == NULL) {
1063 pp->pr_curpage = TAILQ_LAST(&pp->pr_emptypages, pool_pagelist);
1068 pool_setlowat(struct pool *pp, int n)
1072 pl_enter(pp, &pp->pr_lock);
1073 pp->pr_minitems = n;
1074 pp->pr_minpages = (n == 0)
1076 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1078 if (pp->pr_nitems < n)
1079 prime = n - pp->pr_nitems;
1080 pl_leave(pp, &pp->pr_lock);
1083 pool_prime(pp, prime);
1087 pool_sethiwat(struct pool *pp, int n)
1089 pp->pr_maxpages = (n == 0)
1091 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1095 pool_sethardlimit(struct pool *pp, u_int n, const char *warnmsg, int ratecap)
1099 if (n < pp->pr_nout) {
1104 pp->pr_hardlimit = n;
1105 pp->pr_hardlimit_warning = warnmsg;
1106 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1107 pp->pr_hardlimit_warning_last.tv_sec = 0;
1108 pp->pr_hardlimit_warning_last.tv_usec = 0;
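
The three knobs above in use on the hypothetical foo_pool: the low watermark keeps pages primed (pool_setlowat() calls pool_prime() for any shortfall), the high watermark caps how many idle pages pool_put() and the GC keep around, and the hard limit bounds outstanding items, with the warning rate-limited to roughly once per ratecap seconds.

    void
    foo_tune(void)
    {
            pool_setlowat(&foo_pool, 32);
            pool_sethiwat(&foo_pool, 1024);
            pool_sethardlimit(&foo_pool, 4096,
                "foo_pool hard limit reached", 60);
    }
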
1115 pool_set_constraints(struct pool *pp, const struct kmem_pa_mode *mode)
1117 pp->pr_crange = mode;
1126 pool_reclaim(struct pool *pp)
1131 pl_enter(pp, &pp->pr_lock);
1132 for (ph = TAILQ_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1136 if (pp->pr_npages <= pp->pr_minpages)
1143 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1144 pp->pr_minitems)
1147 pool_p_remove(pp, ph);
1150 pl_leave(pp, &pp->pr_lock);
1157 pool_p_free(pp, ph);
1170 struct pool *pp;
1173 SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist)
1174 pool_reclaim(pp);
1186 pool_printit(struct pool *pp, const char *modif,
1189 pool_print1(pp, modif, pr);
1212 pool_print1(struct pool *pp, const char *modif,
1225 (*pr)("POOL %s: size %u maxcolors %u\n", pp->pr_wchan, pp->pr_size,
1226 pp->pr_maxcolors);
1227 (*pr)("\talloc %p\n", pp->pr_alloc);
1229 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1231 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1234 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1236 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1241 if ((ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL)
1243 pool_print_pagelist(&pp->pr_emptypages, pr);
1244 if ((ph = TAILQ_FIRST(&pp->pr_fullpages)) != NULL)
1246 pool_print_pagelist(&pp->pr_fullpages, pr);
1247 if ((ph = TAILQ_FIRST(&pp->pr_partpages)) != NULL)
1249 pool_print_pagelist(&pp->pr_partpages, pr);
1251 if (pp->pr_curpage == NULL)
1254 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1260 struct pool *pp;
1289 SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1291 db_printf("%-12s %18p %18p\n", pp->pr_wchan, pp,
1292 pp->pr_alloc);
1296 if (!pp->pr_nget)
1299 if (pp->pr_maxpages == UINT_MAX)
1302 snprintf(maxp, sizeof maxp, "%u", pp->pr_maxpages);
1314 PRWORD(ovflw, "%-*s", 10, 0, pp->pr_wchan);
1315 PRWORD(ovflw, " %*u", 4, 1, pp->pr_size);
1316 PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nget);
1317 PRWORD(ovflw, " %*lu", 5, 1, pp->pr_nfail);
1318 PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nput);
1319 PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagealloc);
1320 PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagefree);
1321 PRWORD(ovflw, " %*d", 6, 1, pp->pr_npages);
1322 PRWORD(ovflw, " %*d", 6, 1, pp->pr_hiwat);
1323 PRWORD(ovflw, " %*d", 6, 1, pp->pr_minpages);
1325 PRWORD(ovflw, " %*lu\n", 5, 1, pp->pr_nidle);
1327 pool_chk(pp);
1334 pool_chk_page(struct pool *pp, struct pool_page_header *ph, int expected)
1339 const char *label = pp->pr_wchan;
1341 page = (caddr_t)((u_long)ph & pp->pr_pgmask);
1342 if (page != ph->ph_page && POOL_INPGHDR(pp)) {
1346 pp, pp->pr_wchan, ph->ph_page, ph, page);
1354 (caddr_t)pi >= ph->ph_page + pp->pr_pgsize) {
1357 " item ordinal %d; addr %p\n", pp,
1358 pp->pr_wchan, ph->ph_page, n, pi);
1367 pp, pp->pr_wchan, ph->ph_page, n, pi, page,
1375 if (poison_check(pi + 1, pp->pr_size - sizeof(*pi),
1381 pp->pr_wchan, ph->ph_page, n, pi,
1387 if (n + ph->ph_nmissing != pp->pr_itemsperpage) {
1389 " %d on list, %d missing, %d items per page\n", pp,
1390 pp->pr_wchan, ph->ph_page, n, ph->ph_nmissing,
1391 pp->pr_itemsperpage);
1396 " %d on list, %d missing, %d expected\n", pp,
1397 pp->pr_wchan, ph->ph_page, n, ph->ph_nmissing,
1405 pool_chk(struct pool *pp)
1410 TAILQ_FOREACH(ph, &pp->pr_emptypages, ph_entry)
1411 r += pool_chk_page(pp, ph, pp->pr_itemsperpage);
1412 TAILQ_FOREACH(ph, &pp->pr_fullpages, ph_entry)
1413 r += pool_chk_page(pp, ph, 0);
1414 TAILQ_FOREACH(ph, &pp->pr_partpages, ph_entry)
1415 r += pool_chk_page(pp, ph, -1);
1423 pool_walk(struct pool *pp, int full,
1433 TAILQ_FOREACH(ph, &pp->pr_fullpages, ph_entry) {
1439 cp += pp->pr_size;
1443 TAILQ_FOREACH(ph, &pp->pr_partpages, ph_entry) {
1457 cp += pp->pr_size;
1473 struct pool *pp;
1495 SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1496 if (name[1] == pp->pr_serial) {
1497 refcnt_take(&pp->pr_refcnt);
1503 if (pp == NULL)
1508 rv = sysctl_rdstring(oldp, oldlenp, NULL, pp->pr_wchan);
1513 pl_enter(pp, &pp->pr_lock);
1514 pi.pr_size = pp->pr_size;
1515 pi.pr_pgsize = pp->pr_pgsize;
1516 pi.pr_itemsperpage = pp->pr_itemsperpage;
1517 pi.pr_npages = pp->pr_npages;
1518 pi.pr_minpages = pp->pr_minpages;
1519 pi.pr_maxpages = pp->pr_maxpages;
1520 pi.pr_hardlimit = pp->pr_hardlimit;
1521 pi.pr_nout = pp->pr_nout;
1522 pi.pr_nitems = pp->pr_nitems;
1523 pi.pr_nget = pp->pr_nget;
1524 pi.pr_nput = pp->pr_nput;
1525 pi.pr_nfail = pp->pr_nfail;
1526 pi.pr_npagealloc = pp->pr_npagealloc;
1527 pi.pr_npagefree = pp->pr_npagefree;
1528 pi.pr_hiwat = pp->pr_hiwat;
1529 pi.pr_nidle = pp->pr_nidle;
1530 pl_leave(pp, &pp->pr_lock);
1532 pool_cache_pool_info(pp, &pi);
1538 rv = pool_cache_info(pp, oldp, oldlenp);
1542 rv = pool_cache_cpus_info(pp, oldp, oldlenp);
1546 refcnt_rele_wake(&pp->pr_refcnt);
1560 struct pool *pp;
1566 SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
1568 if (pp->pr_cache != NULL)
1569 pool_cache_gc(pp);
1572 if (pp->pr_nidle <= pp->pr_minpages || /* guess */
1573 !pl_enter_try(pp, &pp->pr_lock)) /* try */
1577 if (pp->pr_nidle > pp->pr_minpages &&
1578 (ph = TAILQ_FIRST(&pp->pr_emptypages)) != NULL &&
1581 pool_p_remove(pp, freeph);
1585 pl_leave(pp, &pp->pr_lock);
1588 pool_p_free(pp, freeph);
1601 pool_allocator_alloc(struct pool *pp, int flags, int *slowdown)
1605 v = (*pp->pr_alloc->pa_alloc)(pp, flags, slowdown);
1608 if (v != NULL && POOL_INPGHDR(pp)) {
1610 if ((addr & pp->pr_pgmask) != addr) {
1612 __func__, pp->pr_wchan, v, pp->pr_pgsize);
1621 pool_allocator_free(struct pool *pp, void *v)
1623 struct pool_allocator *pa = pp->pr_alloc;
1625 (*pa->pa_free)(pp, v);
1629 pool_page_alloc(struct pool *pp, int flags, int *slowdown)
1636 return (km_alloc(pp->pr_pgsize, &kv_page, pp->pr_crange, &kd));
1640 pool_page_free(struct pool *pp, void *v)
1642 km_free(v, pp->pr_pgsize, &kv_page, pp->pr_crange);
1646 pool_multi_alloc(struct pool *pp, int flags, int *slowdown)
1653 if (POOL_INPGHDR(pp))
1654 kv.kv_align = pp->pr_pgsize;
1660 v = km_alloc(pp->pr_pgsize, &kv, pp->pr_crange, &kd);
1667 pool_multi_free(struct pool *pp, void *v)
1672 if (POOL_INPGHDR(pp))
1673 kv.kv_align = pp->pr_pgsize;
1676 km_free(v, pp->pr_pgsize, &kv, pp->pr_crange);
1681 pool_multi_alloc_ni(struct pool *pp, int flags, int *slowdown)
1687 if (POOL_INPGHDR(pp))
1688 kv.kv_align = pp->pr_pgsize;
1694 v = km_alloc(pp->pr_pgsize, &kv, pp->pr_crange, &kd);
1701 pool_multi_free_ni(struct pool *pp, void *v)
1705 if (POOL_INPGHDR(pp))
1706 kv.kv_align = pp->pr_pgsize;
1709 km_free(v, pp->pr_pgsize, &kv, pp->pr_crange);
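
All of the stock allocators above reduce to the same two-hook contract that pool_init() accepts through its palloc argument. A sketch of a custom allocator: foo_pa_alloc()/foo_pa_free() are hypothetical, mirroring the single-page pool_page_alloc()/pool_page_free() at lines 1629-1642, and leaving pa_pagesz zero keeps the default page size.

    #include <sys/param.h>
    #include <sys/pool.h>
    #include <uvm/uvm_extern.h>

    void *
    foo_pa_alloc(struct pool *pp, int flags, int *slowdown)
    {
            struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;

            kd.kd_waitok = ISSET(flags, PR_WAITOK);
            kd.kd_slowdown = slowdown;
            return (km_alloc(pp->pr_pgsize, &kv_page, pp->pr_crange, &kd));
    }

    void
    foo_pa_free(struct pool *pp, void *v)
    {
            km_free(v, pp->pr_pgsize, &kv_page, pp->pr_crange);
    }

    struct pool_allocator foo_allocator = {
            foo_pa_alloc,
            foo_pa_free,
    };
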
1718 pool_cache_init(struct pool *pp)
1731 KASSERT(pp->pr_size >= sizeof(struct pool_cache_item));
1735 pl_init(pp, &pp->pr_cache_lock);
1736 arc4random_buf(pp->pr_cache_magic, sizeof(pp->pr_cache_magic));
1737 TAILQ_INIT(&pp->pr_cache_lists);
1738 pp->pr_cache_nitems = 0;
1739 pp->pr_cache_timestamp = getnsecuptime();
1740 pp->pr_cache_items = 8;
1741 pp->pr_cache_contention = 0;
1742 pp->pr_cache_ngc = 0;
1760 pp->pr_cache = cm;
1764 pool_cache_item_magic(struct pool *pp, struct pool_cache_item *ci)
1768 entry[0] = pp->pr_cache_magic[0] ^ (u_long)ci;
1769 entry[1] = pp->pr_cache_magic[1] ^ (u_long)ci->ci_next;
1773 pool_cache_item_magic_check(struct pool *pp, struct pool_cache_item *ci)
1779 val = pp->pr_cache_magic[0] ^ (u_long)ci;
1784 val = pp->pr_cache_magic[1] ^ (u_long)ci->ci_next;
1792 __func__, pp->pr_wchan, ci, (caddr_t)entry - (caddr_t)ci,
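
The magic scheme: while a free item sits on a cache list, two words inside it are stored XORed with per-pool random values (arc4random_buf() at line 1736), one keyed on the item's own address and one on its ci_next pointer. The check re-derives both; a stray write to a freed item breaks the round trip and triggers the panic at line 1792. A userland model with a simplified item layout:

    #include <assert.h>
    #include <stddef.h>

    struct ci {
            struct ci *ci_next;
            unsigned long entry[2];         /* simplified: magic words */
    };

    int
    main(void)
    {
            /* arc4random_buf() fills these in the kernel; fixed here */
            unsigned long magic[2] = { 0xdeadbeefUL, 0xcafef00dUL };
            struct ci item = { .ci_next = NULL };

            /* pool_cache_item_magic(): stamp the free item */
            item.entry[0] = magic[0] ^ (unsigned long)&item;
            item.entry[1] = magic[1] ^ (unsigned long)item.ci_next;

            /* pool_cache_item_magic_check(): untouched item decodes */
            assert((item.entry[1] ^ magic[1]) ==
                (unsigned long)item.ci_next);

            item.entry[1] ^= 0x1;           /* a use-after-free write... */

            /* ...no longer round-trips, which the kernel panics on */
            assert((item.entry[1] ^ magic[1]) !=
                (unsigned long)item.ci_next);
            return 0;
    }
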
1797 pool_list_enter(struct pool *pp)
1799 if (pl_enter_try(pp, &pp->pr_cache_lock) == 0) {
1800 pl_enter(pp, &pp->pr_cache_lock);
1801 pp->pr_cache_contention++;
1806 pool_list_leave(struct pool *pp)
1808 pl_leave(pp, &pp->pr_cache_lock);
1812 pool_cache_list_alloc(struct pool *pp, struct pool_cache *pc)
1816 pool_list_enter(pp);
1817 pl = TAILQ_FIRST(&pp->pr_cache_lists);
1819 TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
1820 pp->pr_cache_nitems -= POOL_CACHE_ITEM_NITEMS(pl);
1822 pool_cache_item_magic(pp, pl);
1829 pp->pr_cache_nout += pc->pc_nout;
1831 pool_list_leave(pp);
1837 pool_cache_list_free(struct pool *pp, struct pool_cache *pc,
1840 pool_list_enter(pp);
1841 if (TAILQ_EMPTY(&pp->pr_cache_lists))
1842 pp->pr_cache_timestamp = getnsecuptime();
1844 pp->pr_cache_nitems += POOL_CACHE_ITEM_NITEMS(ci);
1845 TAILQ_INSERT_TAIL(&pp->pr_cache_lists, ci, ci_nextl);
1850 pp->pr_cache_nout += pc->pc_nout;
1852 pool_list_leave(pp);
1856 pool_cache_enter(struct pool *pp, int *s)
1860 pc = cpumem_enter(pp->pr_cache);
1861 *s = splraise(pp->pr_ipl);
1868 pool_cache_leave(struct pool *pp, struct pool_cache *pc, int s)
1872 cpumem_leave(pp->pr_cache, pc);
1876 pool_cache_get(struct pool *pp)
1882 pc = pool_cache_enter(pp, &s);
1889 } else if ((ci = pool_cache_list_alloc(pp, pc)) == NULL) {
1894 pool_cache_item_magic_check(pp, ci);
1900 if (poison_check(ci + 1, pp->pr_size - sizeof(*ci),
1907 __func__, pp->pr_wchan, ci,
1919 pool_cache_leave(pp, pc, s);
1925 pool_cache_put(struct pool *pp, void *v)
1932 int poison = pool_debug && pp->pr_size > sizeof(*ci);
1935 poison_mem(ci + 1, pp->pr_size - sizeof(*ci));
1938 pc = pool_cache_enter(pp, &s);
1941 if (nitems >= pp->pr_cache_items) {
1943 pool_cache_list_free(pp, pc, pc->pc_prev);
1957 pool_cache_item_magic(pp, ci);
1965 pool_cache_leave(pp, pc, s);
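
The fast path in pool_cache_get()/pool_cache_put() touches only the current CPU's pc_actv list at splraise(pr_ipl); the shared pr_cache_lock is taken only when a whole list of pr_cache_items entries has to migrate to or from pr_cache_lists. A userland model of the per-CPU LIFO that the fast path amounts to (no lock, interrupts assumed blocked):

    #include <assert.h>
    #include <stddef.h>

    struct item {
            struct item *ci_next;
    };

    struct percpu_cache {
            struct item *pc_actv;   /* this CPU's current free list */
            unsigned int pc_nactv;  /* its length */
    };

    static void
    cache_put(struct percpu_cache *pc, struct item *ci)
    {
            ci->ci_next = pc->pc_actv;
            pc->pc_actv = ci;
            pc->pc_nactv++;
    }

    static struct item *
    cache_get(struct percpu_cache *pc)
    {
            struct item *ci = pc->pc_actv;

            if (ci != NULL) {
                    pc->pc_actv = ci->ci_next;
                    pc->pc_nactv--;
            }
            return ci;
    }

    int
    main(void)
    {
            struct percpu_cache pc = { NULL, 0 };
            struct item a, b;

            cache_put(&pc, &a);
            cache_put(&pc, &b);
            assert(cache_get(&pc) == &b);   /* LIFO: hottest item first */
            assert(cache_get(&pc) == &a);
            assert(cache_get(&pc) == NULL); /* empty: fall back to pool */
            return 0;
    }
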
1969 pool_cache_list_put(struct pool *pp, struct pool_cache_item *pl)
1978 pl_enter(pp, &pp->pr_lock);
1981 pool_do_put(pp, pl);
1984 pl_leave(pp, &pp->pr_lock);
1990 pool_cache_destroy(struct pool *pp)
1998 cm = pp->pr_cache;
1999 pp->pr_cache = NULL; /* make pool_put avoid the cache */
2003 pool_cache_list_put(pp, pc->pc_actv);
2004 pool_cache_list_put(pp, pc->pc_prev);
2009 pl = TAILQ_FIRST(&pp->pr_cache_lists);
2011 pl = pool_cache_list_put(pp, pl);
2015 pool_cache_gc(struct pool *pp)
2019 if (getnsecuptime() - pp->pr_cache_timestamp > POOL_WAIT_GC &&
2020 !TAILQ_EMPTY(&pp->pr_cache_lists) &&
2021 pl_enter_try(pp, &pp->pr_cache_lock)) {
2024 pl = TAILQ_FIRST(&pp->pr_cache_lists);
2026 TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
2027 pp->pr_cache_nitems -= POOL_CACHE_ITEM_NITEMS(pl);
2028 pp->pr_cache_timestamp = getnsecuptime();
2030 pp->pr_cache_ngc++;
2033 pl_leave(pp, &pp->pr_cache_lock);
2035 pool_cache_list_put(pp, pl);
2044 contention = pp->pr_cache_contention;
2045 delta = contention - pp->pr_cache_contention_prev;
2047 if ((ncpusfound * 8 * 2) <= pp->pr_cache_nitems)
2048 pp->pr_cache_items += 8;
2050 if (pp->pr_cache_items > 8)
2051 pp->pr_cache_items--;
2053 pp->pr_cache_contention_prev = contention;
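
The tail of pool_cache_gc() adapts pr_cache_items to the observed contention on pr_cache_lock (counted in pool_list_enter() at line 1801): lists grow by 8 when contention rose since the last pass and enough free items exist to fill longer lists, and decay by one toward the minimum of 8 when it did not. A model of one adaptation step, assuming a simple threshold on the contention delta since the condition itself is not among the matched lines:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned int ncpusfound = 4;            /* CPUs in the system */
            unsigned int cache_items = 8;           /* pr_cache_items */
            unsigned int cache_nitems = 200;        /* pr_cache_nitems */
            unsigned int contention = 20, contention_prev = 5;
            unsigned int delta = contention - contention_prev;

            if (delta > 8) {                /* assumed threshold */
                    if ((ncpusfound * 8 * 2) <= cache_nitems)
                            cache_items += 8;       /* fewer lock trips */
            } else if (delta == 0) {
                    if (cache_items > 8)
                            cache_items--;          /* decay to minimum */
            }
            printf("pr_cache_items -> %u\n", cache_items);
            return 0;
    }
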
2057 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
2062 if (pp->pr_cache == NULL)
2068 CPUMEM_FOREACH(pc, &i, pp->pr_cache) {
2084 pl_enter(pp, &pp->pr_cache_lock);
2085 CPUMEM_FOREACH(pc, &i, pp->pr_cache)
2088 pi->pr_nout += pp->pr_cache_nout;
2089 pl_leave(pp, &pp->pr_cache_lock);
2093 pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
2097 if (pp->pr_cache == NULL)
2102 pl_enter(pp, &pp->pr_cache_lock);
2103 kpc.pr_ngc = pp->pr_cache_ngc;
2104 kpc.pr_len = pp->pr_cache_items;
2105 kpc.pr_nitems = pp->pr_cache_nitems;
2106 kpc.pr_contention = pp->pr_cache_contention;
2107 pl_leave(pp, &pp->pr_cache_lock);
2113 pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
2122 if (pp->pr_cache == NULL)
2134 CPUMEM_FOREACH(pc, &i, pp->pr_cache) {
2168 pool_cache_init(struct pool *pp)
2174 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
2180 pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
2186 pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
2194 pool_lock_mtx_init(struct pool *pp, union pool_lock *lock,
2197 _mtx_init_flags(&lock->prl_mtx, pp->pr_ipl, pp->pr_wchan, 0, type);
2248 pool_lock_rw_init(struct pool *pp, union pool_lock *lock,
2251 _rw_init_flags(&lock->prl_rwlock, pp->pr_wchan, 0, type);