Lines Matching defs:pp

107 # define pool_redzone_init(pp, sz) __nothing
108 # define pool_redzone_fill(pp, ptr) __nothing
109 # define pool_redzone_check(pp, ptr) __nothing
119 #define pool_get_kmsan(pp, ptr) __nothing
120 #define pool_put_kmsan(pp, ptr) __nothing
149 #define pp_has_pser(pp) (((pp)->pr_roflags & PR_PSERIALIZE) != 0)
330 #define POOL_NEEDS_CATCHUP(pp) \
331 ((pp)->pr_nitems < (pp)->pr_minitems || \
332 (pp)->pr_npages < (pp)->pr_minpages)
333 #define POOL_OBJ_TO_PAGE(pp, v) \
334 (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask)
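
POOL_NEEDS_CATCHUP above is just the low-watermark test (fewer available items than pr_minitems, or fewer pages than pr_minpages), and POOL_OBJ_TO_PAGE masks an object address down to the start of its backing page. A minimal userland sketch of that masking, assuming pa_pagemask is the usual ~(pagesize - 1) and using made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Assume 4 KiB pages; pa_pagemask would be ~(pagesz - 1). */
            const uintptr_t pagesz = 4096;
            const uintptr_t pagemask = ~(pagesz - 1);

            /* An object address somewhere inside a page (illustrative)... */
            uintptr_t obj = 0x7f0000003a40;

            /* ...masks back to its page base, as POOL_OBJ_TO_PAGE does. */
            printf("object %#lx -> page %#lx\n",
                (unsigned long)obj, (unsigned long)(obj & pagemask));
            return 0;
    }
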
408 pr_item_bitmap_index(const struct pool *pp, const struct pool_item_header *ph,
414 KASSERT(pp->pr_roflags & PR_USEBMAP);
415 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
417 if (__predict_false(idx >= pp->pr_itemsperpage)) {
418 panic("%s: [%s] %u >= %u", __func__, pp->pr_wchan, idx,
419 pp->pr_itemsperpage);
426 pr_item_bitmap_put(const struct pool *pp, struct pool_item_header *ph,
429 unsigned int idx = pr_item_bitmap_index(pp, ph, obj);
434 panic("%s: [%s] %p already freed", __func__, pp->pr_wchan, obj);
441 pr_item_bitmap_get(const struct pool *pp, struct pool_item_header *ph)
450 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
463 KASSERT(idx < pp->pr_itemsperpage);
464 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
468 pr_item_bitmap_init(const struct pool *pp, struct pool_item_header *ph)
471 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
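
pr_item_bitmap_index (line 415 above) recovers an object's ordinal on its page by plain address arithmetic: subtract the page base and the item offset, then divide by the item size; the kernel then asserts the result is below pr_itemsperpage. A standalone sketch of that arithmetic with made-up values standing in for ph_page, ph_off and pr_size:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Illustrative stand-ins for ph_page, ph_off and pr_size. */
            uintptr_t page = 0x1000;
            unsigned int off = 64;
            unsigned int size = 128;

            /* Address of the fourth item on the page. */
            uintptr_t obj = page + off + 3 * size;

            unsigned int idx = (unsigned int)((obj - page - off) / size);
            printf("item index = %u\n", idx);       /* prints 3 */
            return 0;
    }
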
482 pr_item_linkedlist_put(const struct pool *pp, struct pool_item_header *ph,
487 KASSERT(!pp_has_pser(pp));
493 if (pp->pr_redzone) {
505 pr_item_linkedlist_get(struct pool *pp, struct pool_item_header *ph)
512 mutex_exit(&pp->pr_lock);
513 panic("%s: [%s] page empty", __func__, pp->pr_wchan);
515 KASSERTMSG((pp->pr_nitems > 0),
517 __func__, pp->pr_wchan, pp->pr_nitems);
522 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
536 pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page,
541 pp->pr_wchan, object);
545 pp->pr_wchan, object);
547 if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
549 pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid);
557 struct pool *pp;
560 pp = &pc->pc_pool;
561 page = POOL_OBJ_TO_PAGE(pp, object);
564 pr_phinpage_check(pp, ph, page, object);
589 pr_find_pagehead_noalign(struct pool *pp, void *v)
594 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
596 ph = SPLAY_ROOT(&pp->pr_phtree);
598 ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
610 pr_find_pagehead(struct pool *pp, void *v)
614 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
615 ph = pr_find_pagehead_noalign(pp, v);
617 void *page = POOL_OBJ_TO_PAGE(pp, v);
618 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
620 pr_phinpage_check(pp, ph, page, v);
623 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
627 KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
629 (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
634 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
640 pool_allocator_free(pp, ph->ph_page);
641 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
642 pool_put(pp->pr_phpool, ph);
650 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
654 KASSERT(mutex_owned(&pp->pr_lock));
660 KASSERT(pp->pr_nidle != 0);
661 KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
663 pp->pr_wchan, pp->pr_nitems, pp->pr_itemsperpage);
664 pp->pr_nidle--;
667 pp->pr_nitems -= pp->pr_itemsperpage;
673 if (pp->pr_roflags & PR_PHINPAGE) {
674 if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
676 __func__, pp->pr_wchan, ph, ph->ph_poolid,
677 pp->pr_poolid);
680 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
684 pp->pr_npages--;
685 pp->pr_npagefree++;
687 pool_update_curpage(pp);
740 pool_init_is_phinpage(const struct pool *pp)
744 if (pp->pr_roflags & PR_PHINPAGE) {
747 if (pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) {
751 pagesize = pp->pr_alloc->pa_pagesz;
758 if (pp->pr_size < MIN(pagesize / 16, PHSIZE * 8)) {
763 if (pagesize / pp->pr_size == (pagesize - PHSIZE) / pp->pr_size) {
771 pool_init_is_usebmap(const struct pool *pp)
775 if (pp->pr_roflags & PR_NOTOUCH) {
782 if (!(pp->pr_roflags & PR_PHINPAGE)) {
790 bmapsize = roundup(PHSIZE, pp->pr_align) -
793 if (pp->pr_itemsperpage <= bmapsize * CHAR_BIT) {
807 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
825 if (pp == pp1)
884 LIST_INIT(&pp->pr_emptypages);
885 LIST_INIT(&pp->pr_fullpages);
886 LIST_INIT(&pp->pr_partpages);
887 pp->pr_cache = NULL;
888 pp->pr_curpage = NULL;
889 pp->pr_npages = 0;
890 pp->pr_minitems = 0;
891 pp->pr_minpages = 0;
892 pp->pr_maxitems = UINT_MAX;
893 pp->pr_maxpages = UINT_MAX;
894 pp->pr_roflags = flags;
895 pp->pr_flags = 0;
896 pp->pr_size = prsize;
897 pp->pr_reqsize = size;
898 pp->pr_align = align;
899 pp->pr_wchan = wchan;
900 pp->pr_alloc = palloc;
901 pp->pr_poolid = atomic_inc_uint_nv(&poolid_counter);
902 pp->pr_nitems = 0;
903 pp->pr_nout = 0;
904 pp->pr_hardlimit = UINT_MAX;
905 pp->pr_hardlimit_warning = NULL;
906 pp->pr_hardlimit_ratecap.tv_sec = 0;
907 pp->pr_hardlimit_ratecap.tv_usec = 0;
908 pp->pr_hardlimit_warning_last.tv_sec = 0;
909 pp->pr_hardlimit_warning_last.tv_usec = 0;
910 pp->pr_drain_hook = NULL;
911 pp->pr_drain_hook_arg = NULL;
912 pp->pr_freecheck = NULL;
913 pp->pr_redzone = false;
914 pool_redzone_init(pp, size);
915 pool_quarantine_init(pp);
923 if (pool_init_is_phinpage(pp)) {
926 pp->pr_itemoffset = roundup(PHSIZE, align);
927 pp->pr_roflags |= PR_PHINPAGE;
931 pp->pr_itemoffset = 0;
932 SPLAY_INIT(&pp->pr_phtree);
935 pp->pr_itemsperpage = itemspace / pp->pr_size;
936 KASSERT(pp->pr_itemsperpage != 0);
942 if (pool_init_is_usebmap(pp)) {
943 pp->pr_roflags |= PR_USEBMAP;
951 if (!(pp->pr_roflags & PR_PHINPAGE)) {
954 KASSERT(pp->pr_roflags & PR_USEBMAP);
956 for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
967 pp->pr_wchan, pp->pr_itemsperpage);
969 pp->pr_phpool = &phpool[idx];
971 pp->pr_phpool = NULL;
978 slack = itemspace - pp->pr_itemsperpage * pp->pr_size;
979 pp->pr_maxcolor = rounddown(slack, align);
980 pp->pr_curcolor = 0;
982 pp->pr_nget = 0;
983 pp->pr_nfail = 0;
984 pp->pr_nput = 0;
985 pp->pr_npagealloc = 0;
986 pp->pr_npagefree = 0;
987 pp->pr_hiwat = 0;
988 pp->pr_nidle = 0;
989 pp->pr_refcnt = 0;
991 mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
992 cv_init(&pp->pr_cv, wchan);
993 pp->pr_ipl = ipl;
999 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
1003 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
1005 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1012 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
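
The pool_init() lines above (807-1012) show the whole constructor: item size, alignment, offset, flags, wait-channel name, page allocator and IPL, ending with the pool being linked onto pool_head and onto its allocator's pa_list. A minimal kernel-style usage sketch of the API as it appears in this listing; struct foo, foo_pool and "foopl" are illustrative names, not from the source:

    #include <sys/pool.h>

    struct foo {
            int f_state;
            /* ... */
    };

    static struct pool foo_pool;

    void
    foo_init(void)
    {
            /* Default alignment/offset/flags, kernel default page
             * allocator (NULL), no interrupt protection needed. */
            pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
                "foopl", NULL, IPL_NONE);
    }

    struct foo *
    foo_alloc(void)
    {
            /* May sleep until an item or a fresh page is available. */
            return pool_get(&foo_pool, PR_WAITOK);
    }

    void
    foo_free(struct foo *fp)
    {
            pool_put(&foo_pool, fp);
    }

    void
    foo_fini(void)
    {
            pool_destroy(&foo_pool);
    }
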
1021 pool_destroy(struct pool *pp)
1026 pool_quarantine_flush(pp);
1030 while (pp->pr_refcnt != 0)
1032 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1033 if (drainpp == pp)
1038 mutex_enter(&pp->pr_alloc->pa_lock);
1039 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1040 mutex_exit(&pp->pr_alloc->pa_lock);
1043 if (--pp->pr_alloc->pa_refcnt == 0)
1044 mutex_destroy(&pp->pr_alloc->pa_lock);
1047 mutex_enter(&pp->pr_lock);
1049 KASSERT(pp->pr_cache == NULL);
1050 KASSERTMSG((pp->pr_nout == 0),
1051 "%s: [%s] pool busy: still out: %u", __func__, pp->pr_wchan,
1052 pp->pr_nout);
1053 KASSERT(LIST_EMPTY(&pp->pr_fullpages));
1054 KASSERT(LIST_EMPTY(&pp->pr_partpages));
1058 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1059 pr_rmpage(pp, ph, &pq);
1061 mutex_exit(&pp->pr_lock);
1063 pr_pagelist_free(pp, &pq);
1064 cv_destroy(&pp->pr_cv);
1065 mutex_destroy(&pp->pr_lock);
1069 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
1073 KASSERTMSG((pp->pr_drain_hook == NULL),
1074 "%s: [%s] already set", __func__, pp->pr_wchan);
1075 pp->pr_drain_hook = fn;
1076 pp->pr_drain_hook_arg = arg;
1080 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1084 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1087 ph = pool_get(pp->pr_phpool, flags);
1096 pool_get(struct pool *pp, int flags)
1102 KASSERTMSG((pp->pr_itemsperpage != 0),
1104 "pool not initialized?", __func__, pp->pr_wchan);
1106 || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
1108 __func__, pp->pr_wchan);
1118 mutex_enter(&pp->pr_lock);
1125 KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
1126 "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
1127 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1128 if (pp->pr_drain_hook != NULL) {
1134 mutex_exit(&pp->pr_lock);
1135 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1136 mutex_enter(&pp->pr_lock);
1137 if (pp->pr_nout < pp->pr_hardlimit)
1146 pp->pr_flags |= PR_WANTED;
1148 cv_wait(&pp->pr_cv, &pp->pr_lock);
1149 } while (pp->pr_flags & PR_WANTED);
1156 if (pp->pr_hardlimit_warning != NULL &&
1157 ratecheck(&pp->pr_hardlimit_warning_last,
1158 &pp->pr_hardlimit_ratecap))
1159 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1161 pp->pr_nfail++;
1163 mutex_exit(&pp->pr_lock);
1174 if ((ph = pp->pr_curpage) == NULL) {
1177 KASSERTMSG((pp->pr_nitems == 0),
1179 __func__, pp->pr_wchan, pp->pr_nitems);
1186 error = pool_grow(pp, flags);
1202 if (pp->pr_curpage != NULL)
1205 pp->pr_nfail++;
1206 mutex_exit(&pp->pr_lock);
1214 if (pp->pr_roflags & PR_USEBMAP) {
1215 KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
1216 "%s: [%s] pool page empty", __func__, pp->pr_wchan);
1217 v = pr_item_bitmap_get(pp, ph);
1219 v = pr_item_linkedlist_get(pp, ph);
1221 pp->pr_nitems--;
1222 pp->pr_nout++;
1224 KASSERT(pp->pr_nidle > 0);
1225 pp->pr_nidle--;
1232 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1235 if (ph->ph_nmissing == pp->pr_itemsperpage) {
1236 KASSERTMSG(((pp->pr_roflags & PR_USEBMAP) ||
1239 pp->pr_wchan, ph->ph_nmissing);
1245 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1246 pool_update_curpage(pp);
1249 pp->pr_nget++;
1255 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1263 mutex_exit(&pp->pr_lock);
1264 KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0);
1265 FREECHECK_OUT(&pp->pr_freecheck, v);
1266 pool_redzone_fill(pp, v);
1267 pool_get_kmsan(pp, v);
1269 memset(v, 0, pp->pr_reqsize);
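
The failure paths above (hard limit reached, pool_grow() failing) are the points where pool_get() returns NULL rather than sleeping; a caller passing PR_NOWAIT has to handle that. An illustrative caller-side fragment, continuing the hypothetical foo_pool and struct foo from the earlier sketch:

    int
    foo_alloc_nowait(struct foo **fpp)
    {
            struct foo *fp;

            /* PR_NOWAIT: fail with NULL instead of waiting on pr_cv. */
            if ((fp = pool_get(&foo_pool, PR_NOWAIT)) == NULL)
                    return ENOMEM;

            *fpp = fp;
            return 0;
    }
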
1277 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1281 KASSERT(mutex_owned(&pp->pr_lock));
1282 pool_redzone_check(pp, v);
1283 pool_put_kmsan(pp, v);
1284 FREECHECK_IN(&pp->pr_freecheck, v);
1285 LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1287 KASSERTMSG((pp->pr_nout > 0),
1288 "%s: [%s] putting with none out", __func__, pp->pr_wchan);
1290 if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1291 panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
1297 if (pp->pr_roflags & PR_USEBMAP) {
1298 pr_item_bitmap_put(pp, ph, v);
1300 pr_item_linkedlist_put(pp, ph, v);
1304 pp->pr_nput++;
1305 pp->pr_nitems++;
1306 pp->pr_nout--;
1309 if (pp->pr_curpage == NULL)
1310 pp->pr_curpage = ph;
1312 if (pp->pr_flags & PR_WANTED) {
1313 pp->pr_flags &= ~PR_WANTED;
1314 cv_broadcast(&pp->pr_cv);
1331 pp->pr_nidle++;
1332 if (pp->pr_nitems - pp->pr_itemsperpage >= pp->pr_minitems &&
1333 pp->pr_npages > pp->pr_minpages &&
1334 (pp->pr_npages > pp->pr_maxpages ||
1335 pp->pr_nitems > pp->pr_maxitems)) {
1336 pr_rmpage(pp, ph, pq);
1339 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1352 pool_update_curpage(pp);
1361 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1363 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1364 pp->pr_curpage = ph;
1369 pool_put(struct pool *pp, void *v)
1375 mutex_enter(&pp->pr_lock);
1376 if (!pool_put_quarantine(pp, v, &pq)) {
1377 pool_do_put(pp, v, &pq);
1379 mutex_exit(&pp->pr_lock);
1381 pr_pagelist_free(pp, &pq);
1393 pool_grow(struct pool *pp, int flags)
1402 if (pp->pr_flags & PR_GROWING) {
1405 cv_wait(&pp->pr_cv, &pp->pr_lock);
1406 } while (pp->pr_flags & PR_GROWING);
1409 if (pp->pr_flags & PR_GROWINGNOWAIT) {
1416 mutex_exit(&pp->pr_lock);
1417 mutex_enter(&pp->pr_lock);
1423 pp->pr_flags |= PR_GROWING;
1425 mutex_exit(&pp->pr_lock);
1427 pp->pr_flags |= PR_GROWINGNOWAIT;
1429 storage = pool_allocator_alloc(pp, flags);
1433 ph = pool_alloc_item_header(pp, storage, flags);
1435 pool_allocator_free(pp, storage);
1440 mutex_enter(&pp->pr_lock);
1441 pool_prime_page(pp, storage, ph);
1442 pp->pr_npagealloc++;
1443 KASSERT(pp->pr_flags & PR_GROWING);
1444 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1449 cv_broadcast(&pp->pr_cv);
1453 mutex_enter(&pp->pr_lock);
1454 KASSERT(pp->pr_flags & PR_GROWING);
1455 pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1460 pool_prime(struct pool *pp, int n)
1463 mutex_enter(&pp->pr_lock);
1464 pp->pr_minpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1465 if (pp->pr_maxpages <= pp->pr_minpages)
1466 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1467 while (pp->pr_npages < pp->pr_minpages)
1468 (void) pool_grow(pp, PR_WAITOK);
1469 mutex_exit(&pp->pr_lock);
1478 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1480 const unsigned int align = pp->pr_align;
1485 KASSERT(mutex_owned(&pp->pr_lock));
1486 KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
1487 (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
1488 "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
1493 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1498 if (pp->pr_roflags & PR_PHINPAGE)
1499 ph->ph_poolid = pp->pr_poolid;
1501 SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1503 pp->pr_nidle++;
1508 ph->ph_off = pp->pr_itemoffset;
1513 ph->ph_off += pp->pr_curcolor;
1515 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1516 pp->pr_curcolor = 0;
1523 n = pp->pr_itemsperpage;
1524 pp->pr_nitems += n;
1526 if (pp->pr_roflags & PR_USEBMAP) {
1527 pr_item_bitmap_init(pp, ph);
1539 cp = (char *)cp + pp->pr_size;
1548 if (pp->pr_curpage == NULL)
1549 pp->pr_curpage = ph;
1551 if (++pp->pr_npages > pp->pr_hiwat)
1552 pp->pr_hiwat = pp->pr_npages;
1565 pool_catchup(struct pool *pp)
1569 while (POOL_NEEDS_CATCHUP(pp)) {
1570 error = pool_grow(pp, PR_NOWAIT);
1581 pool_update_curpage(struct pool *pp)
1584 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1585 if (pp->pr_curpage == NULL) {
1586 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1588 KASSERTMSG((pp->pr_curpage == NULL) == (pp->pr_nitems == 0),
1589 "pp=%p curpage=%p nitems=%u", pp, pp->pr_curpage, pp->pr_nitems);
1593 pool_setlowat(struct pool *pp, int n)
1596 mutex_enter(&pp->pr_lock);
1597 pp->pr_minitems = n;
1600 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1608 mutex_exit(&pp->pr_lock);
1612 pool_sethiwat(struct pool *pp, int n)
1615 mutex_enter(&pp->pr_lock);
1617 pp->pr_maxitems = n;
1619 mutex_exit(&pp->pr_lock);
1623 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1626 mutex_enter(&pp->pr_lock);
1628 pp->pr_hardlimit = n;
1629 pp->pr_hardlimit_warning = warnmess;
1630 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1631 pp->pr_hardlimit_warning_last.tv_sec = 0;
1632 pp->pr_hardlimit_warning_last.tv_usec = 0;
1634 pp->pr_maxpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1636 mutex_exit(&pp->pr_lock);
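
pool_setlowat(), pool_sethiwat() and pool_sethardlimit() above retune a live pool: the low watermark triggers pool_catchup(), the high watermark caps the free items the pool keeps cached, and the hard limit bounds outstanding allocations with a rate-limited warning. A hedged sketch of how they might be strung together, continuing the illustrative foo_pool (all numbers made up):

    void
    foo_tune(void)
    {
            /* Keep at least 32 free items on hand (low watermark). */
            pool_setlowat(&foo_pool, 32);

            /* Cap cached free items at 256; beyond that, idle pages
             * become candidates to be returned to the allocator. */
            pool_sethiwat(&foo_pool, 256);

            /* Never allow more than 1024 items out at once; log the
             * warning message at most once every 60 seconds. */
            pool_sethardlimit(&foo_pool, 1024,
                "WARNING: foo_pool hard limit reached", 60);

            /* Pre-allocate enough pages to cover 64 items. */
            pool_prime(&foo_pool, 64);
    }
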
1640 pool_nget(struct pool *pp)
1643 return pp->pr_nget;
1647 pool_nput(struct pool *pp)
1650 return pp->pr_nput;
1659 pool_reclaim(struct pool *pp)
1671 if (pp->pr_drain_hook != NULL) {
1675 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1682 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1683 pp->pr_ipl == IPL_SOFTSERIAL) {
1690 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL)
1693 if (mutex_tryenter(&pp->pr_lock) == 0) {
1704 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1708 if (pp->pr_npages <= pp->pr_minpages)
1719 if (pp->pr_nitems - pp->pr_itemsperpage < pp->pr_minitems ||
1720 pp->pr_npages - 1 < pp->pr_minpages)
1723 pr_rmpage(pp, ph, &pq);
1726 mutex_exit(&pp->pr_lock);
1731 pr_pagelist_free(pp, &pq);
1751 struct pool *pp;
1755 pp = NULL;
1764 pp = drainpp;
1765 drainpp = TAILQ_NEXT(pp, pr_poollist);
1771 } while (pp == NULL || pp->pr_npages == 0);
1772 pp->pr_refcnt++;
1776 reclaimed = pool_reclaim(pp);
1780 pp->pr_refcnt--;
1785 *ppp = pp;
1807 struct pool *pp;
1810 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1812 (uint64_t)pp->pr_npages * pp->pr_alloc->pa_pagesz;
1814 if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1815 bytes -= ((uint64_t)pp->pr_nout * pp->pr_size);
1829 struct pool *pp;
1831 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1832 pool_printit(pp, modif, pr);
1837 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1840 if (pp == NULL) {
1845 pool_print1(pp, modif, pr);
1849 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1859 if (!(pp->pr_roflags & PR_USEBMAP)) {
1872 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1898 if (skip_empty && pp->pr_nget == 0)
1901 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL) {
1910 pp->pr_wchan, pp, pp->pr_size, pp->pr_align, pp->pr_npages,
1911 pp->pr_nitems, pp->pr_nout, pp->pr_nget, pp->pr_nput,
1912 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_nidle,
1913 (size_t)pp->pr_npagealloc * pp->pr_alloc->pa_pagesz);
1918 pp->pr_wchan, pp->pr_size,
1919 (size_t)pp->pr_npagealloc * pp->pr_alloc->pa_pagesz,
1920 pp->pr_align, pp->pr_itemoffset, pp->pr_roflags);
1921 (*pr)("\tpool %p, alloc %p\n", pp, pp->pr_alloc);
1923 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1925 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1928 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1930 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1935 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1937 pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1938 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1940 pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1941 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1943 pool_print_pagelist(pp, &pp->pr_partpages, pr);
1945 if (pp->pr_curpage == NULL)
1948 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
2002 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
2008 if ((pp->pr_roflags & PR_NOALIGN) == 0) {
2009 page = POOL_OBJ_TO_PAGE(pp, ph);
2011 (pp->pr_roflags & PR_PHINPAGE) != 0) {
2015 " at page head addr %p (p %p)\n", pp,
2016 pp->pr_wchan, ph->ph_page,
2022 if ((pp->pr_roflags & PR_USEBMAP) != 0)
2035 pp->pr_wchan, pi->pi_magic, ph->ph_page,
2040 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
2043 page = POOL_OBJ_TO_PAGE(pp, pi);
2050 " item ordinal %d; addr %p (p %p)\n", pp,
2051 pp->pr_wchan, ph->ph_page,
2060 pool_chk(struct pool *pp, const char *label)
2065 mutex_enter(&pp->pr_lock);
2066 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2067 r = pool_chk_page(pp, label, ph);
2072 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2073 r = pool_chk_page(pp, label, ph);
2078 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2079 r = pool_chk_page(pp, label, ph);
2086 mutex_exit(&pp->pr_lock);
2128 struct pool *pp;
2131 pp = &pc->pc_pool;
2146 pool_init(pp, size, align, align_offset, ppflags, wchan, palloc, ipl);
2193 atomic_store_release(&pp->pr_cache, pc);
2217 struct pool *pp = &pc->pc_pool;
2231 mutex_enter(&pp->pr_lock);
2232 atomic_store_relaxed(&pp->pr_cache, NULL);
2233 mutex_exit(&pp->pr_lock);
2240 pool_destroy(pp);
2967 pool_allocator_alloc(struct pool *pp, int flags)
2969 struct pool_allocator *pa = pp->pr_alloc;
2972 res = (*pa->pa_alloc)(pp, flags);
2979 if (pp->pr_drain_hook != NULL) {
2980 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2981 res = (*pa->pa_alloc)(pp, flags);
2988 pool_allocator_free(struct pool *pp, void *v)
2990 struct pool_allocator *pa = pp->pr_alloc;
2992 if (pp->pr_redzone) {
2993 KASSERT(!pp_has_pser(pp));
2995 } else if (__predict_false(pp_has_pser(pp))) {
3002 (*pa->pa_free)(pp, v);
3006 pool_page_alloc(struct pool *pp, int flags)
3012 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
3019 pool_page_free(struct pool *pp, void *v)
3022 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
3026 pool_page_alloc_meta(struct pool *pp, int flags)
3032 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
3039 pool_page_free_meta(struct pool *pp, void *v)
3042 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
3047 pool_get_kmsan(struct pool *pp, void *p)
3049 kmsan_orig(p, pp->pr_size, KMSAN_TYPE_POOL, __RET_ADDR);
3050 kmsan_mark(p, pp->pr_size, KMSAN_STATE_UNINIT);
3054 pool_put_kmsan(struct pool *pp, void *p)
3056 kmsan_mark(p, pp->pr_size, KMSAN_STATE_INITED);
3077 pool_quarantine_init(struct pool *pp)
3079 pp->pr_quar.rotor = 0;
3080 memset(&pp->pr_quar, 0, sizeof(pp->pr_quar));
3084 pool_quarantine_flush(struct pool *pp)
3086 pool_quar_t *quar = &pp->pr_quar;
3092 mutex_enter(&pp->pr_lock);
3096 pool_do_put(pp, (void *)quar->list[i], &pq);
3098 mutex_exit(&pp->pr_lock);
3100 pr_pagelist_free(pp, &pq);
3104 pool_put_quarantine(struct pool *pp, void *v, struct pool_pagelist *pq)
3106 pool_quar_t *quar = &pp->pr_quar;
3109 if (pp->pr_roflags & PR_NOTOUCH) {
3113 pool_redzone_check(pp, v);
3119 pool_do_put(pp, (void *)old, pq);
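
pool_put_quarantine() above (part of the pool quarantine support used by the kernel sanitizers) delays frees through a small ring indexed by a rotor: a newly freed pointer displaces the oldest entry, and only the displaced entry is actually handed to pool_do_put(). A self-contained sketch of that rotor idea; the ring depth and types are illustrative, not the kernel's pool_quar_t:

    #include <stdint.h>
    #include <stdio.h>

    #define QUAR_DEPTH 8                    /* illustrative ring size */

    struct quar {
            unsigned int rotor;
            uintptr_t    list[QUAR_DEPTH];
    };

    /* Deposit v; return the displaced entry that should really be
     * freed now, or 0 while the ring is still filling up. */
    static uintptr_t
    quar_put(struct quar *q, uintptr_t v)
    {
            uintptr_t old = q->list[q->rotor];

            q->list[q->rotor] = v;
            q->rotor = (q->rotor + 1) % QUAR_DEPTH;
            return old;
    }

    int
    main(void)
    {
            struct quar q = { 0 };

            for (uintptr_t v = 1; v <= 10; v++)
                    printf("put %#lx -> really free %#lx\n",
                        (unsigned long)v, (unsigned long)quar_put(&q, v));
            return 0;
    }
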
3154 pool_redzone_init(struct pool *pp, size_t requested_size)
3167 if (pp->pr_roflags & PR_NOTOUCH) {
3168 pp->pr_redzone = false;
3176 if (pp->pr_size - requested_size >= redzsz) {
3177 pp->pr_reqsize_with_redzone = requested_size + redzsz;
3178 pp->pr_redzone = true;
3191 nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
3192 if (nsz <= (pp->pr_alloc->pa_pagesz / 2)) {
3194 pp->pr_size = nsz;
3195 pp->pr_reqsize_with_redzone = requested_size + redzsz;
3196 pp->pr_redzone = true;
3199 pp->pr_redzone = false;
3200 aprint_debug("pool redzone disabled for '%s'\n", pp->pr_wchan);
3205 pool_redzone_fill(struct pool *pp, void *p)
3207 if (!pp->pr_redzone)
3209 KASSERT(!pp_has_pser(pp));
3211 kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone,
3217 cp = (uint8_t *)p + pp->pr_reqsize;
3236 pool_redzone_check(struct pool *pp, void *p)
3238 if (!pp->pr_redzone)
3240 KASSERT(!pp_has_pser(pp));
3242 kasan_mark(p, 0, pp->pr_reqsize_with_redzone, KASAN_POOL_FREED);
3247 cp = (uint8_t *)p + pp->pr_reqsize;
3254 pp->pr_wchan, *cp, expected);
3262 pp->pr_wchan, *cp, expected);
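
In the non-KASAN path above, pool_redzone_fill() writes a guard pattern into the slack starting at p + pr_reqsize and pool_redzone_check() verifies it when the item comes back; under KASAN the same region is instead marked with kasan_mark(). A minimal userland sketch of the fill/check idea, with an invented pattern byte and sizes (not the kernel's actual values):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define REDZONE_SZ   8
    #define REDZONE_BYTE 0x8a               /* illustrative pattern byte */

    static void
    redzone_fill(uint8_t *p, size_t reqsize)
    {
            for (size_t i = 0; i < REDZONE_SZ; i++)
                    p[reqsize + i] = REDZONE_BYTE;
    }

    static int
    redzone_check(const uint8_t *p, size_t reqsize)
    {
            for (size_t i = 0; i < REDZONE_SZ; i++) {
                    if (p[reqsize + i] != REDZONE_BYTE) {
                            fprintf(stderr,
                                "redzone corrupted at byte %zu\n", i);
                            return -1;
                    }
            }
            return 0;
    }

    int
    main(void)
    {
            size_t reqsize = 24;
            uint8_t *obj = malloc(reqsize + REDZONE_SZ);

            redzone_fill(obj, reqsize);     /* as pool_redzone_fill would */
            obj[reqsize] = 0;               /* simulate a one-byte overflow */
            return redzone_check(obj, reqsize) ? 1 : 0;
    }
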
3289 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3293 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
3297 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
3300 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
3304 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
3312 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
3320 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3323 if ((pp->pr_roflags & PR_USEBMAP) != 0) {
3324 unsigned int idx = pr_item_bitmap_index(pp, ph, (void *)addr);
3334 if (pool_in_item(pp, pi, addr)) {
3345 struct pool *pp;
3347 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3356 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3357 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3358 if (pool_in_page(pp, ph, addr)) {
3362 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3363 if (pool_in_page(pp, ph, addr)) {
3365 pool_allocated(pp, ph, addr);
3369 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3370 if (pool_in_page(pp, ph, addr)) {
3377 ph = pr_find_pagehead_noalign(pp, (void *)addr);
3378 if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3381 allocated = pool_allocated(pp, ph, addr);
3385 (pc = atomic_load_consume(&pp->pr_cache)) != NULL) {
3391 if (pool_in_cg(pp, pcg, addr)) {
3402 if (pool_in_cg(pp, cc->cc_current, addr) ||
3403 pool_in_cg(pp, cc->cc_previous, addr)) {
3418 item = item + rounddown(addr - item, pp->pr_size);
3421 pp->pr_wchan,
3432 struct pool *pp;
3440 TAILQ_FOREACH(pp, &pool_head, pr_poollist)
3449 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3452 pp->pr_refcnt++;
3453 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3454 data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3455 data.pr_flags = pp->pr_roflags | pp->pr_flags;
3456 #define COPY(field) data.field = pp->field
3482 if ((pc = atomic_load_consume(&pp->pr_cache)) != NULL) {
3504 if (pp->pr_refcnt == UINT_MAX) /* XXX possible? */
3509 if (--pp->pr_refcnt == 0)