/*	$OpenBSD: art.c,v 1.31 2023/11/11 12:17:50 bluhm Exp $ */

/*
 * Copyright (c) 2015 Martin Pieuchot
 * Copyright (c) 2001 Yoichi Hariguchi
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Allotment Routing Table (ART).
 *
 * Yoichi Hariguchi's paper can be found at:
 *	http://www.hariguchi.org/art/art.pdf
 */
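
/*
 * Each art_table stores one stride of the trie as a complete binary
 * tree laid out in an array (the ``heap''): index 1 holds the table's
 * default route, indexes 2 .. at_minfringe - 1 hold the non-fringe
 * (shorter) prefixes, and indexes at_minfringe .. 2 * at_minfringe - 1
 * hold the fringe entries, where at_minfringe == 1 << at_bits.  The
 * parent of index i is i >> 1, which is also the next most specific
 * route covering i; see art_bindex() and art_allot() below.
 */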

#ifndef _KERNEL
#include "kern_compat.h"
#else
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/socket.h>
#endif

#include <net/art.h>

int art_bindex(struct art_table *, const uint8_t *, int);
void art_allot(struct art_table *at, int, struct art_node *,
	    struct art_node *);
struct art_table *art_table_get(struct art_root *, struct art_table *,
	    int);
struct art_table *art_table_put(struct art_root *, struct art_table *);
struct art_node *art_table_insert(struct art_root *, struct art_table *,
	    int, struct art_node *);
struct art_node *art_table_delete(struct art_root *, struct art_table *,
	    int, struct art_node *);
struct art_table *art_table_ref(struct art_root *, struct art_table *);
int art_table_free(struct art_root *, struct art_table *);
int art_table_walk(struct art_root *, struct art_table *,
	    int (*f)(struct art_node *, void *), void *);
int art_walk_apply(struct art_root *,
	    struct art_node *, struct art_node *,
	    int (*f)(struct art_node *, void *), void *);
void art_table_gc(void *);
void art_gc(void *);

struct pool an_pool, at_pool, at_heap_4_pool, at_heap_8_pool;

struct art_table *art_table_gc_list = NULL;
struct mutex art_table_gc_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
struct task art_table_gc_task =
    TASK_INITIALIZER(art_table_gc, NULL);

struct art_node *art_node_gc_list = NULL;
struct mutex art_node_gc_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
struct task art_node_gc_task = TASK_INITIALIZER(art_gc, NULL);

void
art_init(void)
{
	pool_init(&an_pool, sizeof(struct art_node), 0, IPL_SOFTNET, 0,
	    "art_node", NULL);
	pool_init(&at_pool, sizeof(struct art_table), 0, IPL_SOFTNET, 0,
	    "art_table", NULL);
	pool_init(&at_heap_4_pool, AT_HEAPSIZE(4), 0, IPL_SOFTNET, 0,
	    "art_heap4", NULL);
	pool_init(&at_heap_8_pool, AT_HEAPSIZE(8), 0, IPL_SOFTNET, 0,
	    "art_heap8", &pool_allocator_single);
}

/*
 * Per routing table initialization API function.
 */
struct art_root *
art_alloc(unsigned int rtableid, unsigned int alen, unsigned int off)
{
	struct art_root *ar;
	int i;

	ar = malloc(sizeof(*ar), M_RTABLE, M_NOWAIT|M_ZERO);
	if (ar == NULL)
		return (NULL);

	switch (alen) {
	case 32:
		ar->ar_alen = 32;
		ar->ar_nlvl = 7;
		ar->ar_bits[0] = 8;
		for (i = 1; i < ar->ar_nlvl; i++)
			ar->ar_bits[i] = 4;
		break;
	case 128:
		ar->ar_alen = 128;
		ar->ar_nlvl = 32;
		for (i = 0; i < ar->ar_nlvl; i++)
			ar->ar_bits[i] = 4;
		break;
	default:
		printf("%s: incorrect address length %u\n", __func__, alen);
		free(ar, M_RTABLE, sizeof(*ar));
		return (NULL);
	}

	ar->ar_off = off;
	rw_init(&ar->ar_lock, "art");

	return (ar);
}
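
/*
 * For example (an illustrative sketch, not a call site from this file),
 * an IPv4 routing table would be created with something like:
 *
 *	ar = art_alloc(rtableid, 32, off);
 *
 * where ``off'' is the byte offset of the address inside the caller's
 * sockaddr; it is only stored in ar_off and never interpreted by this
 * file.
 */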

/*
 * Return 1 if ``old'' and ``new'' are identical, 0 otherwise.
 */
static inline int
art_check_duplicate(struct art_root *ar, struct art_node *old,
    struct art_node *new)
{
	if (old == NULL)
		return (0);

	if (old->an_plen == new->an_plen)
		return (1);

	return (0);
}

/*
 * Return the base index of the part of ``addr'' and ``plen''
 * corresponding to the range covered by the table ``at''.
 *
 * In other words, this function takes the multi-level (complete)
 * address ``addr'' and prefix length ``plen'' and returns the
 * single level base index for the table ``at''.
 *
 * For example, with a 32-bit address space divided into four 8-bit
 * tables, a prefix length > 24 spans all four levels, so up to 4 base
 * indexes (one per table) correspond to it.
 */
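
/*
 * Worked example (assuming the IPv4 root table set up in art_alloc():
 * at_offset = 0, at_bits = 8).  For addr = { 10, 0, 0, 0 } and
 * plen = 7, the code below extracts k = 10 and returns
 * (10 >> (8 - 7)) + (1 << 7) = 5 + 128 = 133; with plen = 8 it
 * returns (10 >> 0) + (1 << 8) = 266, which is a fringe index.
 */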
int
art_bindex(struct art_table *at, const uint8_t *addr, int plen)
{
	uint8_t boff, bend;
	uint32_t k;

	if (plen < at->at_offset || plen > (at->at_offset + at->at_bits))
		return (-1);

	/*
	 * We are only interested in the part of the prefix length
	 * corresponding to the range of this table.
	 */
	plen -= at->at_offset;

	/*
	 * Jump to the first byte of the address containing bits
	 * covered by this table.
	 */
	addr += (at->at_offset / 8);

	/* ``at'' covers the bit range between ``boff'' & ``bend''. */
	boff = (at->at_offset % 8);
	bend = (at->at_bits + boff);

	KASSERT(bend <= 32);

	if (bend > 24) {
		k = (addr[0] & ((1 << (8 - boff)) - 1)) << (bend - 8);
		k |= addr[1] << (bend - 16);
		k |= addr[2] << (bend - 24);
		k |= addr[3] >> (32 - bend);
	} else if (bend > 16) {
		k = (addr[0] & ((1 << (8 - boff)) - 1)) << (bend - 8);
		k |= addr[1] << (bend - 16);
		k |= addr[2] >> (24 - bend);
	} else if (bend > 8) {
		k = (addr[0] & ((1 << (8 - boff)) - 1)) << (bend - 8);
		k |= addr[1] >> (16 - bend);
	} else {
		k = (addr[0] >> (8 - bend)) & ((1 << at->at_bits) - 1);
	}

	/*
	 * Single level base index formula: the top ``plen'' bits of the
	 * chunk ``k'' select a slot in the heap level that holds the
	 * 2^plen prefixes of that length, a level which starts at
	 * index (1 << plen).
	 */
	return ((k >> (at->at_bits - plen)) + (1 << plen));
}

/*
 * Single level lookup function.
 *
 * Return the fringe index of the part of ``addr''
 * corresponding to the range covered by the table ``at''.
 */
static inline int
art_findex(struct art_table *at, const uint8_t *addr)
{
	return art_bindex(at, addr, (at->at_offset + at->at_bits));
}
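
/*
 * In other words, the fringe index is the base index computed for the
 * longest prefix length this table can hold (at_offset + at_bits).
 * Continuing the example above for the 8-bit root table, the fringe
 * index of any address whose first byte is 10 is (10 >> 0) + (1 << 8)
 * = 266.
 */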

/*
 * (Non-perfect) lookup API function.
 *
 * Return the best existing match for a destination.
 */
struct art_node *
art_match(struct art_root *ar, const void *addr, struct srp_ref *nsr)
{
	struct srp_ref dsr, ndsr;
	void *entry;
	struct art_table *at;
	struct art_node *dflt, *ndflt;
	int j;

	entry = srp_enter(nsr, &ar->ar_root);
	at = entry;

	if (at == NULL)
		goto done;

	/*
	 * Remember the default route of each table we visit in case
	 * we do not find a better matching route.
	 */
	dflt = srp_enter(&dsr, &at->at_default);

	/*
	 * Iterate until we find a leaf.
	 */
	while (1) {
		/* Do a single level route lookup. */
		j = art_findex(at, addr);
		entry = srp_follow(nsr, &at->at_heap[j].node);

		/* If this is a leaf (NULL is a leaf) we're done. */
		if (ISLEAF(entry))
			break;

		at = SUBTABLE(entry);

		ndflt = srp_enter(&ndsr, &at->at_default);
		if (ndflt != NULL) {
			srp_leave(&dsr);
			dsr = ndsr;
			dflt = ndflt;
		} else
			srp_leave(&ndsr);
	}

	if (entry == NULL) {
		srp_leave(nsr);
		*nsr = dsr;
		KASSERT(ISLEAF(dflt));
		return (dflt);
	}

	srp_leave(&dsr);
done:
	KASSERT(ISLEAF(entry));
	return (entry);
}
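
/*
 * Illustrative usage sketch (an assumption about callers, not code from
 * this file): the SRP reference ``nsr'' is still held when art_match()
 * or art_lookup() returns, even on a miss, so a caller is expected to
 * drop it once it is done with the node:
 *
 *	struct srp_ref nsr;
 *	struct art_node *an;
 *
 *	an = art_match(ar, addr, &nsr);
 *	if (an != NULL)
 *		... use ``an'' ...
 *	srp_leave(&nsr);
 */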

/*
 * Perfect lookup API function.
 *
 * Return a perfect match for a destination/prefix-length pair or NULL if
 * it does not exist.
 */
struct art_node *
art_lookup(struct art_root *ar, const void *addr, int plen, struct srp_ref *nsr)
{
	void *entry;
	struct art_table *at;
	int i, j;

	KASSERT(plen >= 0 && plen <= ar->ar_alen);

	entry = srp_enter(nsr, &ar->ar_root);
	at = entry;

	if (at == NULL)
		goto done;

	/* Default route */
	if (plen == 0) {
		entry = srp_follow(nsr, &at->at_default);
		goto done;
	}

	/*
	 * If the prefix length is not greater than the sum of the
	 * stride lengths up to and including this level, the entry
	 * must be in the current table.
	 */
	while (plen > (at->at_offset + at->at_bits)) {
		/* Do a single level route lookup. */
		j = art_findex(at, addr);
		entry = srp_follow(nsr, &at->at_heap[j].node);

		/* A leaf here means no perfect match exists (NULL is a leaf). */
		if (ISLEAF(entry))
			return (NULL);

		at = SUBTABLE(entry);
	}

	i = art_bindex(at, addr, plen);
	if (i == -1)
		return (NULL);

	entry = srp_follow(nsr, &at->at_heap[i].node);
	if (!ISLEAF(entry))
		entry = srp_follow(nsr, &SUBTABLE(entry)->at_default);

done:
	KASSERT(ISLEAF(entry));
	return (entry);
}


/*
 * Insertion API function.
 *
 * Insert the given node or return an existing one if a node with the
 * same destination/mask pair is already present.
 */
struct art_node *
art_insert(struct art_root *ar, struct art_node *an, const void *addr, int plen)
{
	struct art_table *at, *child;
	struct art_node *node;
	int i, j;

	rw_assert_wrlock(&ar->ar_lock);
	KASSERT(plen >= 0 && plen <= ar->ar_alen);

	at = srp_get_locked(&ar->ar_root);
	if (at == NULL) {
		at = art_table_get(ar, NULL, -1);
		if (at == NULL)
			return (NULL);

		srp_swap_locked(&ar->ar_root, at);
	}

	/* Default route */
	if (plen == 0) {
		node = srp_get_locked(&at->at_default);
		if (node != NULL)
			return (node);

		art_table_ref(ar, at);
		srp_swap_locked(&at->at_default, an);
		return (an);
	}

	/*
	 * If the prefix length is not greater than the sum of the
	 * stride lengths up to and including this level, the entry
	 * must be in the current table.
	 */
	while (plen > (at->at_offset + at->at_bits)) {
		/* Do a single level route lookup. */
		j = art_findex(at, addr);
		node = srp_get_locked(&at->at_heap[j].node);

		/*
		 * If the node corresponding to the fringe index is
		 * a leaf we need to allocate a subtable.  The route
		 * entry of this node will then become the default
		 * route of the subtable.
		 */
		if (ISLEAF(node)) {
			child = art_table_get(ar, at, j);
			if (child == NULL)
				return (NULL);

			art_table_ref(ar, at);
			srp_swap_locked(&at->at_heap[j].node, ASNODE(child));
			at = child;
		} else
			at = SUBTABLE(node);
	}

	i = art_bindex(at, addr, plen);
	if (i == -1)
		return (NULL);

	return (art_table_insert(ar, at, i, an));
}
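
/*
 * Illustrative caller-side sketch (an assumption about usage, not code
 * from this file; the error values are only examples): a writer holding
 * ``ar_lock'' typically allocates a node with art_get(), tries to
 * insert it and releases it again if that fails or an equivalent node
 * is already present:
 *
 *	an = art_get(plen);
 *	if (an == NULL)
 *		return (ENOBUFS);
 *	prev = art_insert(ar, an, addr, plen);
 *	if (prev != an) {
 *		art_put(an);
 *		return ((prev == NULL) ? ENOMEM : EEXIST);
 *	}
 */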

/*
 * Single level insertion.
 */
struct art_node *
art_table_insert(struct art_root *ar, struct art_table *at, int i,
    struct art_node *an)
{
	struct art_node *prev, *node;

	node = srp_get_locked(&at->at_heap[i].node);
	if (!ISLEAF(node))
		prev = srp_get_locked(&SUBTABLE(node)->at_default);
	else
		prev = node;

	if (art_check_duplicate(ar, prev, an))
		return (prev);

	art_table_ref(ar, at);

	/*
	 * If the index `i' of the route that we are inserting is not
	 * a fringe index, we need to allot this new route pointer to
	 * all the corresponding fringe indices.
	 */
	if (i < at->at_minfringe)
		art_allot(at, i, prev, an);
	else if (!ISLEAF(node))
		srp_swap_locked(&SUBTABLE(node)->at_default, an);
	else
		srp_swap_locked(&at->at_heap[i].node, an);

	return (an);
}


/*
 * Deletion API function.
 */
struct art_node *
art_delete(struct art_root *ar, struct art_node *an, const void *addr, int plen)
{
	struct art_table *at;
	struct art_node *node;
	int i, j;

	rw_assert_wrlock(&ar->ar_lock);
	KASSERT(plen >= 0 && plen <= ar->ar_alen);

	at = srp_get_locked(&ar->ar_root);
	if (at == NULL)
		return (NULL);

	/* Default route */
	if (plen == 0) {
		node = srp_get_locked(&at->at_default);
		srp_swap_locked(&at->at_default, NULL);
		art_table_free(ar, at);
		return (node);
	}

	/*
	 * If the prefix length is not greater than the sum of the
	 * stride lengths up to and including this level, the entry
	 * must be in the current table.
	 */
	while (plen > (at->at_offset + at->at_bits)) {
		/* Do a single level route lookup. */
		j = art_findex(at, addr);
		node = srp_get_locked(&at->at_heap[j].node);

		/* If this is a leaf, there is no route to delete. */
		if (ISLEAF(node))
			return (NULL);

		at = SUBTABLE(node);
	}

	i = art_bindex(at, addr, plen);
	if (i == -1)
		return (NULL);

	return (art_table_delete(ar, at, i, an));
}

/*
 * Single level deletion.
 */
struct art_node *
art_table_delete(struct art_root *ar, struct art_table *at, int i,
    struct art_node *an)
{
	struct art_node *next, *node;

#ifdef DIAGNOSTIC
	struct art_node *prev;
#endif

	node = srp_get_locked(&at->at_heap[i].node);
#ifdef DIAGNOSTIC
	if (!ISLEAF(node))
		prev = srp_get_locked(&SUBTABLE(node)->at_default);
	else
		prev = node;

	KASSERT(prev == an);
#endif

	/* Get the next most specific route for the index `i'. */
	if ((i >> 1) > 1)
		next = srp_get_locked(&at->at_heap[i >> 1].node);
	else
		next = NULL;

	/*
	 * If the index `i' of the route that we are removing is not
	 * a fringe index, we need to allot the next most specific
	 * route pointer to all the corresponding fringe indices.
	 */
	if (i < at->at_minfringe)
		art_allot(at, i, an, next);
	else if (!ISLEAF(node))
		srp_swap_locked(&SUBTABLE(node)->at_default, next);
	else
		srp_swap_locked(&at->at_heap[i].node, next);

	/* We have removed an entry from this table. */
	art_table_free(ar, at);

	return (an);
}

struct art_table *
art_table_ref(struct art_root *ar, struct art_table *at)
{
	at->at_refcnt++;
	return (at);
}

static inline int
art_table_rele(struct art_table *at)
{
	if (at == NULL)
		return (0);

	return (--at->at_refcnt == 0);
}

int
art_table_free(struct art_root *ar, struct art_table *at)
{
	if (art_table_rele(at)) {
		/*
		 * Garbage collect this table and all its parents
		 * that are empty.
		 */
		do {
			at = art_table_put(ar, at);
		} while (art_table_rele(at));

		return (1);
	}

	return (0);
}

/*
 * Iteration API function.
 */
int
art_walk(struct art_root *ar, int (*f)(struct art_node *, void *), void *arg)
{
	struct srp_ref sr;
	struct art_table *at;
	struct art_node *node;
	int error = 0;

	rw_enter_write(&ar->ar_lock);
	at = srp_get_locked(&ar->ar_root);
	if (at != NULL) {
		art_table_ref(ar, at);

		/*
		 * The default route should be processed here because the root
		 * table does not have a parent.
		 */
		node = srp_enter(&sr, &at->at_default);
		error = art_walk_apply(ar, node, NULL, f, arg);
		srp_leave(&sr);

		if (error == 0)
			error = art_table_walk(ar, at, f, arg);

		art_table_free(ar, at);
	}
	rw_exit_write(&ar->ar_lock);

	return (error);
}

int
art_table_walk(struct art_root *ar, struct art_table *at,
    int (*f)(struct art_node *, void *), void *arg)
{
	struct srp_ref sr;
	struct art_node *node, *next;
	struct art_table *nat;
	int i, j, error = 0;
	uint32_t maxfringe = (at->at_minfringe << 1);

	/*
	 * Iterate non-fringe nodes in ``natural'' order.
	 */
	for (j = 1; j < at->at_minfringe; j += 2) {
		/*
		 * The default route (index 1) is processed by the
		 * parent table (where it belongs) otherwise it could
		 * be processed more than once.
		 */
		for (i = max(j, 2); i < at->at_minfringe; i <<= 1) {
			next = srp_get_locked(&at->at_heap[i >> 1].node);

			node = srp_enter(&sr, &at->at_heap[i].node);
			error = art_walk_apply(ar, node, next, f, arg);
			srp_leave(&sr);

			if (error != 0)
				return (error);
		}
	}

	/*
	 * Iterate fringe nodes.
	 */
	for (i = at->at_minfringe; i < maxfringe; i++) {
		next = srp_get_locked(&at->at_heap[i >> 1].node);

		node = srp_enter(&sr, &at->at_heap[i].node);
		if (!ISLEAF(node)) {
			nat = art_table_ref(ar, SUBTABLE(node));
			node = srp_follow(&sr, &nat->at_default);
		} else
			nat = NULL;

		error = art_walk_apply(ar, node, next, f, arg);
		srp_leave(&sr);

		if (error != 0) {
			art_table_free(ar, nat);
			return (error);
		}

		if (nat != NULL) {
			error = art_table_walk(ar, nat, f, arg);
			art_table_free(ar, nat);
			if (error != 0)
				return (error);
		}
	}

	return (0);
}

int
art_walk_apply(struct art_root *ar,
    struct art_node *an, struct art_node *next,
    int (*f)(struct art_node *, void *), void *arg)
{
	int error = 0;

	if ((an != NULL) && (an != next)) {
		rw_exit_write(&ar->ar_lock);
		error = (*f)(an, arg);
		rw_enter_write(&ar->ar_lock);
	}

	return (error);
}


/*
 * Create a table and use the given index to set its default route.
 *
 * Note: This function does not modify the root or the parent.
 */
struct art_table *
art_table_get(struct art_root *ar, struct art_table *parent, int j)
{
	struct art_table *at;
	struct art_node *node;
	void *at_heap;
	uint32_t lvl;

	KASSERT(j != 0 && j != 1);
	KASSERT(parent != NULL || j == -1);

	if (parent != NULL)
		lvl = parent->at_level + 1;
	else
		lvl = 0;

	KASSERT(lvl < ar->ar_nlvl);

	at = pool_get(&at_pool, PR_NOWAIT|PR_ZERO);
	if (at == NULL)
		return (NULL);

	switch (AT_HEAPSIZE(ar->ar_bits[lvl])) {
	case AT_HEAPSIZE(4):
		at_heap = pool_get(&at_heap_4_pool, PR_NOWAIT|PR_ZERO);
		break;
	case AT_HEAPSIZE(8):
		at_heap = pool_get(&at_heap_8_pool, PR_NOWAIT|PR_ZERO);
		break;
	default:
		panic("incorrect stride length %u", ar->ar_bits[lvl]);
	}

	if (at_heap == NULL) {
		pool_put(&at_pool, at);
		return (NULL);
	}

	at->at_parent = parent;
	at->at_index = j;
	at->at_minfringe = (1 << ar->ar_bits[lvl]);
	at->at_level = lvl;
	at->at_bits = ar->ar_bits[lvl];
	at->at_heap = at_heap;
	at->at_refcnt = 0;

	if (parent != NULL) {
		node = srp_get_locked(&parent->at_heap[j].node);
		/* node isn't being deleted, no srp_finalize needed */
		srp_swap_locked(&at->at_default, node);
		at->at_offset = (parent->at_offset + parent->at_bits);
	}

	return (at);
}


/*
 * Delete a table and write its default route back into its parent at
 * the table's index.
 *
 * Note: The parent table is modified to unlink the given table from it.
 */
struct art_table *
art_table_put(struct art_root *ar, struct art_table *at)
{
	struct art_table *parent = at->at_parent;
	struct art_node *node;
	uint32_t j = at->at_index;

	KASSERT(at->at_refcnt == 0);
	KASSERT(j != 0 && j != 1);

	if (parent != NULL) {
		KASSERT(j != -1);
		KASSERT(at->at_level == parent->at_level + 1);
		KASSERT(parent->at_refcnt >= 1);

		/* Give the route back to its parent. */
		node = srp_get_locked(&at->at_default);
		srp_swap_locked(&parent->at_heap[j].node, node);
	} else {
		KASSERT(j == -1);
		KASSERT(at->at_level == 0);
		srp_swap_locked(&ar->ar_root, NULL);
	}

	mtx_enter(&art_table_gc_mtx);
	at->at_parent = art_table_gc_list;
	art_table_gc_list = at;
	mtx_leave(&art_table_gc_mtx);

	task_add(systqmp, &art_table_gc_task);

	return (parent);
}
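
/*
 * The memory of an unlinked table is released by the task below, not
 * by art_table_put() itself; a plausible reason (an inference, not
 * stated in this file) is that srp_finalize() has to sleep until all
 * concurrent SRP readers have drained, which is better done from a
 * task context than from the update path.
 */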

void
art_table_gc(void *null)
{
	struct art_table *at, *next;

	mtx_enter(&art_table_gc_mtx);
	at = art_table_gc_list;
	art_table_gc_list = NULL;
	mtx_leave(&art_table_gc_mtx);

	while (at != NULL) {
		next = at->at_parent;

		if (at->at_level == 0)
			srp_finalize(at, "arttfini");
		else
			srp_finalize(ASNODE(at), "arttfini");

		switch (AT_HEAPSIZE(at->at_bits)) {
		case AT_HEAPSIZE(4):
			pool_put(&at_heap_4_pool, at->at_heap);
			break;
		case AT_HEAPSIZE(8):
			pool_put(&at_heap_8_pool, at->at_heap);
			break;
		default:
			panic("incorrect stride length %u", at->at_bits);
		}

		pool_put(&at_pool, at);

		at = next;
	}
}

/*
 * Substitute one node for another in the subtree whose root index is given.
 *
 * This function iterates over the table ``at'' starting at index ``i''
 * until no more ``old'' nodes can be replaced by ``new''.
 *
 * This function was originally written by Don Knuth in CWEB.  The
 * complicated ``goto''s are the result of expanding the two recursive
 * calls of the following function:
 *
 *	art_allot(at, i, old, new)
 *	{
 *		int k = i;
 *		if (at->at_heap[k] == old)
 *			at->at_heap[k] = new;
 *		if (k >= at->at_minfringe)
 *			 return;
 *		k <<= 1;
 *		art_allot(at, k, old, new);
 *		k++;
 *		art_allot(at, k, old, new);
 *	}
 */
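
/*
 * For illustration (assuming at_minfringe == 8, i == 2, and every
 * entry below index 2 still pointing to ``old''), the iterative code
 * below rewrites the heap entries in the order 8, 9, 4, 10, 11, 5, 2:
 * within each subtree the fringe slots are updated before their
 * ancestors, i.e. a bottom-up, postorder traversal.
 */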
void
art_allot(struct art_table *at, int i, struct art_node *old,
    struct art_node *new)
{
	struct art_node *node, *dflt;
	int k = i;

	KASSERT(i < at->at_minfringe);

again:
	k <<= 1;
	if (k < at->at_minfringe)
		goto nonfringe;

	/* Change fringe nodes. */
	while (1) {
		node = srp_get_locked(&at->at_heap[k].node);
		if (!ISLEAF(node)) {
			dflt = srp_get_locked(&SUBTABLE(node)->at_default);
			if (dflt == old) {
				srp_swap_locked(&SUBTABLE(node)->at_default,
				    new);
			}
		} else if (node == old) {
			srp_swap_locked(&at->at_heap[k].node, new);
		}
		if (k % 2)
			goto moveup;
		k++;
	}

nonfringe:
	node = srp_get_locked(&at->at_heap[k].node);
	if (node == old)
		goto again;
moveon:
	if (k % 2)
		goto moveup;
	k++;
	goto nonfringe;
moveup:
	k >>= 1;
	srp_swap_locked(&at->at_heap[k].node, new);

	/* Change non-fringe node. */
	if (k != i)
		goto moveon;
}

struct art_node *
art_get(uint8_t plen)
{
	struct art_node *an;

	an = pool_get(&an_pool, PR_NOWAIT | PR_ZERO);
	if (an == NULL)
		return (NULL);

	an->an_plen = plen;
	SRPL_INIT(&an->an_rtlist);

	return (an);
}

void
art_put(struct art_node *an)
{
	KASSERT(SRPL_EMPTY_LOCKED(&an->an_rtlist));

	mtx_enter(&art_node_gc_mtx);
	an->an_gc = art_node_gc_list;
	art_node_gc_list = an;
	mtx_leave(&art_node_gc_mtx);

	task_add(systqmp, &art_node_gc_task);
}

void
art_gc(void *null)
{
	struct art_node *an, *next;

	mtx_enter(&art_node_gc_mtx);
	an = art_node_gc_list;
	art_node_gc_list = NULL;
	mtx_leave(&art_node_gc_mtx);

	while (an != NULL) {
		next = an->an_gc;

		srp_finalize(an, "artnfini");

		pool_put(&an_pool, an);

		an = next;
	}
}