/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/zone.h>
#include <sys/netstack.h>

/*
 * What we use so that the zones framework can tell us about new zones,
 * which we use to create new stacks.
 */
static zone_key_t netstack_zone_key;

static int netstack_initialized = 0;

/*
 * Track the registered netstacks.
 * The global lock protects
 * - ns_reg
 * - the list starting at netstack_head and following the netstack_next
 *   pointers.
 */
static kmutex_t netstack_g_lock;

/*
 * Registry of netstacks with their create/shutdown/destroy functions.
 */
static struct netstack_registry ns_reg[NS_MAX];

/*
 * Global list of existing stacks. We use this when a new zone with
 * an exclusive IP instance is created.
 *
 * Note that in some cases a netstack_t needs to stay around after the zone
 * has gone away. This is because there might be outstanding references
 * (from TCP TIME_WAIT connections, IPsec state, etc). The netstack_t data
 * structure and all the foo_stack_t's hanging off of it will be cleaned up
 * when the last reference to it is dropped.
 * However, the same zone might be rebooted. That is handled using the
 * assumption that the zones framework picks a new zoneid each time a zone
 * is (re)booted. We assert for that condition in netstack_zone_create().
 * Thus the old netstack_t can take its time for things to time out.
 */
static netstack_t *netstack_head;

/*
 * To support kstat_create_netstack() using kstat_zone_add we need
 * to track both
 * - all zoneids that use the global/shared stack
 * - all kstats that have been added for the shared stack
 */
struct shared_zone_list {
        struct shared_zone_list *sz_next;
        zoneid_t sz_zoneid;
};

struct shared_kstat_list {
        struct shared_kstat_list *sk_next;
        kstat_t *sk_kstat;
};

static kmutex_t netstack_shared_lock;   /* protects the following two */
static struct shared_zone_list *netstack_shared_zones;
static struct shared_kstat_list *netstack_shared_kstats;

static void *netstack_zone_create(zoneid_t zoneid);
static void netstack_zone_shutdown(zoneid_t zoneid, void *arg);
static void netstack_zone_destroy(zoneid_t zoneid, void *arg);

static void netstack_shared_zone_add(zoneid_t zoneid);
static void netstack_shared_zone_remove(zoneid_t zoneid);
static void netstack_shared_kstat_add(kstat_t *ks);
static void netstack_shared_kstat_remove(kstat_t *ks);

typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);

static void apply_all_netstacks(int, applyfn_t *);
static void apply_all_modules(netstack_t *, applyfn_t *);
static void apply_all_modules_reverse(netstack_t *, applyfn_t *);
static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
static boolean_t wait_for_zone_creator(netstack_t *, kmutex_t *);
static boolean_t wait_for_nms_inprogress(netstack_t *, nm_state_t *,
    kmutex_t *);

void
netstack_init(void)
{
        mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);

        netstack_initialized = 1;

        /*
         * We want to be informed each time a zone is created or
         * destroyed in the kernel, so we can maintain the
         * stack instance information.
         */
        zone_key_create(&netstack_zone_key, netstack_zone_create,
            netstack_zone_shutdown, netstack_zone_destroy);
}

/*
 * Register a new module with the framework.
 * This registers interest in changes to the set of netstacks.
 * The createfn and destroyfn are required, but the shutdownfn can be
 * NULL.
 * Note that due to the current zsd implementation, when the create
 * function is called the zone isn't fully present; thus functions
 * like zone_find_by_* will fail, hence the create function cannot
 * use many zones kernel functions including zcmn_err().
 */
void
netstack_register(int moduleid,
    void *(*module_create)(netstackid_t, netstack_t *),
    void (*module_shutdown)(netstackid_t, void *),
    void (*module_destroy)(netstackid_t, void *))
{
        netstack_t *ns;

        ASSERT(netstack_initialized);
        ASSERT(moduleid >= 0 && moduleid < NS_MAX);
        ASSERT(module_create != NULL);

        /*
         * Make instances created after this point in time run the create
         * callback.
         */
        mutex_enter(&netstack_g_lock);
        ASSERT(ns_reg[moduleid].nr_create == NULL);
        ASSERT(ns_reg[moduleid].nr_flags == 0);
        ns_reg[moduleid].nr_create = module_create;
        ns_reg[moduleid].nr_shutdown = module_shutdown;
        ns_reg[moduleid].nr_destroy = module_destroy;
        ns_reg[moduleid].nr_flags = NRF_REGISTERED;

        /*
         * Determine the set of stacks that exist before we drop the lock.
         * Set NSS_CREATE_NEEDED for each of those.
         * netstacks which have been deleted will have NSS_CREATE_COMPLETED
         * set, but check NSF_CLOSING to be sure.
         */
        for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
                nm_state_t *nms = &ns->netstack_m_state[moduleid];

                mutex_enter(&ns->netstack_lock);
                if (!(ns->netstack_flags & NSF_CLOSING) &&
                    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
                        nms->nms_flags |= NSS_CREATE_NEEDED;
                        DTRACE_PROBE2(netstack__create__needed,
                            netstack_t *, ns, int, moduleid);
                }
                mutex_exit(&ns->netstack_lock);
        }
        mutex_exit(&netstack_g_lock);

        /*
         * At this point in time a new instance can be created or an instance
         * can be destroyed, or some other module can register or unregister.
         * Make sure we either run all the create functions for this moduleid
         * or we wait for any other creators for this moduleid.
         */
        apply_all_netstacks(moduleid, netstack_apply_create);
}
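
/*
 * Example usage (a minimal sketch; "foo", NS_FOO and the foo_* callbacks
 * are hypothetical and not defined in this file). A module typically
 * registers from its _init() routine and unregisters from _fini():
 *
 *      static void *
 *      foo_stack_create(netstackid_t stackid, netstack_t *ns)
 *      {
 *              foo_stack_t *fs;
 *
 *              fs = kmem_zalloc(sizeof (*fs), KM_SLEEP);
 *              fs->fs_netstack = ns;
 *              return (fs);
 *      }
 *
 *      static void
 *      foo_stack_destroy(netstackid_t stackid, void *arg)
 *      {
 *              kmem_free(arg, sizeof (foo_stack_t));
 *      }
 *
 *      netstack_register(NS_FOO, foo_stack_create, NULL, foo_stack_destroy);
 *      ...
 *      netstack_unregister(NS_FOO);
 *
 * The create callback runs once for every current and future stack
 * instance; passing NULL for the shutdown callback is allowed.
 */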

void
netstack_unregister(int moduleid)
{
        netstack_t *ns;

        ASSERT(moduleid >= 0 && moduleid < NS_MAX);

        ASSERT(ns_reg[moduleid].nr_create != NULL);
        ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);

        mutex_enter(&netstack_g_lock);
        /*
         * Determine the set of stacks that exist before we drop the lock.
         * Set NSS_SHUTDOWN_NEEDED and NSS_DESTROY_NEEDED for each of those.
         * That ensures that when we return all the callbacks for existing
         * instances have completed. And since we set NRF_DYING no new
         * instances can use this module.
         */
        for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
                nm_state_t *nms = &ns->netstack_m_state[moduleid];

                mutex_enter(&ns->netstack_lock);
                if (ns_reg[moduleid].nr_shutdown != NULL &&
                    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
                    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
                        nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
                        DTRACE_PROBE2(netstack__shutdown__needed,
                            netstack_t *, ns, int, moduleid);
                }
                if ((ns_reg[moduleid].nr_flags & NRF_REGISTERED) &&
                    ns_reg[moduleid].nr_destroy != NULL &&
                    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
                    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
                        nms->nms_flags |= NSS_DESTROY_NEEDED;
                        DTRACE_PROBE2(netstack__destroy__needed,
                            netstack_t *, ns, int, moduleid);
                }
                mutex_exit(&ns->netstack_lock);
        }
        /*
         * Prevent any new netstack from calling the registered create
         * function, while keeping the function pointers in place until the
         * shutdown and destroy callbacks are complete.
         */
        ns_reg[moduleid].nr_flags |= NRF_DYING;
        mutex_exit(&netstack_g_lock);

        apply_all_netstacks(moduleid, netstack_apply_shutdown);
        apply_all_netstacks(moduleid, netstack_apply_destroy);

        /*
         * Clear the nms_flags so that we can handle this module
         * being loaded again.
         * Also remove the registered functions.
         */
        mutex_enter(&netstack_g_lock);
        ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);
        ASSERT(ns_reg[moduleid].nr_flags & NRF_DYING);
        for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
                nm_state_t *nms = &ns->netstack_m_state[moduleid];

                mutex_enter(&ns->netstack_lock);
                if (nms->nms_flags & NSS_DESTROY_COMPLETED) {
                        nms->nms_flags = 0;
                        DTRACE_PROBE2(netstack__destroy__done,
                            netstack_t *, ns, int, moduleid);
                }
                mutex_exit(&ns->netstack_lock);
        }

        ns_reg[moduleid].nr_create = NULL;
        ns_reg[moduleid].nr_shutdown = NULL;
        ns_reg[moduleid].nr_destroy = NULL;
        ns_reg[moduleid].nr_flags = 0;
        mutex_exit(&netstack_g_lock);
}

/*
 * Lookup and/or allocate a netstack for this zone.
 */
static void *
netstack_zone_create(zoneid_t zoneid)
{
        netstackid_t stackid;
        netstack_t *ns;
        netstack_t **nsp;
        zone_t *zone;
        int i;

        ASSERT(netstack_initialized);

        zone = zone_find_by_id_nolock(zoneid);
        ASSERT(zone != NULL);

        if (zone->zone_flags & ZF_NET_EXCL) {
                stackid = zoneid;
        } else {
                /* Look for the stack instance for the global */
                stackid = GLOBAL_NETSTACKID;
        }

        /* Allocate even if it isn't needed; simplifies locking */
        ns = (netstack_t *)kmem_zalloc(sizeof (netstack_t), KM_SLEEP);

        /* Look if there is a matching stack instance */
        mutex_enter(&netstack_g_lock);
        for (nsp = &netstack_head; *nsp != NULL;
            nsp = &((*nsp)->netstack_next)) {
                if ((*nsp)->netstack_stackid == stackid) {
                        /*
                         * Should never find a pre-existing exclusive stack
                         */
                        ASSERT(stackid == GLOBAL_NETSTACKID);
                        kmem_free(ns, sizeof (netstack_t));
                        ns = *nsp;
                        mutex_enter(&ns->netstack_lock);
                        ns->netstack_numzones++;
                        mutex_exit(&ns->netstack_lock);
                        mutex_exit(&netstack_g_lock);
                        DTRACE_PROBE1(netstack__inc__numzones,
                            netstack_t *, ns);
                        /* Record that we have a new shared stack zone */
                        netstack_shared_zone_add(zoneid);
                        zone->zone_netstack = ns;
                        return (ns);
                }
        }
        /* Not found */
        mutex_init(&ns->netstack_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&ns->netstack_cv, NULL, CV_DEFAULT, NULL);
        ns->netstack_stackid = zoneid;
        ns->netstack_numzones = 1;
        ns->netstack_refcnt = 1; /* Decremented by netstack_zone_destroy */
        ns->netstack_flags = NSF_UNINIT;
        *nsp = ns;
        zone->zone_netstack = ns;

        mutex_enter(&ns->netstack_lock);
        /*
         * Mark this netstack as having a CREATE running so
         * any netstack_register/netstack_unregister waits for
         * the existing create callbacks to complete in moduleid order
         */
        ns->netstack_flags |= NSF_ZONE_CREATE;

        /*
         * Determine the set of module create functions that need to be
         * called before we drop the lock.
         * Set NSS_CREATE_NEEDED for each of those.
         * Skip any with NRF_DYING set, since those are in the process of
         * going away, by checking for flags being exactly NRF_REGISTERED.
         */
        for (i = 0; i < NS_MAX; i++) {
                nm_state_t *nms = &ns->netstack_m_state[i];

                cv_init(&nms->nms_cv, NULL, CV_DEFAULT, NULL);

                if ((ns_reg[i].nr_flags == NRF_REGISTERED) &&
                    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
                        nms->nms_flags |= NSS_CREATE_NEEDED;
                        DTRACE_PROBE2(netstack__create__needed,
                            netstack_t *, ns, int, i);
                }
        }
        mutex_exit(&ns->netstack_lock);
        mutex_exit(&netstack_g_lock);

        apply_all_modules(ns, netstack_apply_create);

        /* Tell any waiting netstack_register/netstack_unregister to proceed */
        mutex_enter(&ns->netstack_lock);
        ns->netstack_flags &= ~NSF_UNINIT;
        ASSERT(ns->netstack_flags & NSF_ZONE_CREATE);
        ns->netstack_flags &= ~NSF_ZONE_CREATE;
        cv_broadcast(&ns->netstack_cv);
        mutex_exit(&ns->netstack_lock);

        return (ns);
}

/* ARGSUSED */
static void
netstack_zone_shutdown(zoneid_t zoneid, void *arg)
{
        netstack_t *ns = (netstack_t *)arg;
        int i;

        ASSERT(arg != NULL);

        mutex_enter(&ns->netstack_lock);
        ASSERT(ns->netstack_numzones > 0);
        if (ns->netstack_numzones != 1) {
                /* Stack instance being used by other zone */
                mutex_exit(&ns->netstack_lock);
                ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
                return;
        }
        mutex_exit(&ns->netstack_lock);

        mutex_enter(&netstack_g_lock);
        mutex_enter(&ns->netstack_lock);
        /*
         * Mark this netstack as having a SHUTDOWN running so
         * any netstack_register/netstack_unregister waits for
         * the existing create callbacks to complete in moduleid order
         */
        ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
        ns->netstack_flags |= NSF_ZONE_SHUTDOWN;

        /*
         * Determine the set of stacks that exist before we drop the lock.
         * Set NSS_SHUTDOWN_NEEDED for each of those.
         */
        for (i = 0; i < NS_MAX; i++) {
                nm_state_t *nms = &ns->netstack_m_state[i];

                if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
                    ns_reg[i].nr_shutdown != NULL &&
                    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
                    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
                        nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
                        DTRACE_PROBE2(netstack__shutdown__needed,
                            netstack_t *, ns, int, i);
                }
        }
        mutex_exit(&ns->netstack_lock);
        mutex_exit(&netstack_g_lock);

        /*
         * Call the shutdown function for all registered modules for this
         * netstack.
         */
        apply_all_modules_reverse(ns, netstack_apply_shutdown);

        /* Tell any waiting netstack_register/netstack_unregister to proceed */
        mutex_enter(&ns->netstack_lock);
        ASSERT(ns->netstack_flags & NSF_ZONE_SHUTDOWN);
        ns->netstack_flags &= ~NSF_ZONE_SHUTDOWN;
        cv_broadcast(&ns->netstack_cv);
        mutex_exit(&ns->netstack_lock);
}

/*
 * Common routine to release a zone.
 * If this was the last zone using the stack instance then prepare for
 * the refcnt dropping to zero to free the stack instance.
 */
/* ARGSUSED */
static void
netstack_zone_destroy(zoneid_t zoneid, void *arg)
{
        netstack_t *ns = (netstack_t *)arg;

        ASSERT(arg != NULL);

        mutex_enter(&ns->netstack_lock);
        ASSERT(ns->netstack_numzones > 0);
        ns->netstack_numzones--;
        if (ns->netstack_numzones != 0) {
                /* Stack instance being used by other zone */
                mutex_exit(&ns->netstack_lock);
                ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
                /* Record that a shared stack zone has gone away */
                netstack_shared_zone_remove(zoneid);
                return;
        }
        /*
         * Set CLOSING so that netstack_find_by will not find it.
         */
        ns->netstack_flags |= NSF_CLOSING;
        mutex_exit(&ns->netstack_lock);
        DTRACE_PROBE1(netstack__dec__numzones, netstack_t *, ns);
        /* No other thread can call zone_destroy for this stack */

        /*
         * Decrease refcnt to account for the one in netstack_zone_create()
         */
        netstack_rele(ns);
}

/*
 * Called when the reference count drops to zero.
 * Call the destroy functions for each registered module.
 */
static void
netstack_stack_inactive(netstack_t *ns)
{
        int i;

        mutex_enter(&netstack_g_lock);
        mutex_enter(&ns->netstack_lock);
        /*
         * Mark this netstack as having a DESTROY running so
         * any netstack_register/netstack_unregister waits for
         * the existing destroy callbacks to complete in reverse moduleid order
         */
        ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
        ns->netstack_flags |= NSF_ZONE_DESTROY;
        /*
         * If the shutdown callback wasn't called earlier (e.g., if this is
         * a netstack shared between multiple zones), then we schedule it now.
         *
         * Determine the set of stacks that exist before we drop the lock.
         * Set NSS_DESTROY_NEEDED for each of those. That
         * ensures that when we return all the callbacks for existing
         * instances have completed.
         */
        for (i = 0; i < NS_MAX; i++) {
                nm_state_t *nms = &ns->netstack_m_state[i];

                if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
                    ns_reg[i].nr_shutdown != NULL &&
                    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
                    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
                        nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
                        DTRACE_PROBE2(netstack__shutdown__needed,
                            netstack_t *, ns, int, i);
                }

                if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
                    ns_reg[i].nr_destroy != NULL &&
                    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
                    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
                        nms->nms_flags |= NSS_DESTROY_NEEDED;
                        DTRACE_PROBE2(netstack__destroy__needed,
                            netstack_t *, ns, int, i);
                }
        }
        mutex_exit(&ns->netstack_lock);
        mutex_exit(&netstack_g_lock);

        /*
         * Call the shutdown and destroy functions for all registered modules
         * for this netstack.
         *
         * Since there are some ordering dependencies between the modules we
         * tear them down in the reverse order of what was used to create them.
         *
         * Since a netstack_t is never reused (when a zone is rebooted it gets
         * a new zoneid == netstackid i.e. a new netstack_t is allocated) we
         * leave nms_flags the way it is i.e. with NSS_DESTROY_COMPLETED set.
         * That is different than in the netstack_unregister() case.
         */
        apply_all_modules_reverse(ns, netstack_apply_shutdown);
        apply_all_modules_reverse(ns, netstack_apply_destroy);

        /* Tell any waiting netstack_register/netstack_unregister to proceed */
        mutex_enter(&ns->netstack_lock);
        ASSERT(ns->netstack_flags & NSF_ZONE_DESTROY);
        ns->netstack_flags &= ~NSF_ZONE_DESTROY;
        cv_broadcast(&ns->netstack_cv);
        mutex_exit(&ns->netstack_lock);
}

/*
 * Apply a function to all netstacks for a particular moduleid.
 *
 * If there is any zone activity (due to a zone being created, shutdown,
 * or destroyed) we wait for that to complete before we proceed. This ensures
 * that the moduleids are processed in order when a zone is created or
 * destroyed.
 *
 * The applyfn has to drop netstack_g_lock if it does some work.
 * In that case we don't follow netstack_next,
 * even if it is possible to do so without any hazards. This is
 * because we want the design to allow for the list of netstacks threaded
 * by netstack_next to change in any arbitrary way during the time the
 * lock was dropped.
 *
 * It is safe to restart the loop at netstack_head since the applyfn
 * changes netstack_m_state as it processes things, so a subsequent
 * pass through will have no effect in applyfn, hence the loop will terminate
 * in at worst O(N^2).
 */
static void
apply_all_netstacks(int moduleid, applyfn_t *applyfn)
{
        netstack_t *ns;

        mutex_enter(&netstack_g_lock);
        ns = netstack_head;
        while (ns != NULL) {
                if (wait_for_zone_creator(ns, &netstack_g_lock)) {
                        /* Lock dropped - restart at head */
                        ns = netstack_head;
                } else if ((applyfn)(&netstack_g_lock, ns, moduleid)) {
                        /* Lock dropped - restart at head */
                        ns = netstack_head;
                } else {
                        ns = ns->netstack_next;
                }
        }
        mutex_exit(&netstack_g_lock);
}

/*
 * Apply a function to all moduleids for a particular netstack.
 *
 * Since the netstack linkage doesn't matter in this case we can
 * ignore whether the function drops the lock.
 */
static void
apply_all_modules(netstack_t *ns, applyfn_t *applyfn)
{
        int i;

        mutex_enter(&netstack_g_lock);
        for (i = 0; i < NS_MAX; i++) {
                /*
                 * We don't care whether the lock was dropped
                 * since we are not iterating over netstack_head.
                 */
                (void) (applyfn)(&netstack_g_lock, ns, i);
        }
        mutex_exit(&netstack_g_lock);
}

/* Like the above but in reverse moduleid order */
static void
apply_all_modules_reverse(netstack_t *ns, applyfn_t *applyfn)
{
        int i;

        mutex_enter(&netstack_g_lock);
        for (i = NS_MAX-1; i >= 0; i--) {
                /*
                 * We don't care whether the lock was dropped
                 * since we are not iterating over netstack_head.
                 */
                (void) (applyfn)(&netstack_g_lock, ns, i);
        }
        mutex_exit(&netstack_g_lock);
}

/*
 * Call the create function for the ns and moduleid if CREATE_NEEDED
 * is set.
 * If some other thread gets here first and sets *_INPROGRESS, then
 * we wait for that thread to complete so that we can ensure that
 * all the callbacks are done when we've looped over all netstacks/moduleids.
 *
 * When we call the create function, we temporarily drop the netstack_lock
 * held by the caller, and return true to tell the caller it needs to
 * re-evaluate the state.
 */
static boolean_t
netstack_apply_create(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
        void *result;
        netstackid_t stackid;
        nm_state_t *nms = &ns->netstack_m_state[moduleid];
        boolean_t dropped = B_FALSE;

        ASSERT(MUTEX_HELD(lockp));
        mutex_enter(&ns->netstack_lock);

        if (wait_for_nms_inprogress(ns, nms, lockp))
                dropped = B_TRUE;

        if (nms->nms_flags & NSS_CREATE_NEEDED) {
                nms->nms_flags &= ~NSS_CREATE_NEEDED;
                nms->nms_flags |= NSS_CREATE_INPROGRESS;
                DTRACE_PROBE2(netstack__create__inprogress,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                mutex_exit(lockp);
                dropped = B_TRUE;

                ASSERT(ns_reg[moduleid].nr_create != NULL);
                stackid = ns->netstack_stackid;
                DTRACE_PROBE2(netstack__create__start,
                    netstackid_t, stackid,
                    netstack_t *, ns);
                result = (ns_reg[moduleid].nr_create)(stackid, ns);
                DTRACE_PROBE2(netstack__create__end,
                    void *, result, netstack_t *, ns);

                ASSERT(result != NULL);
                mutex_enter(lockp);
                mutex_enter(&ns->netstack_lock);
                ns->netstack_modules[moduleid] = result;
                nms->nms_flags &= ~NSS_CREATE_INPROGRESS;
                nms->nms_flags |= NSS_CREATE_COMPLETED;
                cv_broadcast(&nms->nms_cv);
                DTRACE_PROBE2(netstack__create__completed,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        } else {
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        }
}

/*
 * Call the shutdown function for the ns and moduleid if SHUTDOWN_NEEDED
 * is set.
 * If some other thread gets here first and sets *_INPROGRESS, then
 * we wait for that thread to complete so that we can ensure that
 * all the callbacks are done when we've looped over all netstacks/moduleids.
 *
 * When we call the shutdown function, we temporarily drop the netstack_lock
 * held by the caller, and return true to tell the caller it needs to
 * re-evaluate the state.
 */
static boolean_t
netstack_apply_shutdown(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
        netstackid_t stackid;
        void *netstack_module;
        nm_state_t *nms = &ns->netstack_m_state[moduleid];
        boolean_t dropped = B_FALSE;

        ASSERT(MUTEX_HELD(lockp));
        mutex_enter(&ns->netstack_lock);

        if (wait_for_nms_inprogress(ns, nms, lockp))
                dropped = B_TRUE;

        if (nms->nms_flags & NSS_SHUTDOWN_NEEDED) {
                nms->nms_flags &= ~NSS_SHUTDOWN_NEEDED;
                nms->nms_flags |= NSS_SHUTDOWN_INPROGRESS;
                DTRACE_PROBE2(netstack__shutdown__inprogress,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                mutex_exit(lockp);
                dropped = B_TRUE;

                ASSERT(ns_reg[moduleid].nr_shutdown != NULL);
                stackid = ns->netstack_stackid;
                netstack_module = ns->netstack_modules[moduleid];
                DTRACE_PROBE2(netstack__shutdown__start,
                    netstackid_t, stackid,
                    void *, netstack_module);
                (ns_reg[moduleid].nr_shutdown)(stackid, netstack_module);
                DTRACE_PROBE1(netstack__shutdown__end,
                    netstack_t *, ns);

                mutex_enter(lockp);
                mutex_enter(&ns->netstack_lock);
                nms->nms_flags &= ~NSS_SHUTDOWN_INPROGRESS;
                nms->nms_flags |= NSS_SHUTDOWN_COMPLETED;
                cv_broadcast(&nms->nms_cv);
                DTRACE_PROBE2(netstack__shutdown__completed,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        } else {
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        }
}

/*
 * Call the destroy function for the ns and moduleid if DESTROY_NEEDED
 * is set.
 * If some other thread gets here first and sets *_INPROGRESS, then
 * we wait for that thread to complete so that we can ensure that
 * all the callbacks are done when we've looped over all netstacks/moduleids.
 *
 * When we call the destroy function, we temporarily drop the netstack_lock
 * held by the caller, and return true to tell the caller it needs to
 * re-evaluate the state.
 */
static boolean_t
netstack_apply_destroy(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
        netstackid_t stackid;
        void *netstack_module;
        nm_state_t *nms = &ns->netstack_m_state[moduleid];
        boolean_t dropped = B_FALSE;

        ASSERT(MUTEX_HELD(lockp));
        mutex_enter(&ns->netstack_lock);

        if (wait_for_nms_inprogress(ns, nms, lockp))
                dropped = B_TRUE;

        if (nms->nms_flags & NSS_DESTROY_NEEDED) {
                nms->nms_flags &= ~NSS_DESTROY_NEEDED;
                nms->nms_flags |= NSS_DESTROY_INPROGRESS;
                DTRACE_PROBE2(netstack__destroy__inprogress,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                mutex_exit(lockp);
                dropped = B_TRUE;

                ASSERT(ns_reg[moduleid].nr_destroy != NULL);
                stackid = ns->netstack_stackid;
                netstack_module = ns->netstack_modules[moduleid];
                DTRACE_PROBE2(netstack__destroy__start,
                    netstackid_t, stackid,
                    void *, netstack_module);
                (ns_reg[moduleid].nr_destroy)(stackid, netstack_module);
                DTRACE_PROBE1(netstack__destroy__end,
                    netstack_t *, ns);

                mutex_enter(lockp);
                mutex_enter(&ns->netstack_lock);
                ns->netstack_modules[moduleid] = NULL;
                nms->nms_flags &= ~NSS_DESTROY_INPROGRESS;
                nms->nms_flags |= NSS_DESTROY_COMPLETED;
                cv_broadcast(&nms->nms_cv);
                DTRACE_PROBE2(netstack__destroy__completed,
                    netstack_t *, ns, int, moduleid);
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        } else {
                mutex_exit(&ns->netstack_lock);
                return (dropped);
        }
}

/*
 * If somebody is creating the netstack (due to a new zone being created)
 * then we wait for them to complete. This ensures that any additional
 * netstack_register() doesn't cause the create functions to run out of
 * order.
 * Note that we do not need such a global wait in the case of the shutdown
 * and destroy callbacks, since in that case it is sufficient for both
 * threads to set NEEDED and wait for INPROGRESS to ensure ordering.
 * Returns true if lockp was temporarily dropped while waiting.
 */
static boolean_t
wait_for_zone_creator(netstack_t *ns, kmutex_t *lockp)
{
        boolean_t dropped = B_FALSE;

        mutex_enter(&ns->netstack_lock);
        while (ns->netstack_flags & NSF_ZONE_CREATE) {
                DTRACE_PROBE1(netstack__wait__zone__inprogress,
                    netstack_t *, ns);
                if (lockp != NULL) {
                        dropped = B_TRUE;
                        mutex_exit(lockp);
                }
                cv_wait(&ns->netstack_cv, &ns->netstack_lock);
                if (lockp != NULL) {
                        /* First drop netstack_lock to preserve order */
                        mutex_exit(&ns->netstack_lock);
                        mutex_enter(lockp);
                        mutex_enter(&ns->netstack_lock);
                }
        }
        mutex_exit(&ns->netstack_lock);
        return (dropped);
}

/*
 * Wait for any INPROGRESS flag to be cleared for the netstack/moduleid
 * combination.
 * Returns true if lockp was temporarily dropped while waiting.
 */
static boolean_t
wait_for_nms_inprogress(netstack_t *ns, nm_state_t *nms, kmutex_t *lockp)
{
        boolean_t dropped = B_FALSE;

        while (nms->nms_flags & NSS_ALL_INPROGRESS) {
                DTRACE_PROBE2(netstack__wait__nms__inprogress,
                    netstack_t *, ns, nm_state_t *, nms);
                if (lockp != NULL) {
                        dropped = B_TRUE;
                        mutex_exit(lockp);
                }
                cv_wait(&nms->nms_cv, &ns->netstack_lock);
                if (lockp != NULL) {
                        /* First drop netstack_lock to preserve order */
                        mutex_exit(&ns->netstack_lock);
                        mutex_enter(lockp);
                        mutex_enter(&ns->netstack_lock);
                }
        }
        return (dropped);
}

/*
 * Get the stack instance used in caller's zone.
 * Increases the reference count, caller must do a netstack_rele.
 * It can't be called after zone_destroy() has started.
 */
netstack_t *
netstack_get_current(void)
{
        netstack_t *ns;

        ns = curproc->p_zone->zone_netstack;
        ASSERT(ns != NULL);
        if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
                return (NULL);

        netstack_hold(ns);

        return (ns);
}
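
/*
 * Typical hold/release pattern (an illustrative sketch only; the error
 * handling is up to the caller):
 *
 *      netstack_t *ns;
 *
 *      ns = netstack_get_current();
 *      if (ns == NULL)
 *              return;                 (current zone's stack is going away)
 *      ... use ns->netstack_modules[moduleid] ...
 *      netstack_rele(ns);
 *
 * The NULL check is required because the caller's zone may already have
 * started zone_destroy(), in which case NSF_CLOSING is set.
 */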

/*
 * Find a stack instance given the cred.
 * This is used by the modules to potentially allow for a future when
 * something other than the zoneid is used to determine the stack.
 */
netstack_t *
netstack_find_by_cred(const cred_t *cr)
{
        zoneid_t zoneid = crgetzoneid(cr);

        /* Handle the case when cr_zone is NULL */
        if (zoneid == (zoneid_t)-1)
                zoneid = GLOBAL_ZONEID;

        /* For performance ... */
        if (curproc->p_zone->zone_id == zoneid)
                return (netstack_get_current());
        else
                return (netstack_find_by_zoneid(zoneid));
}

/*
 * Find a stack instance given the zoneid.
 * Increases the reference count if found; caller must do a
 * netstack_rele().
 *
 * If there is no exact match then assume the shared stack instance
 * matches.
 *
 * Skip the uninitialized ones.
 */
netstack_t *
netstack_find_by_zoneid(zoneid_t zoneid)
{
        netstack_t *ns;
        zone_t *zone;

        zone = zone_find_by_id(zoneid);

        if (zone == NULL)
                return (NULL);

        ns = zone->zone_netstack;
        ASSERT(ns != NULL);
        if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
                ns = NULL;
        else
                netstack_hold(ns);

        zone_rele(zone);
        return (ns);
}

/*
 * Find a stack instance given the zoneid. Can only be called from
 * the create callback. See the comments in zone_find_by_id_nolock why
 * that limitation exists.
 *
 * Increases the reference count if found; caller must do a
 * netstack_rele().
 *
 * If there is no exact match then assume the shared stack instance
 * matches.
 *
 * Skip the uninitialized ones.
 */
netstack_t *
netstack_find_by_zoneid_nolock(zoneid_t zoneid)
{
        netstack_t *ns;
        zone_t *zone;

        zone = zone_find_by_id_nolock(zoneid);

        if (zone == NULL)
                return (NULL);

        ns = zone->zone_netstack;
        ASSERT(ns != NULL);

        if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
                ns = NULL;
        else
                netstack_hold(ns);

        /* zone_find_by_id_nolock does not have a hold on the zone */
        return (ns);
}

/*
 * Find a stack instance given the stackid, using an exact match.
 * Increases the reference count if found; caller must do a
 * netstack_rele().
 *
 * Skip the uninitialized ones.
 */
netstack_t *
netstack_find_by_stackid(netstackid_t stackid)
{
        netstack_t *ns;

        mutex_enter(&netstack_g_lock);
        for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
                mutex_enter(&ns->netstack_lock);
                if (ns->netstack_stackid == stackid &&
                    !(ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))) {
                        mutex_exit(&ns->netstack_lock);
                        netstack_hold(ns);
                        mutex_exit(&netstack_g_lock);
                        return (ns);
                }
                mutex_exit(&ns->netstack_lock);
        }
        mutex_exit(&netstack_g_lock);
        return (NULL);
}
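
/*
 * Illustrative sketch (not from this file): an asynchronous callback,
 * such as a timeout or taskq handler, that was handed only a stackid
 * can re-acquire the corresponding netstack safely:
 *
 *      netstack_t *ns;
 *
 *      ns = netstack_find_by_stackid(stackid);
 *      if (ns == NULL)
 *              return;                 (stack is uninitialized or closing)
 *      ... do the work ...
 *      netstack_rele(ns);
 */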

void
netstack_rele(netstack_t *ns)
{
        netstack_t **nsp;
        boolean_t found;
        int refcnt, numzones;
        int i;

        mutex_enter(&ns->netstack_lock);
        ASSERT(ns->netstack_refcnt > 0);
        ns->netstack_refcnt--;
        /*
         * As we drop the lock additional netstack_rele()s can come in
         * and decrement the refcnt to zero and free the netstack_t.
         * Store pointers in local variables and if we were not the last
         * then don't reference the netstack_t after that.
         */
        refcnt = ns->netstack_refcnt;
        numzones = ns->netstack_numzones;
        DTRACE_PROBE1(netstack__dec__ref, netstack_t *, ns);
        mutex_exit(&ns->netstack_lock);

        if (refcnt == 0 && numzones == 0) {
                /*
                 * Time to call the destroy functions and free up
                 * the structure
                 */
                netstack_stack_inactive(ns);

                /* Make sure nothing increased the references */
                ASSERT(ns->netstack_refcnt == 0);
                ASSERT(ns->netstack_numzones == 0);

                /* Finally remove from list of netstacks */
                mutex_enter(&netstack_g_lock);
                found = B_FALSE;
                for (nsp = &netstack_head; *nsp != NULL;
                    nsp = &(*nsp)->netstack_next) {
                        if (*nsp == ns) {
                                *nsp = ns->netstack_next;
                                ns->netstack_next = NULL;
                                found = B_TRUE;
                                break;
                        }
                }
                ASSERT(found);
                mutex_exit(&netstack_g_lock);

                /* Make sure nothing increased the references */
                ASSERT(ns->netstack_refcnt == 0);
                ASSERT(ns->netstack_numzones == 0);

                ASSERT(ns->netstack_flags & NSF_CLOSING);

                for (i = 0; i < NS_MAX; i++) {
                        nm_state_t *nms = &ns->netstack_m_state[i];

                        cv_destroy(&nms->nms_cv);
                }
                mutex_destroy(&ns->netstack_lock);
                cv_destroy(&ns->netstack_cv);
                kmem_free(ns, sizeof (*ns));
        }
}

void
netstack_hold(netstack_t *ns)
{
        mutex_enter(&ns->netstack_lock);
        ns->netstack_refcnt++;
        ASSERT(ns->netstack_refcnt > 0);
        mutex_exit(&ns->netstack_lock);
        DTRACE_PROBE1(netstack__inc__ref, netstack_t *, ns);
}

/*
 * To support kstat_create_netstack() using kstat_zone_add we need
 * to track both
 * - all zoneids that use the global/shared stack
 * - all kstats that have been added for the shared stack
 */
kstat_t *
kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
    char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
    netstackid_t ks_netstackid)
{
        kstat_t *ks;

        if (ks_netstackid == GLOBAL_NETSTACKID) {
                ks = kstat_create_zone(ks_module, ks_instance, ks_name,
                    ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
                if (ks != NULL)
                        netstack_shared_kstat_add(ks);
                return (ks);
        } else {
                zoneid_t zoneid = ks_netstackid;

                return (kstat_create_zone(ks_module, ks_instance, ks_name,
                    ks_class, ks_type, ks_ndata, ks_flags, zoneid));
        }
}

void
kstat_delete_netstack(kstat_t *ks, netstackid_t ks_netstackid)
{
        if (ks_netstackid == GLOBAL_NETSTACKID) {
                netstack_shared_kstat_remove(ks);
        }
        kstat_delete(ks);
}
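
/*
 * Example (an illustrative sketch; the "foomod"/"foostats" names are
 * made up). A kstat created against GLOBAL_NETSTACKID is made visible
 * in every zone that shares the global stack, while a kstat for an
 * exclusive stack is only visible in its own zone:
 *
 *      kstat_t *ksp;
 *
 *      ksp = kstat_create_netstack("foomod", 0, "foostats", "net",
 *          KSTAT_TYPE_NAMED, 3, 0, stackid);
 *      if (ksp != NULL)
 *              kstat_install(ksp);
 *      ...
 *      kstat_delete_netstack(ksp, stackid);
 */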

static void
netstack_shared_zone_add(zoneid_t zoneid)
{
        struct shared_zone_list *sz;
        struct shared_kstat_list *sk;

        sz = (struct shared_zone_list *)kmem_zalloc(sizeof (*sz), KM_SLEEP);
        sz->sz_zoneid = zoneid;

        /* Insert in list */
        mutex_enter(&netstack_shared_lock);
        sz->sz_next = netstack_shared_zones;
        netstack_shared_zones = sz;

        /*
         * Perform kstat_zone_add for each existing shared stack kstat.
         * Note: Holds netstack_shared_lock lock across kstat_zone_add.
         */
        for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
                kstat_zone_add(sk->sk_kstat, zoneid);
        }
        mutex_exit(&netstack_shared_lock);
}

static void
netstack_shared_zone_remove(zoneid_t zoneid)
{
        struct shared_zone_list **szp, *sz;
        struct shared_kstat_list *sk;

        /* Find in list */
        mutex_enter(&netstack_shared_lock);
        sz = NULL;
        for (szp = &netstack_shared_zones; *szp != NULL;
            szp = &((*szp)->sz_next)) {
                if ((*szp)->sz_zoneid == zoneid) {
                        sz = *szp;
                        break;
                }
        }
        /* We must find it */
        ASSERT(sz != NULL);
        *szp = sz->sz_next;
        sz->sz_next = NULL;

        /*
         * Perform kstat_zone_remove for each existing shared stack kstat.
         * Note: Holds netstack_shared_lock lock across kstat_zone_remove.
         */
        for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
                kstat_zone_remove(sk->sk_kstat, zoneid);
        }
        mutex_exit(&netstack_shared_lock);

        kmem_free(sz, sizeof (*sz));
}

static void
netstack_shared_kstat_add(kstat_t *ks)
{
        struct shared_zone_list *sz;
        struct shared_kstat_list *sk;

        sk = (struct shared_kstat_list *)kmem_zalloc(sizeof (*sk), KM_SLEEP);
        sk->sk_kstat = ks;

        /* Insert in list */
        mutex_enter(&netstack_shared_lock);
        sk->sk_next = netstack_shared_kstats;
        netstack_shared_kstats = sk;

        /*
         * Perform kstat_zone_add for each existing shared stack zone.
         * Note: Holds netstack_shared_lock lock across kstat_zone_add.
         */
        for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
                kstat_zone_add(ks, sz->sz_zoneid);
        }
        mutex_exit(&netstack_shared_lock);
}

static void
netstack_shared_kstat_remove(kstat_t *ks)
{
        struct shared_zone_list *sz;
        struct shared_kstat_list **skp, *sk;

        /* Find in list */
        mutex_enter(&netstack_shared_lock);
        sk = NULL;
        for (skp = &netstack_shared_kstats; *skp != NULL;
            skp = &((*skp)->sk_next)) {
                if ((*skp)->sk_kstat == ks) {
                        sk = *skp;
                        break;
                }
        }
        /* Must find it */
        ASSERT(sk != NULL);
        *skp = sk->sk_next;
        sk->sk_next = NULL;

        /*
         * Perform kstat_zone_remove for each existing shared stack zone.
         * Note: Holds netstack_shared_lock lock across kstat_zone_remove.
         */
        for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
                kstat_zone_remove(ks, sz->sz_zoneid);
        }
        mutex_exit(&netstack_shared_lock);
        kmem_free(sk, sizeof (*sk));
}

/*
 * If a zoneid is part of the shared zone, return true
 */
static boolean_t
netstack_find_shared_zoneid(zoneid_t zoneid)
{
        struct shared_zone_list *sz;

        mutex_enter(&netstack_shared_lock);
        for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
                if (sz->sz_zoneid == zoneid) {
                        mutex_exit(&netstack_shared_lock);
                        return (B_TRUE);
                }
        }
        mutex_exit(&netstack_shared_lock);
        return (B_FALSE);
}

/*
 * Hide the fact that zoneids and netstackids are allocated from
 * the same space in the current implementation.
 * We currently do not check that the stackid/zoneids are valid, since there
 * is no need for that. But this should only be done for ids that are
 * valid.
 */
zoneid_t
netstackid_to_zoneid(netstackid_t stackid)
{
        return (stackid);
}

netstackid_t
zoneid_to_netstackid(zoneid_t zoneid)
{
        if (netstack_find_shared_zoneid(zoneid))
                return (GLOBAL_ZONEID);
        else
                return (zoneid);
}

zoneid_t
netstack_get_zoneid(netstack_t *ns)
{
        return (netstackid_to_zoneid(ns->netstack_stackid));
}

/*
 * Simplistic support for walking all the handles.
 * Example usage:
 *      netstack_handle_t nh;
 *      netstack_t *ns;
 *
 *      netstack_next_init(&nh);
 *      while ((ns = netstack_next(&nh)) != NULL) {
 *              do something;
 *              netstack_rele(ns);
 *      }
 *      netstack_next_fini(&nh);
 */
void
netstack_next_init(netstack_handle_t *handle)
{
        *handle = 0;
}

/* ARGSUSED */
void
netstack_next_fini(netstack_handle_t *handle)
{
}

netstack_t *
netstack_next(netstack_handle_t *handle)
{
        netstack_t *ns;
        int i, end;

        end = *handle;
        /* Walk skipping *handle number of instances */

        /* Look if there is a matching stack instance */
        mutex_enter(&netstack_g_lock);
        ns = netstack_head;
        for (i = 0; i < end; i++) {
                if (ns == NULL)
                        break;
                ns = ns->netstack_next;
        }
        /* skip those that aren't really here */
        while (ns != NULL) {
                mutex_enter(&ns->netstack_lock);
                if ((ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING)) == 0) {
                        mutex_exit(&ns->netstack_lock);
                        break;
                }
                mutex_exit(&ns->netstack_lock);
                end++;
                ns = ns->netstack_next;
        }
        if (ns != NULL) {
                *handle = end + 1;
                netstack_hold(ns);
        }
        mutex_exit(&netstack_g_lock);
        return (ns);
}