/* xref: /onnv-gate/usr/src/uts/common/os/zone.c (revision 9121:f83e5a35a5da) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Zones
 *
 *   A zone is a named collection of processes, namespace constraints,
 *   and other system resources which comprise a secure and manageable
 *   application containment facility.
 *
 *   Zones (represented by the reference counted zone_t) are tracked in
 *   the kernel in the zonehash.  Elsewhere in the kernel, Zone IDs
 *   (zoneid_t) are used to track zone association.  Zone IDs are
 *   dynamically generated when the zone is created; if a persistent
 *   identifier is needed (core files, accounting logs, audit trail,
 *   etc.), the zone name should be used.
 *
 *
 *   Global Zone:
 *
 *   The global zone (zoneid 0) is automatically associated with all
 *   system resources that have not been bound to a user-created zone.
 *   This means that even systems where zones are not in active use
 *   have a global zone, and all processes, mounts, etc. are
 *   associated with that zone.  The global zone is generally
 *   unconstrained in terms of privileges and access, though the usual
 *   credential and privilege based restrictions apply.
 *
 *
 *   Zone States:
 *
 *   The states a zone may be in, and the transitions between them, are as
 *   follows:
 *
 *   ZONE_IS_UNINITIALIZED: primordial state for a zone. The partially
 *   initialized zone is added to the list of active zones on the system but
 *   isn't accessible.
 *
 *   ZONE_IS_INITIALIZED: initialization is complete except that the ZSD
 *   callbacks have not yet run.  It is not possible to enter the zone, but
 *   attributes can be retrieved.
 *
 *   ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
 *   ready.  The zone is made visible after the ZSD constructor callbacks are
 *   executed.  A zone remains in this state until it transitions into
 *   the ZONE_IS_BOOTING state as a result of a call to zone_boot().
 *
 *   ZONE_IS_BOOTING: in this short-lived state, zsched attempts to start
 *   init.  Should that fail, the zone proceeds to the ZONE_IS_SHUTTING_DOWN
 *   state.
 *
 *   ZONE_IS_RUNNING: The zone is open for business: zsched has
 *   successfully started init.  A zone remains in this state until
 *   zone_shutdown() is called.
 *
 *   ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system is
 *   killing all processes running in the zone. The zone remains
 *   in this state until there are no more user processes running in the zone.
 *   zone_create(), zone_enter(), and zone_destroy() on this zone will fail.
 *   Since zone_shutdown() is restartable, it may be called successfully
 *   multiple times for the same zone_t.  Setting of the zone's state to
 *   ZONE_IS_SHUTTING_DOWN is synchronized with mounts, so VOP_MOUNT() may check
 *   the zone's status without worrying about it being a moving target.
 *
 *   ZONE_IS_EMPTY: zone_shutdown() has been called, and there
 *   are no more user processes in the zone.  The zone remains in this
 *   state until there are no more kernel threads associated with the
 *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone will
 *   fail.
 *
 *   ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
 *   have exited.  zone_shutdown() returns.  Henceforth it is not possible to
 *   join the zone or create kernel threads therein.
 *
 *   ZONE_IS_DYING: zone_destroy() has been called on the zone; zone
 *   remains in this state until zsched exits.  Calls to zone_find_by_*()
 *   return NULL from now on.
 *
 *   ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0).  There are no
 *   processes or threads doing work on behalf of the zone.  The zone is
 *   removed from the list of active zones.  zone_destroy() returns, and
 *   the zone can be recreated.
 *
 *   ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
 *   callbacks are executed, and all memory associated with the zone is
 *   freed.
 *
 *   Threads can wait for the zone to enter a requested state by using
 *   zone_status_wait() or zone_status_timedwait() with the desired
 *   state passed in as an argument.  Zone state transitions are
 *   uni-directional; it is not possible to move back to an earlier state.
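 *
 *   For example (an illustrative sketch, not a quote of any particular
 *   caller), a thread holding a reference on a zone that must not
 *   proceed until the zone has emptied could block with:
 *
 *	zone_status_wait(zone, ZONE_IS_EMPTY);
 *
 *   using the zone_status_wait(zone, state) form described above; the
 *   reference keeps the zone_t from being freed while the thread sleeps.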
 *
 *
 *   Zone-Specific Data:
 *
 *   Subsystems needing to maintain zone-specific data can store that
 *   data using the ZSD mechanism.  This provides a zone-specific data
 *   store, similar to thread-specific data (see pthread_getspecific(3C)
 *   or the TSD code in uts/common/disp/thread.c).  Also, ZSD can be used
 *   to register callbacks to be invoked when a zone is created, shut
 *   down, or destroyed.  This can be used to initialize zone-specific
 *   data for new zones and to clean up when zones go away.
 *
 *
 *   Data Structures:
 *
 *   The per-zone structure (zone_t) is reference counted, and freed
 *   when all references are released.  zone_hold and zone_rele can be
 *   used to adjust the reference count.  In addition, reference counts
 *   associated with the cred_t structure are tracked separately using
 *   zone_cred_hold and zone_cred_rele.
 *
 *   Pointers to active zone_t's are stored in two hash tables; one
 *   for searching by id, the other for searching by name.  Lookups
 *   can be performed on either basis, using zone_find_by_id and
 *   zone_find_by_name.  Both return zone_t pointers with the zone
 *   held, so zone_rele should be called when the pointer is no longer
 *   needed.  Zones can also be searched by path; zone_find_by_path
 *   returns the zone with which a path name is associated (global
 *   zone if the path is not within some other zone's file system
 *   hierarchy).  This currently requires iterating through each zone,
 *   so it is slower than an id or name search via a hash table.
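 *
 *   For example (a minimal sketch, not lifted from a particular caller),
 *   code that has a zone ID can translate it to a held zone_t and must
 *   drop that hold when done:
 *
 *	zone_t *zone;
 *
 *	if ((zone = zone_find_by_id(zoneid)) != NULL) {
 *		... use the zone; the lookup returned it held ...
 *		zone_rele(zone);
 *	}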
 *
 *
 *   Locking:
 *
 *   zonehash_lock: This is a top-level global lock used to protect the
 *       zone hash tables and lists.  Zones cannot be created or destroyed
 *       while this lock is held.
 *   zone_status_lock: This is a global lock protecting zone state.
 *       Zones cannot change state while this lock is held.  It also
 *       protects the list of kernel threads associated with a zone.
 *   zone_lock: This is a per-zone lock used to protect several fields of
 *       the zone_t (see <sys/zone.h> for details).  In addition, holding
 *       this lock means that the zone cannot go away.
 *   zone_nlwps_lock: This is a per-zone lock used to protect the fields
 *       related to the zone.max-lwps rctl.
 *   zone_mem_lock: This is a per-zone lock used to protect the fields
 *       related to the zone.max-locked-memory and zone.max-swap rctls.
 *   zsd_key_lock: This is a global lock protecting the key state for ZSD.
 *   zone_deathrow_lock: This is a global lock protecting the "deathrow"
 *       list (a list of zones in the ZONE_IS_DEAD state).
 *
 *   Ordering requirements:
 *       pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
 *       	zone_lock --> zsd_key_lock --> pidlock --> p_lock
 *
 *   When taking zone_mem_lock or zone_nlwps_lock, the lock ordering is:
 *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
 *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_nlwps_lock
 *
 *   Blocking memory allocations are permitted while holding any of the
 *   zone locks.
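 *
 *   As an illustrative sketch of the ordering above (not a quote of any
 *   particular function), code that needs a zone's zone_lock while
 *   walking the zone lists acquires the outermost lock first:
 *
 *	mutex_enter(&zonehash_lock);
 *	mutex_enter(&zone->zone_lock);
 *	... examine or update the protected zone_t fields ...
 *	mutex_exit(&zone->zone_lock);
 *	mutex_exit(&zonehash_lock);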
 *
 *
 *   System Call Interface:
 *
 *   The zone subsystem can be managed and queried from user level with
 *   the following system calls (all subcodes of the primary "zone"
 *   system call):
 *   - zone_create: creates a zone with selected attributes (name,
 *     root path, privileges, resource controls, ZFS datasets)
 *   - zone_enter: allows the current process to enter a zone
 *   - zone_getattr: reports attributes of a zone
 *   - zone_setattr: set attributes of a zone
 *   - zone_boot: set 'init' running for the zone
 *   - zone_list: lists all zones active in the system
 *   - zone_lookup: looks up zone id based on name
 *   - zone_shutdown: initiates shutdown process (see states above)
 *   - zone_destroy: completes shutdown process (see states above)
 *
 */

#include <sys/priv_impl.h>
#include <sys/cred.h>
#include <c2/audit.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/mutex.h>
#include <sys/note.h>
#include <sys/pathname.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/sysevent.h>
#include <sys/task.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/systeminfo.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/contract_impl.h>
#include <sys/contract/process_impl.h>
#include <sys/class.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/pset.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/vmparam.h>
#include <sys/corectl.h>
#include <sys/ipc_impl.h>

#include <sys/door.h>
#include <sys/cpuvar.h>
#include <sys/sdt.h>

#include <sys/uadmin.h>
#include <sys/session.h>
#include <sys/cmn_err.h>
#include <sys/modhash.h>
#include <sys/sunddi.h>
#include <sys/nvpair.h>
#include <sys/rctl.h>
#include <sys/fss.h>
#include <sys/brand.h>
#include <sys/zone.h>
#include <net/if.h>
#include <sys/cpucaps.h>
#include <vm/seg.h>

/*
 * cv used to signal that all references to the zone have been released.  This
 * needs to be global since there may be multiple waiters, and the first to
 * wake up will free the zone_t, hence we cannot use zone->zone_cv.
 */
static kcondvar_t zone_destroy_cv;
/*
 * Lock used to serialize access to zone_cv.  This could have been per-zone,
 * but then we'd need another lock for zone_destroy_cv, and why bother?
 */
static kmutex_t zone_status_lock;

/*
 * ZSD-related global variables.
 */
static kmutex_t zsd_key_lock;	/* protects the following two */
/*
 * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
 */
static zone_key_t zsd_keyval = 0;
/*
 * Global list of registered keys.  We use this when a new zone is created.
 */
static list_t zsd_registered_keys;

int zone_hash_size = 256;
static mod_hash_t *zonehashbyname, *zonehashbyid, *zonehashbylabel;
static kmutex_t zonehash_lock;
static uint_t zonecount;
static id_space_t *zoneid_space;

/*
 * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
 * kernel proper runs, and which manages all other zones.
 *
 * Although not declared as static, the variable "zone0" should not be used
 * except for by code that needs to reference the global zone early on in boot,
 * before it is fully initialized.  All other consumers should use
 * 'global_zone'.
 */
zone_t zone0;
zone_t *global_zone = NULL;	/* Set when the global zone is initialized */

/*
 * List of active zones, protected by zonehash_lock.
 */
static list_t zone_active;

/*
 * List of destroyed zones that still have outstanding cred references.
 * Used for debugging.  Uses a separate lock to avoid lock ordering
 * problems in zone_free.
 */
static list_t zone_deathrow;
static kmutex_t zone_deathrow_lock;

/* number of zones is limited by virtual interface limit in IP */
uint_t maxzones = 8192;

/* Event channel used to send zone state change notifications */
evchan_t *zone_event_chan;

/*
 * This table holds the mapping from kernel zone states to
 * states visible in the state notification API.
 * The idea is that we only expose "obvious" states and
 * do not expose states which are just implementation details.
 */
const char  *zone_status_table[] = {
	ZONE_EVENT_UNINITIALIZED,	/* uninitialized */
	ZONE_EVENT_INITIALIZED,		/* initialized */
	ZONE_EVENT_READY,		/* ready */
	ZONE_EVENT_READY,		/* booting */
	ZONE_EVENT_RUNNING,		/* running */
	ZONE_EVENT_SHUTTING_DOWN,	/* shutting_down */
	ZONE_EVENT_SHUTTING_DOWN,	/* empty */
	ZONE_EVENT_SHUTTING_DOWN,	/* down */
	ZONE_EVENT_SHUTTING_DOWN,	/* dying */
	ZONE_EVENT_UNINITIALIZED,	/* dead */
};

/*
 * This isn't static so lint doesn't complain.
 */
rctl_hndl_t rc_zone_cpu_shares;
rctl_hndl_t rc_zone_locked_mem;
rctl_hndl_t rc_zone_max_swap;
rctl_hndl_t rc_zone_cpu_cap;
rctl_hndl_t rc_zone_nlwps;
rctl_hndl_t rc_zone_shmmax;
rctl_hndl_t rc_zone_shmmni;
rctl_hndl_t rc_zone_semmni;
rctl_hndl_t rc_zone_msgmni;
/*
 * Synchronization primitives used to synchronize between mounts and zone
 * creation/destruction.
 */
static int mounts_in_progress;
static kcondvar_t mount_cv;
static kmutex_t mount_lock;

const char * const zone_default_initname = "/sbin/init";
static char * const zone_prefix = "/zone/";
static int zone_shutdown(zoneid_t zoneid);
static int zone_add_datalink(zoneid_t, char *);
static int zone_remove_datalink(zoneid_t, char *);
static int zone_check_datalink(zoneid_t *, char *);
static int zone_list_datalink(zoneid_t, int *, char *);

typedef boolean_t zsd_applyfn_t(kmutex_t *, boolean_t, zone_t *, zone_key_t);

static void zsd_apply_all_zones(zsd_applyfn_t *, zone_key_t);
static void zsd_apply_all_keys(zsd_applyfn_t *, zone_t *);
static boolean_t zsd_apply_create(kmutex_t *, boolean_t, zone_t *, zone_key_t);
static boolean_t zsd_apply_shutdown(kmutex_t *, boolean_t, zone_t *,
    zone_key_t);
static boolean_t zsd_apply_destroy(kmutex_t *, boolean_t, zone_t *, zone_key_t);
static boolean_t zsd_wait_for_creator(zone_t *, struct zsd_entry *,
    kmutex_t *);
static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
    kmutex_t *);

3710Sstevel@tonic-gate /*
372813Sdp  * Bump this number when you alter the zone syscall interfaces; this is
373813Sdp  * because we need to have support for previous API versions in libc
374813Sdp  * to support patching; libc calls into the kernel to determine this number.
375813Sdp  *
376813Sdp  * Version 1 of the API is the version originally shipped with Solaris 10
377813Sdp  * Version 2 alters the zone_create system call in order to support more
378813Sdp  *     arguments by moving the args into a structure; and to do better
379813Sdp  *     error reporting when zone_create() fails.
380813Sdp  * Version 3 alters the zone_create system call in order to support the
381813Sdp  *     import of ZFS datasets to zones.
3821676Sjpk  * Version 4 alters the zone_create system call in order to support
3831676Sjpk  *     Trusted Extensions.
3842267Sdp  * Version 5 alters the zone_boot system call, and converts its old
3852267Sdp  *     bootargs parameter to be set by the zone_setattr API instead.
3863448Sdh155122  * Version 6 adds the flag argument to zone_create.
387813Sdp  */
3883448Sdh155122 static const int ZONE_SYSCALL_API_VERSION = 6;
389813Sdp 
390813Sdp /*
3910Sstevel@tonic-gate  * Certain filesystems (such as NFS and autofs) need to know which zone
3920Sstevel@tonic-gate  * the mount is being placed in.  Because of this, we need to be able to
3930Sstevel@tonic-gate  * ensure that a zone isn't in the process of being created such that
3940Sstevel@tonic-gate  * nfs_mount() thinks it is in the global zone, while by the time it
3950Sstevel@tonic-gate  * gets added the list of mounted zones, it ends up on zoneA's mount
3960Sstevel@tonic-gate  * list.
3970Sstevel@tonic-gate  *
3980Sstevel@tonic-gate  * The following functions: block_mounts()/resume_mounts() and
3990Sstevel@tonic-gate  * mount_in_progress()/mount_completed() are used by zones and the VFS
4000Sstevel@tonic-gate  * layer (respectively) to synchronize zone creation and new mounts.
4010Sstevel@tonic-gate  *
4020Sstevel@tonic-gate  * The semantics are like a reader-reader lock such that there may
4030Sstevel@tonic-gate  * either be multiple mounts (or zone creations, if that weren't
4040Sstevel@tonic-gate  * serialized by zonehash_lock) in progress at the same time, but not
4050Sstevel@tonic-gate  * both.
4060Sstevel@tonic-gate  *
4070Sstevel@tonic-gate  * We use cv's so the user can ctrl-C out of the operation if it's
4080Sstevel@tonic-gate  * taking too long.
4090Sstevel@tonic-gate  *
4100Sstevel@tonic-gate  * The semantics are such that there is unfair bias towards the
4110Sstevel@tonic-gate  * "current" operation.  This means that zone creations may starve if
4120Sstevel@tonic-gate  * there is a rapid succession of new mounts coming in to the system, or
4130Sstevel@tonic-gate  * there is a remote possibility that zones will be created at such a
4140Sstevel@tonic-gate  * rate that new mounts will not be able to proceed.
4150Sstevel@tonic-gate  */
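/*
 * An illustrative sketch of the two sides (not a verbatim excerpt of
 * either caller): the VFS layer brackets each mount with
 *
 *	mount_in_progress();
 *	... call VFS_MOUNT() ...
 *	mount_completed();
 *
 * while the zone creation path brackets its critical region with
 *
 *	if (block_mounts() == 0)
 *		... back out; the wait was interrupted by a signal ...
 *	... finish setting up the zone's mount state ...
 *	resume_mounts();
 */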
/*
 * Prevent new mounts from progressing to the point of calling
 * VFS_MOUNT().  If there are already mounts in this "region", wait for
 * them to complete.
 */
static int
block_mounts(void)
{
	int retval = 0;

	/*
	 * Since it may block for a long time, block_mounts() shouldn't be
	 * called with zonehash_lock held.
	 */
	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
	mutex_enter(&mount_lock);
	while (mounts_in_progress > 0) {
		if (cv_wait_sig(&mount_cv, &mount_lock) == 0)
			goto signaled;
	}
	/*
	 * A negative value of mounts_in_progress indicates that mounts
	 * have been blocked by (-mounts_in_progress) different callers.
	 */
	mounts_in_progress--;
	retval = 1;
signaled:
	mutex_exit(&mount_lock);
	return (retval);
}

/*
 * The VFS layer may progress with new mounts as far as we're concerned.
 * Allow them to progress if we were the last obstacle.
 */
static void
resume_mounts(void)
{
	mutex_enter(&mount_lock);
	if (++mounts_in_progress == 0)
		cv_broadcast(&mount_cv);
	mutex_exit(&mount_lock);
}

/*
 * The VFS layer is busy with a mount; zones should wait until all
 * mounts are completed to progress.
 */
void
mount_in_progress(void)
{
	mutex_enter(&mount_lock);
	while (mounts_in_progress < 0)
		cv_wait(&mount_cv, &mount_lock);
	mounts_in_progress++;
	mutex_exit(&mount_lock);
}

/*
 * VFS is done with one mount; wake up any waiting block_mounts()
 * callers if this is the last mount.
 */
void
mount_completed(void)
{
	mutex_enter(&mount_lock);
	if (--mounts_in_progress == 0)
		cv_broadcast(&mount_cv);
	mutex_exit(&mount_lock);
}

/*
 * ZSD routines.
 *
 * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
 * defined by the pthread_key_create() and related interfaces.
 *
 * Kernel subsystems may register one or more data items and/or
 * callbacks to be executed when a zone is created, shutdown, or
 * destroyed.
 *
 * Unlike the thread counterpart, destructor callbacks will be executed
 * even if the data pointer is NULL and/or there are no constructor
 * callbacks, so it is the responsibility of such callbacks to check for
 * NULL data values if necessary.
 *
 * The locking strategy and overall picture is as follows:
 *
 * When someone calls zone_key_create(), a template ZSD entry is added to the
 * global list "zsd_registered_keys", protected by zsd_key_lock.  The
 * global list is updated first (under zsd_key_lock) so that newly
 * created zones always see the most recent list of keys.  Then, under
 * zonehash_lock, the existing zones are walked; each is marked
 * ZSD_CREATE_NEEDED and a copy of the ZSD entry is added to the
 * per-zone zone_zsd list (protected by zone_lock).  Similar locking is
 * used in zone_key_delete().
 *
 * The actual create, shutdown, and destroy callbacks are done without
 * holding any lock.  The zsd_flags are used to ensure that the operations
 * complete, so that by the time zone_key_create (and zone_create) or
 * zone_key_delete (and zone_destroy) returns, all the necessary callbacks
 * have run.
 *
 * When new zones are created constructor callbacks for all registered ZSD
 * entries will be called. That also uses the above two phases of marking
 * what needs to be done, and then running the callbacks without holding
 * any locks.
 *
 * The framework does not provide any locking around zone_getspecific() and
 * zone_setspecific() apart from that needed for internal consistency, so
 * callers interested in atomic "test-and-set" semantics will need to provide
 * their own locking.
 */
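/*
 * For example (an illustrative sketch only; the "foo" subsystem, its key
 * and its callbacks are hypothetical), a subsystem typically registers a
 * key once at module load time and deletes it at unload.  The destructor
 * checks for NULL since, as noted above, destructors run even when no
 * data was ever set:
 *
 *	static zone_key_t foo_zone_key;
 *
 *	static void *
 *	foo_zone_create(zoneid_t zoneid)
 *	{
 *		return (kmem_zalloc(sizeof (struct foo_zone), KM_SLEEP));
 *	}
 *
 *	static void
 *	foo_zone_destroy(zoneid_t zoneid, void *data)
 *	{
 *		if (data != NULL)
 *			kmem_free(data, sizeof (struct foo_zone));
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		zone_key_create(&foo_zone_key, foo_zone_create, NULL,
 *		    foo_zone_destroy);
 *	}
 *
 *	void
 *	foo_fini(void)
 *	{
 *		(void) zone_key_delete(foo_zone_key);
 *	}
 *
 * Per-zone state is then reached from any context holding a zone_t via
 * zone_getspecific(foo_zone_key, zone).
 */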

/*
 * Helper function to find the zsd_entry associated with the key in the
 * given list.
 */
static struct zsd_entry *
zsd_find(list_t *l, zone_key_t key)
{
	struct zsd_entry *zsd;

	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
		if (zsd->zsd_key == key) {
			return (zsd);
		}
	}
	return (NULL);
}

/*
 * Helper function to find the zsd_entry associated with the key in the
 * given list. Move it to the front of the list.
 */
static struct zsd_entry *
zsd_find_mru(list_t *l, zone_key_t key)
{
	struct zsd_entry *zsd;

	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
		if (zsd->zsd_key == key) {
			/*
			 * Move to head of list to keep list in MRU order.
			 */
			if (zsd != list_head(l)) {
				list_remove(l, zsd);
				list_insert_head(l, zsd);
			}
			return (zsd);
		}
	}
	return (NULL);
}

void
zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
    void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
{
	struct zsd_entry *zsdp;
	struct zsd_entry *t;
	struct zone *zone;
	zone_key_t  key;

	zsdp = kmem_zalloc(sizeof (*zsdp), KM_SLEEP);
	zsdp->zsd_data = NULL;
	zsdp->zsd_create = create;
	zsdp->zsd_shutdown = shutdown;
	zsdp->zsd_destroy = destroy;

	/*
	 * Insert in global list of callbacks. Makes future zone creations
	 * see it.
	 */
	mutex_enter(&zsd_key_lock);
	*keyp = key = zsdp->zsd_key = ++zsd_keyval;
	ASSERT(zsd_keyval != 0);
	list_insert_tail(&zsd_registered_keys, zsdp);
	mutex_exit(&zsd_key_lock);

	/*
	 * Insert for all existing zones and mark them as needing
	 * a create callback.
	 */
	mutex_enter(&zonehash_lock);	/* stop the world */
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		zone_status_t status;

		mutex_enter(&zone->zone_lock);

		/* Skip zones that are on the way down or not yet up */
		status = zone_status_get(zone);
		if (status >= ZONE_IS_DOWN ||
		    status == ZONE_IS_UNINITIALIZED) {
			mutex_exit(&zone->zone_lock);
			continue;
		}

		t = zsd_find_mru(&zone->zone_zsd, key);
		if (t != NULL) {
			/*
			 * A zsd_configure already inserted it after
			 * we dropped zsd_key_lock above.
			 */
			mutex_exit(&zone->zone_lock);
			continue;
		}
		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
		t->zsd_key = key;
		t->zsd_create = create;
		t->zsd_shutdown = shutdown;
		t->zsd_destroy = destroy;
		if (create != NULL) {
			t->zsd_flags = ZSD_CREATE_NEEDED;
			DTRACE_PROBE2(zsd__create__needed,
			    zone_t *, zone, zone_key_t, key);
		}
		list_insert_tail(&zone->zone_zsd, t);
		mutex_exit(&zone->zone_lock);
	}
	mutex_exit(&zonehash_lock);

	if (create != NULL) {
		/* Now call the create callback for this key */
		zsd_apply_all_zones(zsd_apply_create, key);
	}
}

/*
 * Function called when a module is being unloaded, or otherwise wishes
 * to unregister its ZSD key and callbacks.
 *
 * Remove from the global list and determine the functions that need to
 * be called under a global lock. Then call the functions without
 * holding any locks. Finally free up the zone_zsd entries. (The apply
 * functions need to access the zone_zsd entries to find zsd_data etc.)
 */
int
zone_key_delete(zone_key_t key)
{
	struct zsd_entry *zsdp = NULL;
	zone_t *zone;

	mutex_enter(&zsd_key_lock);
	zsdp = zsd_find_mru(&zsd_registered_keys, key);
	if (zsdp == NULL) {
		mutex_exit(&zsd_key_lock);
		return (-1);
	}
	list_remove(&zsd_registered_keys, zsdp);
	mutex_exit(&zsd_key_lock);

	mutex_enter(&zonehash_lock);
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		struct zsd_entry *del;

		mutex_enter(&zone->zone_lock);
		del = zsd_find_mru(&zone->zone_zsd, key);
		if (del == NULL) {
			/*
			 * Somebody else got here first, e.g. the zone going
			 * away.
			 */
			mutex_exit(&zone->zone_lock);
			continue;
		}
		ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
		ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
		if (del->zsd_shutdown != NULL &&
		    (del->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
			del->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
			DTRACE_PROBE2(zsd__shutdown__needed,
			    zone_t *, zone, zone_key_t, key);
		}
		if (del->zsd_destroy != NULL &&
		    (del->zsd_flags & ZSD_DESTROY_ALL) == 0) {
			del->zsd_flags |= ZSD_DESTROY_NEEDED;
			DTRACE_PROBE2(zsd__destroy__needed,
			    zone_t *, zone, zone_key_t, key);
		}
		mutex_exit(&zone->zone_lock);
	}
	mutex_exit(&zonehash_lock);
	kmem_free(zsdp, sizeof (*zsdp));

	/* Now call the shutdown and destroy callback for this key */
	zsd_apply_all_zones(zsd_apply_shutdown, key);
	zsd_apply_all_zones(zsd_apply_destroy, key);

	/* Now we can free up the zsdp structures in each zone */
	mutex_enter(&zonehash_lock);
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		struct zsd_entry *del;

		mutex_enter(&zone->zone_lock);
		del = zsd_find(&zone->zone_zsd, key);
		if (del != NULL) {
			list_remove(&zone->zone_zsd, del);
			ASSERT(!(del->zsd_flags & ZSD_ALL_INPROGRESS));
			kmem_free(del, sizeof (*del));
		}
		mutex_exit(&zone->zone_lock);
	}
	mutex_exit(&zonehash_lock);

	return (0);
}

/*
 * ZSD counterpart of pthread_setspecific().
 *
 * Since all zsd callbacks, including those with no create function,
 * have an entry in zone_zsd, if the key is registered it is part of
 * the zone_zsd list.
 * Return an error if the key wasn't registered.
7340Sstevel@tonic-gate  */
7350Sstevel@tonic-gate int
7360Sstevel@tonic-gate zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
7370Sstevel@tonic-gate {
7380Sstevel@tonic-gate 	struct zsd_entry *t;
7390Sstevel@tonic-gate 
7400Sstevel@tonic-gate 	mutex_enter(&zone->zone_lock);
7415880Snordmark 	t = zsd_find_mru(&zone->zone_zsd, key);
7420Sstevel@tonic-gate 	if (t != NULL) {
7430Sstevel@tonic-gate 		/*
7440Sstevel@tonic-gate 		 * Replace old value with new
7450Sstevel@tonic-gate 		 */
7460Sstevel@tonic-gate 		t->zsd_data = (void *)data;
7470Sstevel@tonic-gate 		mutex_exit(&zone->zone_lock);
7480Sstevel@tonic-gate 		return (0);
7490Sstevel@tonic-gate 	}
7500Sstevel@tonic-gate 	mutex_exit(&zone->zone_lock);
7515880Snordmark 	return (-1);
7520Sstevel@tonic-gate }
7530Sstevel@tonic-gate 
7540Sstevel@tonic-gate /*
7550Sstevel@tonic-gate  * ZSD counterpart of pthread_getspecific().
7560Sstevel@tonic-gate  */
7570Sstevel@tonic-gate void *
7580Sstevel@tonic-gate zone_getspecific(zone_key_t key, zone_t *zone)
7590Sstevel@tonic-gate {
7600Sstevel@tonic-gate 	struct zsd_entry *t;
7610Sstevel@tonic-gate 	void *data;
7620Sstevel@tonic-gate 
7630Sstevel@tonic-gate 	mutex_enter(&zone->zone_lock);
7645880Snordmark 	t = zsd_find_mru(&zone->zone_zsd, key);
7650Sstevel@tonic-gate 	data = (t == NULL ? NULL : t->zsd_data);
7660Sstevel@tonic-gate 	mutex_exit(&zone->zone_lock);
7670Sstevel@tonic-gate 	return (data);
7680Sstevel@tonic-gate }
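
/*
 * Note that, as documented above, the framework gives no atomicity
 * across a zone_getspecific()/zone_setspecific() pair.  A caller that
 * wants "create on first use" semantics might serialize with its own
 * lock (an illustrative sketch; foo_zone_key, foo_lock and foo_alloc()
 * are hypothetical):
 *
 *	mutex_enter(&foo_lock);
 *	if ((data = zone_getspecific(foo_zone_key, zone)) == NULL) {
 *		data = foo_alloc();
 *		(void) zone_setspecific(foo_zone_key, zone, data);
 *	}
 *	mutex_exit(&foo_lock);
 */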

/*
 * Function used to initialize a zone's list of ZSD callbacks and data
 * when the zone is being created.  The callbacks are initialized from
 * the template list (zsd_registered_keys). The constructor callback is
 * executed later (once the zone exists and with locks dropped).
 */
static void
zone_zsd_configure(zone_t *zone)
{
	struct zsd_entry *zsdp;
	struct zsd_entry *t;

	ASSERT(MUTEX_HELD(&zonehash_lock));
	ASSERT(list_head(&zone->zone_zsd) == NULL);
	mutex_enter(&zone->zone_lock);
	mutex_enter(&zsd_key_lock);
	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
		/*
		 * Since this zone is ZONE_IS_UNINITIALIZED, zone_key_create
		 * should not have added anything to it.
		 */
		ASSERT(zsd_find(&zone->zone_zsd, zsdp->zsd_key) == NULL);

		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
		t->zsd_key = zsdp->zsd_key;
		t->zsd_create = zsdp->zsd_create;
		t->zsd_shutdown = zsdp->zsd_shutdown;
		t->zsd_destroy = zsdp->zsd_destroy;
		if (zsdp->zsd_create != NULL) {
			t->zsd_flags = ZSD_CREATE_NEEDED;
			DTRACE_PROBE2(zsd__create__needed,
			    zone_t *, zone, zone_key_t, zsdp->zsd_key);
		}
		list_insert_tail(&zone->zone_zsd, t);
	}
	mutex_exit(&zsd_key_lock);
	mutex_exit(&zone->zone_lock);
}

enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };

/*
 * Helper function to execute shutdown or destructor callbacks.
 */
static void
zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
{
	struct zsd_entry *t;

	ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
	ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
	ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);

	/*
	 * Run the callback solely based on what is registered for the zone
	 * in zone_zsd. The global list can change independently of this
	 * as keys are registered and unregistered and we don't register new
	 * callbacks for a zone that is in the process of going away.
	 */
	mutex_enter(&zone->zone_lock);
	for (t = list_head(&zone->zone_zsd); t != NULL;
	    t = list_next(&zone->zone_zsd, t)) {
		zone_key_t key = t->zsd_key;

		/* Skip if no callbacks registered */

		if (ct == ZSD_SHUTDOWN) {
			if (t->zsd_shutdown != NULL &&
			    (t->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
				t->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
				DTRACE_PROBE2(zsd__shutdown__needed,
				    zone_t *, zone, zone_key_t, key);
			}
		} else {
			if (t->zsd_destroy != NULL &&
			    (t->zsd_flags & ZSD_DESTROY_ALL) == 0) {
				t->zsd_flags |= ZSD_DESTROY_NEEDED;
				DTRACE_PROBE2(zsd__destroy__needed,
				    zone_t *, zone, zone_key_t, key);
			}
		}
	}
	mutex_exit(&zone->zone_lock);

	/* Now call the shutdown and destroy callbacks for this zone */
	zsd_apply_all_keys(zsd_apply_shutdown, zone);
	zsd_apply_all_keys(zsd_apply_destroy, zone);

}

/*
 * Called when the zone is going away; free ZSD-related memory, and
 * destroy the zone_zsd list.
 */
static void
zone_free_zsd(zone_t *zone)
{
	struct zsd_entry *t, *next;

	/*
	 * Free all the zsd_entry's we had on this zone.
	 */
	mutex_enter(&zone->zone_lock);
	for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
		next = list_next(&zone->zone_zsd, t);
		list_remove(&zone->zone_zsd, t);
		ASSERT(!(t->zsd_flags & ZSD_ALL_INPROGRESS));
		kmem_free(t, sizeof (*t));
	}
	list_destroy(&zone->zone_zsd);
	mutex_exit(&zone->zone_lock);

}

/*
 * Apply a function to all zones for a particular key value.
 *
 * The applyfn has to drop zonehash_lock if it does some work, and
 * then reacquire it before it returns.
 * When the lock is dropped we don't follow list_next even
 * if it is possible to do so without any hazards. This is
 * because we want the design to allow for the list of zones
 * to change in any arbitrary way during the time the
 * lock was dropped.
 *
 * It is safe to restart the loop at list_head since the applyfn
 * changes the zsd_flags as it does work, so a subsequent
 * pass through will have no effect in applyfn, hence the loop will terminate
 * in at worst O(N^2).
 */
static void
zsd_apply_all_zones(zsd_applyfn_t *applyfn, zone_key_t key)
{
	zone_t *zone;

	mutex_enter(&zonehash_lock);
	zone = list_head(&zone_active);
	while (zone != NULL) {
		if ((applyfn)(&zonehash_lock, B_FALSE, zone, key)) {
			/* Lock dropped - restart at head */
			zone = list_head(&zone_active);
		} else {
			zone = list_next(&zone_active, zone);
		}
	}
	mutex_exit(&zonehash_lock);
}

/*
 * Apply a function to all keys for a particular zone.
 *
 * The applyfn has to drop zone_lock if it does some work, and
 * then reacquire it before it returns.
 * When the lock is dropped we don't follow list_next even
 * if it is possible to do so without any hazards. This is
 * because we want the design to allow for the list of zsd callbacks
 * to change in any arbitrary way during the time the
 * lock was dropped.
 *
 * It is safe to restart the loop at list_head since the applyfn
 * changes the zsd_flags as it does work, so a subsequent
 * pass through will have no effect in applyfn, hence the loop will terminate
 * in at worst O(N^2).
 */
static void
zsd_apply_all_keys(zsd_applyfn_t *applyfn, zone_t *zone)
{
	struct zsd_entry *t;

	mutex_enter(&zone->zone_lock);
	t = list_head(&zone->zone_zsd);
	while (t != NULL) {
		if ((applyfn)(NULL, B_TRUE, zone, t->zsd_key)) {
			/* Lock dropped - restart at head */
			t = list_head(&zone->zone_zsd);
		} else {
			t = list_next(&zone->zone_zsd, t);
		}
	}
	mutex_exit(&zone->zone_lock);
}

/*
 * Call the create function for the zone and key if CREATE_NEEDED
 * is set.
 * If some other thread gets here first and sets CREATE_INPROGRESS, then
 * we wait for that thread to complete so that we can ensure that
 * all the callbacks are done when we've looped over all zones/keys.
 *
 * When we call the create function, we drop the global lock held by the
 * caller, and return true to tell the caller it needs to re-evaluate the
 * state.
 * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
 * remains held on exit.
 */
static boolean_t
zsd_apply_create(kmutex_t *lockp, boolean_t zone_lock_held,
    zone_t *zone, zone_key_t key)
{
	void *result;
	struct zsd_entry *t;
	boolean_t dropped;

	if (lockp != NULL) {
		ASSERT(MUTEX_HELD(lockp));
	}
	if (zone_lock_held) {
		ASSERT(MUTEX_HELD(&zone->zone_lock));
	} else {
		mutex_enter(&zone->zone_lock);
	}

	t = zsd_find(&zone->zone_zsd, key);
	if (t == NULL) {
		/*
		 * Somebody else got here first, e.g. the zone going
		 * away.
		 */
		if (!zone_lock_held)
			mutex_exit(&zone->zone_lock);
		return (B_FALSE);
	}
	dropped = B_FALSE;
	if (zsd_wait_for_inprogress(zone, t, lockp))
		dropped = B_TRUE;

	if (t->zsd_flags & ZSD_CREATE_NEEDED) {
		t->zsd_flags &= ~ZSD_CREATE_NEEDED;
		t->zsd_flags |= ZSD_CREATE_INPROGRESS;
		DTRACE_PROBE2(zsd__create__inprogress,
		    zone_t *, zone, zone_key_t, key);
		mutex_exit(&zone->zone_lock);
		if (lockp != NULL)
			mutex_exit(lockp);

		dropped = B_TRUE;
		ASSERT(t->zsd_create != NULL);
		DTRACE_PROBE2(zsd__create__start,
		    zone_t *, zone, zone_key_t, key);

		result = (*t->zsd_create)(zone->zone_id);

		DTRACE_PROBE2(zsd__create__end,
		    zone_t *, zone, void *, result);

		ASSERT(result != NULL);
		if (lockp != NULL)
			mutex_enter(lockp);
		mutex_enter(&zone->zone_lock);
		t->zsd_data = result;
		t->zsd_flags &= ~ZSD_CREATE_INPROGRESS;
		t->zsd_flags |= ZSD_CREATE_COMPLETED;
		cv_broadcast(&t->zsd_cv);
		DTRACE_PROBE2(zsd__create__completed,
		    zone_t *, zone, zone_key_t, key);
	}
	if (!zone_lock_held)
		mutex_exit(&zone->zone_lock);
	return (dropped);
}

/*
 * Call the shutdown function for the zone and key if SHUTDOWN_NEEDED
 * is set.
 * If some other thread gets here first and sets *_INPROGRESS, then
 * we wait for that thread to complete so that we can ensure that
 * all the callbacks are done when we've looped over all zones/keys.
 *
 * When we call the shutdown function, we drop the global lock held by the
 * caller, and return true to tell the caller it needs to re-evaluate the
 * state.
 * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
 * remains held on exit.
 */
static boolean_t
zsd_apply_shutdown(kmutex_t *lockp, boolean_t zone_lock_held,
    zone_t *zone, zone_key_t key)
{
	struct zsd_entry *t;
	void *data;
	boolean_t dropped;

	if (lockp != NULL) {
		ASSERT(MUTEX_HELD(lockp));
	}
	if (zone_lock_held) {
		ASSERT(MUTEX_HELD(&zone->zone_lock));
	} else {
		mutex_enter(&zone->zone_lock);
	}

	t = zsd_find(&zone->zone_zsd, key);
	if (t == NULL) {
		/*
		 * Somebody else got here first, e.g. the zone going
10665880Snordmark 		 * away.
10675880Snordmark 		 */
10685880Snordmark 		if (!zone_lock_held)
10695880Snordmark 			mutex_exit(&zone->zone_lock);
10705880Snordmark 		return (B_FALSE);
10715880Snordmark 	}
10725880Snordmark 	dropped = B_FALSE;
10735880Snordmark 	if (zsd_wait_for_creator(zone, t, lockp))
10745880Snordmark 		dropped = B_TRUE;
10755880Snordmark 
10765880Snordmark 	if (zsd_wait_for_inprogress(zone, t, lockp))
10775880Snordmark 		dropped = B_TRUE;
10785880Snordmark 
10795880Snordmark 	if (t->zsd_flags & ZSD_SHUTDOWN_NEEDED) {
10805880Snordmark 		t->zsd_flags &= ~ZSD_SHUTDOWN_NEEDED;
10815880Snordmark 		t->zsd_flags |= ZSD_SHUTDOWN_INPROGRESS;
10825880Snordmark 		DTRACE_PROBE2(zsd__shutdown__inprogress,
10835880Snordmark 		    zone_t *, zone, zone_key_t, key);
10845880Snordmark 		mutex_exit(&zone->zone_lock);
10855880Snordmark 		if (lockp != NULL)
10865880Snordmark 			mutex_exit(lockp);
10875880Snordmark 		dropped = B_TRUE;
10885880Snordmark 
10895880Snordmark 		ASSERT(t->zsd_shutdown != NULL);
10905880Snordmark 		data = t->zsd_data;
10915880Snordmark 
10925880Snordmark 		DTRACE_PROBE2(zsd__shutdown__start,
10935880Snordmark 		    zone_t *, zone, zone_key_t, key);
10945880Snordmark 
10955880Snordmark 		(t->zsd_shutdown)(zone->zone_id, data);
10965880Snordmark 		DTRACE_PROBE2(zsd__shutdown__end,
10975880Snordmark 		    zone_t *, zone, zone_key_t, key);
10985880Snordmark 
10995880Snordmark 		if (lockp != NULL)
11005880Snordmark 			mutex_enter(lockp);
11015880Snordmark 		mutex_enter(&zone->zone_lock);
11025880Snordmark 		t->zsd_flags &= ~ZSD_SHUTDOWN_INPROGRESS;
11035880Snordmark 		t->zsd_flags |= ZSD_SHUTDOWN_COMPLETED;
11045880Snordmark 		cv_broadcast(&t->zsd_cv);
11055880Snordmark 		DTRACE_PROBE2(zsd__shutdown__completed,
11065880Snordmark 		    zone_t *, zone, zone_key_t, key);
11075880Snordmark 	}
11085880Snordmark 	if (!zone_lock_held)
11095880Snordmark 		mutex_exit(&zone->zone_lock);
11105880Snordmark 	return (dropped);
11115880Snordmark }
11125880Snordmark 
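/*
 * Example (editorial sketch, not part of the original source): because
 * zsd_apply_shutdown() may drop the global lock passed in as 'lockp', a
 * caller iterating a zone list under that lock must restart its scan
 * whenever the apply function reports that the lock was dropped, which is
 * the pattern the comment above implies.  The helper name, the lock
 * argument, and the direct walk of zone_active below are illustrative
 * only.
 *
 *	static void
 *	example_shutdown_all_zones(kmutex_t *global_lockp, zone_key_t key)
 *	{
 *		zone_t *zone;
 *
 *		mutex_enter(global_lockp);
 *	again:
 *		for (zone = list_head(&zone_active); zone != NULL;
 *		    zone = list_next(&zone_active, zone)) {
 *			if (zsd_apply_shutdown(global_lockp, B_FALSE,
 *			    zone, key)) {
 *				/ omitted: the lock was dropped and
 *				   re-taken, so restart the scan /
 *				goto again;
 *			}
 *		}
 *		mutex_exit(global_lockp);
 *	}
 */
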
11135880Snordmark /*
11145880Snordmark  * Call the destroy function for the zone and key if DESTROY_NEEDED
11155880Snordmark  * is set.
11165880Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
11175880Snordmark  * we wait for that thread to complete so that we can ensure that
11185880Snordmark  * all the callbacks are done when we've looped over all zones/keys.
11195880Snordmark  *
11205880Snordmark  * When we call the destroy function, we drop the global lock held by
11215880Snordmark  * the caller, and return true to tell the caller it needs to re-evaluate
11225880Snordmark  * the state.
11235880Snordmark  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
11245880Snordmark  * remains held on exit.
11255880Snordmark  */
11265880Snordmark static boolean_t
11275880Snordmark zsd_apply_destroy(kmutex_t *lockp, boolean_t zone_lock_held,
11285880Snordmark     zone_t *zone, zone_key_t key)
11295880Snordmark {
11305880Snordmark 	struct zsd_entry *t;
11315880Snordmark 	void *data;
11325880Snordmark 	boolean_t dropped;
11335880Snordmark 
11345880Snordmark 	if (lockp != NULL) {
11355880Snordmark 		ASSERT(MUTEX_HELD(lockp));
11365880Snordmark 	}
11375880Snordmark 	if (zone_lock_held) {
11385880Snordmark 		ASSERT(MUTEX_HELD(&zone->zone_lock));
11395880Snordmark 	} else {
11405880Snordmark 		mutex_enter(&zone->zone_lock);
11415880Snordmark 	}
11425880Snordmark 
11435880Snordmark 	t = zsd_find(&zone->zone_zsd, key);
11445880Snordmark 	if (t == NULL) {
11455880Snordmark 		/*
11465880Snordmark 		 * Somebody else got here first, e.g. the zone going
11475880Snordmark 		 * away.
11485880Snordmark 		 */
11495880Snordmark 		if (!zone_lock_held)
11505880Snordmark 			mutex_exit(&zone->zone_lock);
11515880Snordmark 		return (B_FALSE);
11525880Snordmark 	}
11535880Snordmark 	dropped = B_FALSE;
11545880Snordmark 	if (zsd_wait_for_creator(zone, t, lockp))
11555880Snordmark 		dropped = B_TRUE;
11565880Snordmark 
11575880Snordmark 	if (zsd_wait_for_inprogress(zone, t, lockp))
11585880Snordmark 		dropped = B_TRUE;
11595880Snordmark 
11605880Snordmark 	if (t->zsd_flags & ZSD_DESTROY_NEEDED) {
11615880Snordmark 		t->zsd_flags &= ~ZSD_DESTROY_NEEDED;
11625880Snordmark 		t->zsd_flags |= ZSD_DESTROY_INPROGRESS;
11635880Snordmark 		DTRACE_PROBE2(zsd__destroy__inprogress,
11645880Snordmark 		    zone_t *, zone, zone_key_t, key);
11655880Snordmark 		mutex_exit(&zone->zone_lock);
11665880Snordmark 		if (lockp != NULL)
11675880Snordmark 			mutex_exit(lockp);
11685880Snordmark 		dropped = B_TRUE;
11695880Snordmark 
11705880Snordmark 		ASSERT(t->zsd_destroy != NULL);
11715880Snordmark 		data = t->zsd_data;
11725880Snordmark 		DTRACE_PROBE2(zsd__destroy__start,
11735880Snordmark 		    zone_t *, zone, zone_key_t, key);
11745880Snordmark 
11755880Snordmark 		(t->zsd_destroy)(zone->zone_id, data);
11765880Snordmark 		DTRACE_PROBE2(zsd__destroy__end,
11775880Snordmark 		    zone_t *, zone, zone_key_t, key);
11785880Snordmark 
11795880Snordmark 		if (lockp != NULL)
11805880Snordmark 			mutex_enter(lockp);
11815880Snordmark 		mutex_enter(&zone->zone_lock);
11825880Snordmark 		t->zsd_data = NULL;
11835880Snordmark 		t->zsd_flags &= ~ZSD_DESTROY_INPROGRESS;
11845880Snordmark 		t->zsd_flags |= ZSD_DESTROY_COMPLETED;
11855880Snordmark 		cv_broadcast(&t->zsd_cv);
11865880Snordmark 		DTRACE_PROBE2(zsd__destroy__completed,
11875880Snordmark 		    zone_t *, zone, zone_key_t, key);
11885880Snordmark 	}
11895880Snordmark 	if (!zone_lock_held)
11905880Snordmark 		mutex_exit(&zone->zone_lock);
11915880Snordmark 	return (dropped);
11925880Snordmark }
11935880Snordmark 
11945880Snordmark /*
11955880Snordmark  * Wait for any CREATE_NEEDED flag to be cleared.
11965880Snordmark  * Returns true if lockp was temporarily dropped while waiting.
11975880Snordmark  */
11985880Snordmark static boolean_t
11995880Snordmark zsd_wait_for_creator(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
12005880Snordmark {
12015880Snordmark 	boolean_t dropped = B_FALSE;
12025880Snordmark 
12035880Snordmark 	while (t->zsd_flags & ZSD_CREATE_NEEDED) {
12045880Snordmark 		DTRACE_PROBE2(zsd__wait__for__creator,
12055880Snordmark 		    zone_t *, zone, struct zsd_entry *, t);
12065880Snordmark 		if (lockp != NULL) {
12075880Snordmark 			dropped = B_TRUE;
12085880Snordmark 			mutex_exit(lockp);
12095880Snordmark 		}
12105880Snordmark 		cv_wait(&t->zsd_cv, &zone->zone_lock);
12115880Snordmark 		if (lockp != NULL) {
12125880Snordmark 			/* First drop zone_lock to preserve order */
12135880Snordmark 			mutex_exit(&zone->zone_lock);
12145880Snordmark 			mutex_enter(lockp);
12155880Snordmark 			mutex_enter(&zone->zone_lock);
12165880Snordmark 		}
12175880Snordmark 	}
12185880Snordmark 	return (dropped);
12195880Snordmark }
12205880Snordmark 
12215880Snordmark /*
12225880Snordmark  * Wait for any INPROGRESS flag to be cleared.
12235880Snordmark  * Returns true if lockp was temporarily dropped while waiting.
12245880Snordmark  */
12255880Snordmark static boolean_t
12265880Snordmark zsd_wait_for_inprogress(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
12275880Snordmark {
12285880Snordmark 	boolean_t dropped = B_FALSE;
12295880Snordmark 
12305880Snordmark 	while (t->zsd_flags & ZSD_ALL_INPROGRESS) {
12315880Snordmark 		DTRACE_PROBE2(zsd__wait__for__inprogress,
12325880Snordmark 		    zone_t *, zone, struct zsd_entry *, t);
12335880Snordmark 		if (lockp != NULL) {
12345880Snordmark 			dropped = B_TRUE;
12355880Snordmark 			mutex_exit(lockp);
12365880Snordmark 		}
12375880Snordmark 		cv_wait(&t->zsd_cv, &zone->zone_lock);
12385880Snordmark 		if (lockp != NULL) {
12395880Snordmark 			/* First drop zone_lock to preserve order */
12405880Snordmark 			mutex_exit(&zone->zone_lock);
12415880Snordmark 			mutex_enter(lockp);
12425880Snordmark 			mutex_enter(&zone->zone_lock);
12435880Snordmark 		}
12445880Snordmark 	}
12455880Snordmark 	return (dropped);
12460Sstevel@tonic-gate }
12470Sstevel@tonic-gate 
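/*
 * Editorial note (illustrative, not part of the original source): both
 * wait helpers above preserve the lock ordering "lockp before zone_lock".
 * cv_wait() returns holding only zone_lock, so before re-acquiring lockp
 * each helper drops zone_lock, takes lockp, and only then re-enters
 * zone_lock:
 *
 *	cv_wait(&t->zsd_cv, &zone->zone_lock);	zone_lock held on return
 *	mutex_exit(&zone->zone_lock);		release the inner lock
 *	mutex_enter(lockp);			outer (global) lock first
 *	mutex_enter(&zone->zone_lock);		then the inner lock again
 *
 * Taking lockp while still holding zone_lock would invert that order and
 * could deadlock against a thread that acquires lockp and then zone_lock.
 */
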
12480Sstevel@tonic-gate /*
1249789Sahrens  * Frees memory associated with the zone dataset list.
1250789Sahrens  */
1251789Sahrens static void
1252789Sahrens zone_free_datasets(zone_t *zone)
1253789Sahrens {
1254789Sahrens 	zone_dataset_t *t, *next;
1255789Sahrens 
1256789Sahrens 	for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
1257789Sahrens 		next = list_next(&zone->zone_datasets, t);
1258789Sahrens 		list_remove(&zone->zone_datasets, t);
1259789Sahrens 		kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
1260789Sahrens 		kmem_free(t, sizeof (*t));
1261789Sahrens 	}
1262789Sahrens 	list_destroy(&zone->zone_datasets);
1263789Sahrens }
1264789Sahrens 
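/*
 * Example (editorial sketch): zone_free_datasets() above uses the standard
 * list(9F) teardown idiom of fetching the next element before removing the
 * current one.  The matching construction side looks roughly like the
 * sketch below; the function name and the source of 'name' are
 * illustrative and are not the code that actually populates zone_datasets.
 *
 *	static void
 *	example_add_dataset(zone_t *zone, const char *name)
 *	{
 *		zone_dataset_t *zd;
 *
 *		zd = kmem_alloc(sizeof (zone_dataset_t), KM_SLEEP);
 *		zd->zd_dataset = kmem_alloc(strlen(name) + 1, KM_SLEEP);
 *		(void) strcpy(zd->zd_dataset, name);
 *		list_insert_tail(&zone->zone_datasets, zd);
 *	}
 */
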
1265789Sahrens /*
12660Sstevel@tonic-gate  * zone.cpu-shares resource control support.
12670Sstevel@tonic-gate  */
12680Sstevel@tonic-gate /*ARGSUSED*/
12690Sstevel@tonic-gate static rctl_qty_t
12700Sstevel@tonic-gate zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
12710Sstevel@tonic-gate {
12720Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
12730Sstevel@tonic-gate 	return (p->p_zone->zone_shares);
12740Sstevel@tonic-gate }
12750Sstevel@tonic-gate 
12760Sstevel@tonic-gate /*ARGSUSED*/
12770Sstevel@tonic-gate static int
12780Sstevel@tonic-gate zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
12790Sstevel@tonic-gate     rctl_qty_t nv)
12800Sstevel@tonic-gate {
12810Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
12820Sstevel@tonic-gate 	ASSERT(e->rcep_t == RCENTITY_ZONE);
12830Sstevel@tonic-gate 	if (e->rcep_p.zone == NULL)
12840Sstevel@tonic-gate 		return (0);
12850Sstevel@tonic-gate 
12860Sstevel@tonic-gate 	e->rcep_p.zone->zone_shares = nv;
12870Sstevel@tonic-gate 	return (0);
12880Sstevel@tonic-gate }
12890Sstevel@tonic-gate 
12900Sstevel@tonic-gate static rctl_ops_t zone_cpu_shares_ops = {
12910Sstevel@tonic-gate 	rcop_no_action,
12920Sstevel@tonic-gate 	zone_cpu_shares_usage,
12930Sstevel@tonic-gate 	zone_cpu_shares_set,
12940Sstevel@tonic-gate 	rcop_no_test
12950Sstevel@tonic-gate };
12960Sstevel@tonic-gate 
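/*
 * Example (editorial sketch): a zone-level resource control plugs into the
 * generic rctl framework through an rctl_ops_t whose slots are, in order,
 * the action, usage, set and test callbacks; rcop_no_* fills any unused
 * slot.  The hypothetical "zone.max-widgets" control below mirrors the
 * shape of zone_cpu_shares_ops; the zone_widgets field is illustrative and
 * does not exist in zone_t.  It would be registered with rctl_register()
 * the same way zone_init() registers the real controls later in this file.
 *
 *	static rctl_qty_t
 *	zone_widgets_usage(rctl_t *rctl, struct proc *p)
 *	{
 *		ASSERT(MUTEX_HELD(&p->p_lock));
 *		return (p->p_zone->zone_widgets);
 *	}
 *
 *	static int
 *	zone_widgets_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *	    rctl_val_t *rval, rctl_qty_t incr, uint_t flags)
 *	{
 *		ASSERT(MUTEX_HELD(&p->p_lock));
 *		ASSERT(e->rcep_t == RCENTITY_ZONE);
 *		if (e->rcep_p.zone->zone_widgets + incr > rval->rcv_value)
 *			return (1);
 *		return (0);
 *	}
 *
 *	static rctl_ops_t zone_widgets_ops = {
 *		rcop_no_action,
 *		zone_widgets_usage,
 *		rcop_no_set,
 *		zone_widgets_test
 *	};
 */
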
12973792Sakolb /*
12983792Sakolb  * zone.cpu-cap resource control support.
12993792Sakolb  */
13003792Sakolb /*ARGSUSED*/
13013792Sakolb static rctl_qty_t
13023792Sakolb zone_cpu_cap_get(rctl_t *rctl, struct proc *p)
13033792Sakolb {
13043792Sakolb 	ASSERT(MUTEX_HELD(&p->p_lock));
13053792Sakolb 	return (cpucaps_zone_get(p->p_zone));
13063792Sakolb }
13073792Sakolb 
13083792Sakolb /*ARGSUSED*/
13093792Sakolb static int
13103792Sakolb zone_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
13113792Sakolb     rctl_qty_t nv)
13123792Sakolb {
13133792Sakolb 	zone_t *zone = e->rcep_p.zone;
13143792Sakolb 
13153792Sakolb 	ASSERT(MUTEX_HELD(&p->p_lock));
13163792Sakolb 	ASSERT(e->rcep_t == RCENTITY_ZONE);
13173792Sakolb 
13183792Sakolb 	if (zone == NULL)
13193792Sakolb 		return (0);
13203792Sakolb 
13213792Sakolb 	/*
13223792Sakolb 	 * Set the cap to the new value.
13233792Sakolb 	 */
13243792Sakolb 	return (cpucaps_zone_set(zone, nv));
13253792Sakolb }
13263792Sakolb 
13273792Sakolb static rctl_ops_t zone_cpu_cap_ops = {
13283792Sakolb 	rcop_no_action,
13293792Sakolb 	zone_cpu_cap_get,
13303792Sakolb 	zone_cpu_cap_set,
13313792Sakolb 	rcop_no_test
13323792Sakolb };
13333792Sakolb 
13340Sstevel@tonic-gate /*ARGSUSED*/
13350Sstevel@tonic-gate static rctl_qty_t
13360Sstevel@tonic-gate zone_lwps_usage(rctl_t *r, proc_t *p)
13370Sstevel@tonic-gate {
13380Sstevel@tonic-gate 	rctl_qty_t nlwps;
13390Sstevel@tonic-gate 	zone_t *zone = p->p_zone;
13400Sstevel@tonic-gate 
13410Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
13420Sstevel@tonic-gate 
13430Sstevel@tonic-gate 	mutex_enter(&zone->zone_nlwps_lock);
13440Sstevel@tonic-gate 	nlwps = zone->zone_nlwps;
13450Sstevel@tonic-gate 	mutex_exit(&zone->zone_nlwps_lock);
13460Sstevel@tonic-gate 
13470Sstevel@tonic-gate 	return (nlwps);
13480Sstevel@tonic-gate }
13490Sstevel@tonic-gate 
13500Sstevel@tonic-gate /*ARGSUSED*/
13510Sstevel@tonic-gate static int
13520Sstevel@tonic-gate zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
13530Sstevel@tonic-gate     rctl_qty_t incr, uint_t flags)
13540Sstevel@tonic-gate {
13550Sstevel@tonic-gate 	rctl_qty_t nlwps;
13560Sstevel@tonic-gate 
13570Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
13580Sstevel@tonic-gate 	ASSERT(e->rcep_t == RCENTITY_ZONE);
13590Sstevel@tonic-gate 	if (e->rcep_p.zone == NULL)
13600Sstevel@tonic-gate 		return (0);
13610Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
13620Sstevel@tonic-gate 	nlwps = e->rcep_p.zone->zone_nlwps;
13630Sstevel@tonic-gate 
13640Sstevel@tonic-gate 	if (nlwps + incr > rcntl->rcv_value)
13650Sstevel@tonic-gate 		return (1);
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate 	return (0);
13680Sstevel@tonic-gate }
13690Sstevel@tonic-gate 
13700Sstevel@tonic-gate /*ARGSUSED*/
13710Sstevel@tonic-gate static int
13722768Ssl108498 zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
13732768Ssl108498 {
13740Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
13750Sstevel@tonic-gate 	ASSERT(e->rcep_t == RCENTITY_ZONE);
13760Sstevel@tonic-gate 	if (e->rcep_p.zone == NULL)
13770Sstevel@tonic-gate 		return (0);
13780Sstevel@tonic-gate 	e->rcep_p.zone->zone_nlwps_ctl = nv;
13790Sstevel@tonic-gate 	return (0);
13800Sstevel@tonic-gate }
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate static rctl_ops_t zone_lwps_ops = {
13830Sstevel@tonic-gate 	rcop_no_action,
13840Sstevel@tonic-gate 	zone_lwps_usage,
13850Sstevel@tonic-gate 	zone_lwps_set,
13860Sstevel@tonic-gate 	zone_lwps_test,
13870Sstevel@tonic-gate };
13880Sstevel@tonic-gate 
13892677Sml93401 /*ARGSUSED*/
13902677Sml93401 static int
13912677Sml93401 zone_shmmax_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
13922677Sml93401     rctl_qty_t incr, uint_t flags)
13932677Sml93401 {
13942677Sml93401 	rctl_qty_t v;
13952677Sml93401 	ASSERT(MUTEX_HELD(&p->p_lock));
13962677Sml93401 	ASSERT(e->rcep_t == RCENTITY_ZONE);
13972677Sml93401 	v = e->rcep_p.zone->zone_shmmax + incr;
13982677Sml93401 	if (v > rval->rcv_value)
13992677Sml93401 		return (1);
14002677Sml93401 	return (0);
14012677Sml93401 }
14022677Sml93401 
14032677Sml93401 static rctl_ops_t zone_shmmax_ops = {
14042677Sml93401 	rcop_no_action,
14052677Sml93401 	rcop_no_usage,
14062677Sml93401 	rcop_no_set,
14072677Sml93401 	zone_shmmax_test
14082677Sml93401 };
14092677Sml93401 
14102677Sml93401 /*ARGSUSED*/
14112677Sml93401 static int
14122677Sml93401 zone_shmmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
14132677Sml93401     rctl_qty_t incr, uint_t flags)
14142677Sml93401 {
14152677Sml93401 	rctl_qty_t v;
14162677Sml93401 	ASSERT(MUTEX_HELD(&p->p_lock));
14172677Sml93401 	ASSERT(e->rcep_t == RCENTITY_ZONE);
14182677Sml93401 	v = e->rcep_p.zone->zone_ipc.ipcq_shmmni + incr;
14192677Sml93401 	if (v > rval->rcv_value)
14202677Sml93401 		return (1);
14212677Sml93401 	return (0);
14222677Sml93401 }
14232677Sml93401 
14242677Sml93401 static rctl_ops_t zone_shmmni_ops = {
14252677Sml93401 	rcop_no_action,
14262677Sml93401 	rcop_no_usage,
14272677Sml93401 	rcop_no_set,
14282677Sml93401 	zone_shmmni_test
14292677Sml93401 };
14302677Sml93401 
14312677Sml93401 /*ARGSUSED*/
14322677Sml93401 static int
14332677Sml93401 zone_semmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
14342677Sml93401     rctl_qty_t incr, uint_t flags)
14352677Sml93401 {
14362677Sml93401 	rctl_qty_t v;
14372677Sml93401 	ASSERT(MUTEX_HELD(&p->p_lock));
14382677Sml93401 	ASSERT(e->rcep_t == RCENTITY_ZONE);
14392677Sml93401 	v = e->rcep_p.zone->zone_ipc.ipcq_semmni + incr;
14402677Sml93401 	if (v > rval->rcv_value)
14412677Sml93401 		return (1);
14422677Sml93401 	return (0);
14432677Sml93401 }
14442677Sml93401 
14452677Sml93401 static rctl_ops_t zone_semmni_ops = {
14462677Sml93401 	rcop_no_action,
14472677Sml93401 	rcop_no_usage,
14482677Sml93401 	rcop_no_set,
14492677Sml93401 	zone_semmni_test
14502677Sml93401 };
14512677Sml93401 
14522677Sml93401 /*ARGSUSED*/
14532677Sml93401 static int
14542677Sml93401 zone_msgmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
14552677Sml93401     rctl_qty_t incr, uint_t flags)
14562677Sml93401 {
14572677Sml93401 	rctl_qty_t v;
14582677Sml93401 	ASSERT(MUTEX_HELD(&p->p_lock));
14592677Sml93401 	ASSERT(e->rcep_t == RCENTITY_ZONE);
14602677Sml93401 	v = e->rcep_p.zone->zone_ipc.ipcq_msgmni + incr;
14612677Sml93401 	if (v > rval->rcv_value)
14622677Sml93401 		return (1);
14632677Sml93401 	return (0);
14642677Sml93401 }
14652677Sml93401 
14662677Sml93401 static rctl_ops_t zone_msgmni_ops = {
14672677Sml93401 	rcop_no_action,
14682677Sml93401 	rcop_no_usage,
14692677Sml93401 	rcop_no_set,
14702677Sml93401 	zone_msgmni_test
14712677Sml93401 };
14722677Sml93401 
14732768Ssl108498 /*ARGSUSED*/
14742768Ssl108498 static rctl_qty_t
14752768Ssl108498 zone_locked_mem_usage(rctl_t *rctl, struct proc *p)
14762768Ssl108498 {
14772768Ssl108498 	rctl_qty_t q;
14782768Ssl108498 	ASSERT(MUTEX_HELD(&p->p_lock));
14793247Sgjelinek 	mutex_enter(&p->p_zone->zone_mem_lock);
14802768Ssl108498 	q = p->p_zone->zone_locked_mem;
14813247Sgjelinek 	mutex_exit(&p->p_zone->zone_mem_lock);
14822768Ssl108498 	return (q);
14832768Ssl108498 }
14842768Ssl108498 
14852768Ssl108498 /*ARGSUSED*/
14862768Ssl108498 static int
14872768Ssl108498 zone_locked_mem_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
14882768Ssl108498     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
14892768Ssl108498 {
14902768Ssl108498 	rctl_qty_t q;
14913247Sgjelinek 	zone_t *z;
14923247Sgjelinek 
14933247Sgjelinek 	z = e->rcep_p.zone;
14942768Ssl108498 	ASSERT(MUTEX_HELD(&p->p_lock));
14953247Sgjelinek 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
14963247Sgjelinek 	q = z->zone_locked_mem;
14972768Ssl108498 	if (q + incr > rcntl->rcv_value)
14982768Ssl108498 		return (1);
14992768Ssl108498 	return (0);
15002768Ssl108498 }
15012768Ssl108498 
15022768Ssl108498 /*ARGSUSED*/
15032768Ssl108498 static int
15042768Ssl108498 zone_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
15052768Ssl108498     rctl_qty_t nv)
15062768Ssl108498 {
15072768Ssl108498 	ASSERT(MUTEX_HELD(&p->p_lock));
15082768Ssl108498 	ASSERT(e->rcep_t == RCENTITY_ZONE);
15092768Ssl108498 	if (e->rcep_p.zone == NULL)
15102768Ssl108498 		return (0);
15112768Ssl108498 	e->rcep_p.zone->zone_locked_mem_ctl = nv;
15122768Ssl108498 	return (0);
15132768Ssl108498 }
15142768Ssl108498 
15152768Ssl108498 static rctl_ops_t zone_locked_mem_ops = {
15162768Ssl108498 	rcop_no_action,
15172768Ssl108498 	zone_locked_mem_usage,
15182768Ssl108498 	zone_locked_mem_set,
15192768Ssl108498 	zone_locked_mem_test
15202768Ssl108498 };
15212677Sml93401 
15223247Sgjelinek /*ARGSUSED*/
15233247Sgjelinek static rctl_qty_t
15243247Sgjelinek zone_max_swap_usage(rctl_t *rctl, struct proc *p)
15253247Sgjelinek {
15263247Sgjelinek 	rctl_qty_t q;
15273247Sgjelinek 	zone_t *z = p->p_zone;
15283247Sgjelinek 
15293247Sgjelinek 	ASSERT(MUTEX_HELD(&p->p_lock));
15303247Sgjelinek 	mutex_enter(&z->zone_mem_lock);
15313247Sgjelinek 	q = z->zone_max_swap;
15323247Sgjelinek 	mutex_exit(&z->zone_mem_lock);
15333247Sgjelinek 	return (q);
15343247Sgjelinek }
15353247Sgjelinek 
15363247Sgjelinek /*ARGSUSED*/
15373247Sgjelinek static int
15383247Sgjelinek zone_max_swap_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
15393247Sgjelinek     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
15403247Sgjelinek {
15413247Sgjelinek 	rctl_qty_t q;
15423247Sgjelinek 	zone_t *z;
15433247Sgjelinek 
15443247Sgjelinek 	z = e->rcep_p.zone;
15453247Sgjelinek 	ASSERT(MUTEX_HELD(&p->p_lock));
15463247Sgjelinek 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
15473247Sgjelinek 	q = z->zone_max_swap;
15483247Sgjelinek 	if (q + incr > rcntl->rcv_value)
15493247Sgjelinek 		return (1);
15503247Sgjelinek 	return (0);
15513247Sgjelinek }
15523247Sgjelinek 
15533247Sgjelinek /*ARGSUSED*/
15543247Sgjelinek static int
15553247Sgjelinek zone_max_swap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
15563247Sgjelinek     rctl_qty_t nv)
15573247Sgjelinek {
15583247Sgjelinek 	ASSERT(MUTEX_HELD(&p->p_lock));
15593247Sgjelinek 	ASSERT(e->rcep_t == RCENTITY_ZONE);
15603247Sgjelinek 	if (e->rcep_p.zone == NULL)
15613247Sgjelinek 		return (0);
15623247Sgjelinek 	e->rcep_p.zone->zone_max_swap_ctl = nv;
15633247Sgjelinek 	return (0);
15643247Sgjelinek }
15653247Sgjelinek 
15663247Sgjelinek static rctl_ops_t zone_max_swap_ops = {
15673247Sgjelinek 	rcop_no_action,
15683247Sgjelinek 	zone_max_swap_usage,
15693247Sgjelinek 	zone_max_swap_set,
15703247Sgjelinek 	zone_max_swap_test
15713247Sgjelinek };
15723247Sgjelinek 
15730Sstevel@tonic-gate /*
15740Sstevel@tonic-gate  * Helper function to brand the zone with a unique ID.
15750Sstevel@tonic-gate  */
15760Sstevel@tonic-gate static void
15770Sstevel@tonic-gate zone_uniqid(zone_t *zone)
15780Sstevel@tonic-gate {
15790Sstevel@tonic-gate 	static uint64_t uniqid = 0;
15800Sstevel@tonic-gate 
15810Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zonehash_lock));
15820Sstevel@tonic-gate 	zone->zone_uniqid = uniqid++;
15830Sstevel@tonic-gate }
15840Sstevel@tonic-gate 
15850Sstevel@tonic-gate /*
15860Sstevel@tonic-gate  * Returns a held pointer to the "kcred" for the specified zone.
15870Sstevel@tonic-gate  */
15880Sstevel@tonic-gate struct cred *
15890Sstevel@tonic-gate zone_get_kcred(zoneid_t zoneid)
15900Sstevel@tonic-gate {
15910Sstevel@tonic-gate 	zone_t *zone;
15920Sstevel@tonic-gate 	cred_t *cr;
15930Sstevel@tonic-gate 
15940Sstevel@tonic-gate 	if ((zone = zone_find_by_id(zoneid)) == NULL)
15950Sstevel@tonic-gate 		return (NULL);
15960Sstevel@tonic-gate 	cr = zone->zone_kcred;
15970Sstevel@tonic-gate 	crhold(cr);
15980Sstevel@tonic-gate 	zone_rele(zone);
15990Sstevel@tonic-gate 	return (cr);
16000Sstevel@tonic-gate }
16010Sstevel@tonic-gate 
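/*
 * Example (editorial sketch): zone_get_kcred() returns the zone's kcred
 * with an extra hold taken on the caller's behalf, so the caller must
 * drop that hold with crfree() once it is finished with the credential.
 * The zoneid variable below is illustrative.
 *
 *	cred_t *cr;
 *
 *	if ((cr = zone_get_kcred(zoneid)) != NULL) {
 *		(use cr as the credential for work done on the zone's behalf)
 *		crfree(cr);
 *	}
 */
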
16023247Sgjelinek static int
16033247Sgjelinek zone_lockedmem_kstat_update(kstat_t *ksp, int rw)
16043247Sgjelinek {
16053247Sgjelinek 	zone_t *zone = ksp->ks_private;
16063247Sgjelinek 	zone_kstat_t *zk = ksp->ks_data;
16073247Sgjelinek 
16083247Sgjelinek 	if (rw == KSTAT_WRITE)
16093247Sgjelinek 		return (EACCES);
16103247Sgjelinek 
16113247Sgjelinek 	zk->zk_usage.value.ui64 = zone->zone_locked_mem;
16123247Sgjelinek 	zk->zk_value.value.ui64 = zone->zone_locked_mem_ctl;
16133247Sgjelinek 	return (0);
16143247Sgjelinek }
16153247Sgjelinek 
16163247Sgjelinek static int
16173247Sgjelinek zone_swapresv_kstat_update(kstat_t *ksp, int rw)
16183247Sgjelinek {
16193247Sgjelinek 	zone_t *zone = ksp->ks_private;
16203247Sgjelinek 	zone_kstat_t *zk = ksp->ks_data;
16213247Sgjelinek 
16223247Sgjelinek 	if (rw == KSTAT_WRITE)
16233247Sgjelinek 		return (EACCES);
16243247Sgjelinek 
16253247Sgjelinek 	zk->zk_usage.value.ui64 = zone->zone_max_swap;
16263247Sgjelinek 	zk->zk_value.value.ui64 = zone->zone_max_swap_ctl;
16273247Sgjelinek 	return (0);
16283247Sgjelinek }
16293247Sgjelinek 
16303247Sgjelinek static void
16313247Sgjelinek zone_kstat_create(zone_t *zone)
16323247Sgjelinek {
16333247Sgjelinek 	kstat_t *ksp;
16343247Sgjelinek 	zone_kstat_t *zk;
16353247Sgjelinek 
16363247Sgjelinek 	ksp = rctl_kstat_create_zone(zone, "lockedmem", KSTAT_TYPE_NAMED,
16373247Sgjelinek 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
16383247Sgjelinek 	    KSTAT_FLAG_VIRTUAL);
16393247Sgjelinek 
16403247Sgjelinek 	if (ksp == NULL)
16413247Sgjelinek 		return;
16423247Sgjelinek 
16433247Sgjelinek 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
16443247Sgjelinek 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
16453247Sgjelinek 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
16463247Sgjelinek 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
16473247Sgjelinek 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
16483247Sgjelinek 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
16493247Sgjelinek 	ksp->ks_update = zone_lockedmem_kstat_update;
16503247Sgjelinek 	ksp->ks_private = zone;
16513247Sgjelinek 	kstat_install(ksp);
16523247Sgjelinek 
16533247Sgjelinek 	zone->zone_lockedmem_kstat = ksp;
16543247Sgjelinek 
16553247Sgjelinek 	ksp = rctl_kstat_create_zone(zone, "swapresv", KSTAT_TYPE_NAMED,
16563247Sgjelinek 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
16573247Sgjelinek 	    KSTAT_FLAG_VIRTUAL);
16583247Sgjelinek 
16593247Sgjelinek 	if (ksp == NULL)
16603247Sgjelinek 		return;
16613247Sgjelinek 
16623247Sgjelinek 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
16633247Sgjelinek 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
16643247Sgjelinek 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
16653247Sgjelinek 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
16663247Sgjelinek 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
16673247Sgjelinek 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
16683247Sgjelinek 	ksp->ks_update = zone_swapresv_kstat_update;
16693247Sgjelinek 	ksp->ks_private = zone;
16703247Sgjelinek 	kstat_install(ksp);
16713247Sgjelinek 
16723247Sgjelinek 	zone->zone_swapresv_kstat = ksp;
16733247Sgjelinek }
16743247Sgjelinek 
16753247Sgjelinek static void
16763247Sgjelinek zone_kstat_delete(zone_t *zone)
16773247Sgjelinek {
16783247Sgjelinek 	void *data;
16793247Sgjelinek 
16803247Sgjelinek 	if (zone->zone_lockedmem_kstat != NULL) {
16813247Sgjelinek 		data = zone->zone_lockedmem_kstat->ks_data;
16823247Sgjelinek 		kstat_delete(zone->zone_lockedmem_kstat);
16833247Sgjelinek 		kmem_free(data, sizeof (zone_kstat_t));
16843247Sgjelinek 	}
16853247Sgjelinek 	if (zone->zone_swapresv_kstat != NULL) {
16863247Sgjelinek 		data = zone->zone_swapresv_kstat->ks_data;
16873247Sgjelinek 		kstat_delete(zone->zone_swapresv_kstat);
16883247Sgjelinek 		kmem_free(data, sizeof (zone_kstat_t));
16893247Sgjelinek 	}
16903247Sgjelinek }
16913247Sgjelinek 
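/*
 * Example (editorial sketch): the "usage" and "value" counters installed
 * above are meant to be read from userland with libkstat(3LIB).  The
 * module/instance naming is applied by rctl_kstat_create_zone(), which is
 * not visible in this file, so the sketch below simply scans the kstat
 * chain for named kstats that carry a "zonename" statistic matching the
 * zone of interest.
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	void
 *	print_zone_mem_kstats(const char *zonename)
 *	{
 *		kstat_ctl_t *kc = kstat_open();
 *		kstat_t *ksp;
 *		kstat_named_t *zn, *usage, *value;
 *
 *		if (kc == NULL)
 *			return;
 *		for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
 *			if (ksp->ks_type != KSTAT_TYPE_NAMED)
 *				continue;
 *			if (kstat_read(kc, ksp, NULL) == -1)
 *				continue;
 *			zn = kstat_data_lookup(ksp, "zonename");
 *			usage = kstat_data_lookup(ksp, "usage");
 *			value = kstat_data_lookup(ksp, "value");
 *			if (zn == NULL || usage == NULL || value == NULL)
 *				continue;
 *			if (strcmp(KSTAT_NAMED_STR_PTR(zn), zonename) != 0)
 *				continue;
 *			(void) printf("%s: usage=%llu limit=%llu\n",
 *			    ksp->ks_name,
 *			    (unsigned long long)usage->value.ui64,
 *			    (unsigned long long)value->value.ui64);
 *		}
 *		(void) kstat_close(kc);
 *	}
 */
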
16920Sstevel@tonic-gate /*
16930Sstevel@tonic-gate  * Called very early on in boot to initialize the ZSD list so that
16940Sstevel@tonic-gate  * zone_key_create() can be called before zone_init().  It also initializes
16950Sstevel@tonic-gate  * portions of zone0 which may be used before zone_init() is called.  The
16960Sstevel@tonic-gate  * variable "global_zone" will be set when zone0 is fully initialized by
16970Sstevel@tonic-gate  * zone_init().
16980Sstevel@tonic-gate  */
16990Sstevel@tonic-gate void
17000Sstevel@tonic-gate zone_zsd_init(void)
17010Sstevel@tonic-gate {
17020Sstevel@tonic-gate 	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
17030Sstevel@tonic-gate 	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
17040Sstevel@tonic-gate 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
17050Sstevel@tonic-gate 	    offsetof(struct zsd_entry, zsd_linkage));
17060Sstevel@tonic-gate 	list_create(&zone_active, sizeof (zone_t),
17070Sstevel@tonic-gate 	    offsetof(zone_t, zone_linkage));
17080Sstevel@tonic-gate 	list_create(&zone_deathrow, sizeof (zone_t),
17090Sstevel@tonic-gate 	    offsetof(zone_t, zone_linkage));
17100Sstevel@tonic-gate 
17110Sstevel@tonic-gate 	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
17120Sstevel@tonic-gate 	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
17133247Sgjelinek 	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
17140Sstevel@tonic-gate 	zone0.zone_shares = 1;
17153247Sgjelinek 	zone0.zone_nlwps = 0;
17160Sstevel@tonic-gate 	zone0.zone_nlwps_ctl = INT_MAX;
17173247Sgjelinek 	zone0.zone_locked_mem = 0;
17183247Sgjelinek 	zone0.zone_locked_mem_ctl = UINT64_MAX;
17193247Sgjelinek 	ASSERT(zone0.zone_max_swap == 0);
17203247Sgjelinek 	zone0.zone_max_swap_ctl = UINT64_MAX;
17212677Sml93401 	zone0.zone_shmmax = 0;
17222677Sml93401 	zone0.zone_ipc.ipcq_shmmni = 0;
17232677Sml93401 	zone0.zone_ipc.ipcq_semmni = 0;
17242677Sml93401 	zone0.zone_ipc.ipcq_msgmni = 0;
17250Sstevel@tonic-gate 	zone0.zone_name = GLOBAL_ZONENAME;
17260Sstevel@tonic-gate 	zone0.zone_nodename = utsname.nodename;
17270Sstevel@tonic-gate 	zone0.zone_domain = srpc_domain;
17288662SJordan.Vaughan@Sun.com 	zone0.zone_hostid = HW_INVALID_HOSTID;
17290Sstevel@tonic-gate 	zone0.zone_ref = 1;
17300Sstevel@tonic-gate 	zone0.zone_id = GLOBAL_ZONEID;
17310Sstevel@tonic-gate 	zone0.zone_status = ZONE_IS_RUNNING;
17320Sstevel@tonic-gate 	zone0.zone_rootpath = "/";
17330Sstevel@tonic-gate 	zone0.zone_rootpathlen = 2;
17340Sstevel@tonic-gate 	zone0.zone_psetid = ZONE_PS_INVAL;
17350Sstevel@tonic-gate 	zone0.zone_ncpus = 0;
17360Sstevel@tonic-gate 	zone0.zone_ncpus_online = 0;
17370Sstevel@tonic-gate 	zone0.zone_proc_initpid = 1;
17382267Sdp 	zone0.zone_initname = initname;
17393247Sgjelinek 	zone0.zone_lockedmem_kstat = NULL;
17403247Sgjelinek 	zone0.zone_swapresv_kstat = NULL;
17410Sstevel@tonic-gate 	list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
17420Sstevel@tonic-gate 	    offsetof(struct zsd_entry, zsd_linkage));
17430Sstevel@tonic-gate 	list_insert_head(&zone_active, &zone0);
17440Sstevel@tonic-gate 
17450Sstevel@tonic-gate 	/*
17460Sstevel@tonic-gate 	 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
17470Sstevel@tonic-gate 	 * to anything meaningful.  It is assigned to be 'rootdir' in
17480Sstevel@tonic-gate 	 * vfs_mountroot().
17490Sstevel@tonic-gate 	 */
17500Sstevel@tonic-gate 	zone0.zone_rootvp = NULL;
17510Sstevel@tonic-gate 	zone0.zone_vfslist = NULL;
17522267Sdp 	zone0.zone_bootargs = initargs;
17530Sstevel@tonic-gate 	zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
17540Sstevel@tonic-gate 	/*
17550Sstevel@tonic-gate 	 * The global zone has all privileges
17560Sstevel@tonic-gate 	 */
17570Sstevel@tonic-gate 	priv_fillset(zone0.zone_privset);
17580Sstevel@tonic-gate 	/*
17590Sstevel@tonic-gate 	 * Add p0 to the global zone
17600Sstevel@tonic-gate 	 */
17610Sstevel@tonic-gate 	zone0.zone_zsched = &p0;
17620Sstevel@tonic-gate 	p0.p_zone = &zone0;
17630Sstevel@tonic-gate }
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate /*
17661676Sjpk  * Compute a hash value based on the contents of the label and the DOI.  The
17671676Sjpk  * hash algorithm is somewhat arbitrary, but is based on the observation that
17681676Sjpk  * humans will likely pick labels that differ by amounts that work out to be
17691676Sjpk  * multiples of the number of hash chains, and thus stirring in some primes
17701676Sjpk  * should help.
17711676Sjpk  */
17721676Sjpk static uint_t
17731676Sjpk hash_bylabel(void *hdata, mod_hash_key_t key)
17741676Sjpk {
17751676Sjpk 	const ts_label_t *lab = (ts_label_t *)key;
17761676Sjpk 	const uint32_t *up, *ue;
17771676Sjpk 	uint_t hash;
17781676Sjpk 	int i;
17791676Sjpk 
17801676Sjpk 	_NOTE(ARGUNUSED(hdata));
17811676Sjpk 
17821676Sjpk 	hash = lab->tsl_doi + (lab->tsl_doi << 1);
17831676Sjpk 	/* we depend on alignment of label, but not representation */
17841676Sjpk 	up = (const uint32_t *)&lab->tsl_label;
17851676Sjpk 	ue = up + sizeof (lab->tsl_label) / sizeof (*up);
17861676Sjpk 	i = 1;
17871676Sjpk 	while (up < ue) {
17881676Sjpk 		/* using 2^n + 1, 1 <= n <= 16 as source of many primes */
17891676Sjpk 		hash += *up + (*up << ((i % 16) + 1));
17901676Sjpk 		up++;
17911676Sjpk 		i++;
17921676Sjpk 	}
17931676Sjpk 	return (hash);
17941676Sjpk }
17951676Sjpk 
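/*
 * Editorial note (illustrative): the update "hash += *up + (*up << n)" is
 * the same as hash += *up * ((1 << n) + 1), so successive 32-bit words of
 * the label are scaled by 2^n + 1 values (3, 5, 9, 17, ..., 65537) as 'i'
 * advances.  With a DOI d and two label words w0 and w1 the result is:
 *
 *	hash  = d * 3;			d + (d << 1)
 *	hash += w0 * 5;			i == 1, shift 2
 *	hash += w1 * 9;			i == 2, shift 3
 *
 * Labels that differ only by a multiple of the number of hash chains are
 * therefore unlikely to collide into the same chain.
 */
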
17961676Sjpk /*
17971676Sjpk  * All that mod_hash cares about here is zero (equal) versus non-zero (not
17981676Sjpk  * equal).  This may need to be changed if less than / greater than is ever
17991676Sjpk  * needed.
18001676Sjpk  */
18011676Sjpk static int
18021676Sjpk hash_labelkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
18031676Sjpk {
18041676Sjpk 	ts_label_t *lab1 = (ts_label_t *)key1;
18051676Sjpk 	ts_label_t *lab2 = (ts_label_t *)key2;
18061676Sjpk 
18071676Sjpk 	return (label_equal(lab1, lab2) ? 0 : 1);
18081676Sjpk }
18091676Sjpk 
18101676Sjpk /*
18110Sstevel@tonic-gate  * Called by main() to initialize the zones framework.
18120Sstevel@tonic-gate  */
18130Sstevel@tonic-gate void
18140Sstevel@tonic-gate zone_init(void)
18150Sstevel@tonic-gate {
18160Sstevel@tonic-gate 	rctl_dict_entry_t *rde;
18170Sstevel@tonic-gate 	rctl_val_t *dval;
18180Sstevel@tonic-gate 	rctl_set_t *set;
18190Sstevel@tonic-gate 	rctl_alloc_gp_t *gp;
18200Sstevel@tonic-gate 	rctl_entity_p_t e;
18211166Sdstaff 	int res;
18220Sstevel@tonic-gate 
18230Sstevel@tonic-gate 	ASSERT(curproc == &p0);
18240Sstevel@tonic-gate 
18250Sstevel@tonic-gate 	/*
18260Sstevel@tonic-gate 	 * Create ID space for zone IDs.  ID 0 is reserved for the
18270Sstevel@tonic-gate 	 * global zone.
18280Sstevel@tonic-gate 	 */
18290Sstevel@tonic-gate 	zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);
18300Sstevel@tonic-gate 
18310Sstevel@tonic-gate 	/*
18320Sstevel@tonic-gate 	 * Initialize generic zone resource controls, if any.
18330Sstevel@tonic-gate 	 */
18340Sstevel@tonic-gate 	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
18350Sstevel@tonic-gate 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
18361996Sml93401 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
18373792Sakolb 	    FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
18383792Sakolb 
18393792Sakolb 	rc_zone_cpu_cap = rctl_register("zone.cpu-cap",
18403792Sakolb 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_ALWAYS |
18413792Sakolb 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT |RCTL_GLOBAL_SYSLOG_NEVER |
18423792Sakolb 	    RCTL_GLOBAL_INFINITE,
18433792Sakolb 	    MAXCAP, MAXCAP, &zone_cpu_cap_ops);
18440Sstevel@tonic-gate 
18450Sstevel@tonic-gate 	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
18460Sstevel@tonic-gate 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
18470Sstevel@tonic-gate 	    INT_MAX, INT_MAX, &zone_lwps_ops);
18480Sstevel@tonic-gate 	/*
18492677Sml93401 	 * System V IPC resource controls
18502677Sml93401 	 */
18512677Sml93401 	rc_zone_msgmni = rctl_register("zone.max-msg-ids",
18522677Sml93401 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
18532677Sml93401 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_msgmni_ops);
18542677Sml93401 
18552677Sml93401 	rc_zone_semmni = rctl_register("zone.max-sem-ids",
18562677Sml93401 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
18572677Sml93401 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_semmni_ops);
18582677Sml93401 
18592677Sml93401 	rc_zone_shmmni = rctl_register("zone.max-shm-ids",
18602677Sml93401 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
18612677Sml93401 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_shmmni_ops);
18622677Sml93401 
18632677Sml93401 	rc_zone_shmmax = rctl_register("zone.max-shm-memory",
18642677Sml93401 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
18652677Sml93401 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &zone_shmmax_ops);
18662677Sml93401 
18672677Sml93401 	/*
18680Sstevel@tonic-gate 	 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1.  Then attach
18690Sstevel@tonic-gate 	 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
18700Sstevel@tonic-gate 	 */
18710Sstevel@tonic-gate 	dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
18720Sstevel@tonic-gate 	bzero(dval, sizeof (rctl_val_t));
18730Sstevel@tonic-gate 	dval->rcv_value = 1;
18740Sstevel@tonic-gate 	dval->rcv_privilege = RCPRIV_PRIVILEGED;
18750Sstevel@tonic-gate 	dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
18760Sstevel@tonic-gate 	dval->rcv_action_recip_pid = -1;
18770Sstevel@tonic-gate 
18780Sstevel@tonic-gate 	rde = rctl_dict_lookup("zone.cpu-shares");
18790Sstevel@tonic-gate 	(void) rctl_val_list_insert(&rde->rcd_default_value, dval);
18800Sstevel@tonic-gate 
18812768Ssl108498 	rc_zone_locked_mem = rctl_register("zone.max-locked-memory",
18822768Ssl108498 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
18832768Ssl108498 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
18842768Ssl108498 	    &zone_locked_mem_ops);
18853247Sgjelinek 
18863247Sgjelinek 	rc_zone_max_swap = rctl_register("zone.max-swap",
18873247Sgjelinek 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
18883247Sgjelinek 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
18893247Sgjelinek 	    &zone_max_swap_ops);
18903247Sgjelinek 
18910Sstevel@tonic-gate 	/*
18920Sstevel@tonic-gate 	 * Initialize the ``global zone''.
18930Sstevel@tonic-gate 	 */
18940Sstevel@tonic-gate 	set = rctl_set_create();
18950Sstevel@tonic-gate 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
18960Sstevel@tonic-gate 	mutex_enter(&p0.p_lock);
18970Sstevel@tonic-gate 	e.rcep_p.zone = &zone0;
18980Sstevel@tonic-gate 	e.rcep_t = RCENTITY_ZONE;
18990Sstevel@tonic-gate 	zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
19000Sstevel@tonic-gate 	    gp);
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate 	zone0.zone_nlwps = p0.p_lwpcnt;
19030Sstevel@tonic-gate 	zone0.zone_ntasks = 1;
19040Sstevel@tonic-gate 	mutex_exit(&p0.p_lock);
19052712Snn35248 	zone0.zone_restart_init = B_TRUE;
19062712Snn35248 	zone0.zone_brand = &native_brand;
19070Sstevel@tonic-gate 	rctl_prealloc_destroy(gp);
19080Sstevel@tonic-gate 	/*
19093247Sgjelinek 	 * pool_default hasn't been initialized yet, so we let pool_init()
19103247Sgjelinek 	 * take care of making sure the global zone is in the default pool.
19110Sstevel@tonic-gate 	 */
19121676Sjpk 
19131676Sjpk 	/*
19143247Sgjelinek 	 * Initialize global zone kstats
19153247Sgjelinek 	 */
19163247Sgjelinek 	zone_kstat_create(&zone0);
19173247Sgjelinek 
19183247Sgjelinek 	/*
19191676Sjpk 	 * Initialize zone label.
19201676Sjpk 	 * mlp are initialized when tnzonecfg is loaded.
19211676Sjpk 	 */
19221676Sjpk 	zone0.zone_slabel = l_admin_low;
19231676Sjpk 	rw_init(&zone0.zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
19241676Sjpk 	label_hold(l_admin_low);
19251676Sjpk 
19260Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
19270Sstevel@tonic-gate 	zone_uniqid(&zone0);
19280Sstevel@tonic-gate 	ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
19291676Sjpk 
19300Sstevel@tonic-gate 	zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
19310Sstevel@tonic-gate 	    mod_hash_null_valdtor);
19320Sstevel@tonic-gate 	zonehashbyname = mod_hash_create_strhash("zone_by_name",
19330Sstevel@tonic-gate 	    zone_hash_size, mod_hash_null_valdtor);
19341676Sjpk 	/*
19351676Sjpk 	 * maintain zonehashbylabel only for labeled systems
19361676Sjpk 	 */
19371676Sjpk 	if (is_system_labeled())
19381676Sjpk 		zonehashbylabel = mod_hash_create_extended("zone_by_label",
19391676Sjpk 		    zone_hash_size, mod_hash_null_keydtor,
19401676Sjpk 		    mod_hash_null_valdtor, hash_bylabel, NULL,
19411676Sjpk 		    hash_labelkey_cmp, KM_SLEEP);
19420Sstevel@tonic-gate 	zonecount = 1;
19430Sstevel@tonic-gate 
19440Sstevel@tonic-gate 	(void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
19450Sstevel@tonic-gate 	    (mod_hash_val_t)&zone0);
19460Sstevel@tonic-gate 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
19470Sstevel@tonic-gate 	    (mod_hash_val_t)&zone0);
19481769Scarlsonj 	if (is_system_labeled()) {
19491769Scarlsonj 		zone0.zone_flags |= ZF_HASHED_LABEL;
19501676Sjpk 		(void) mod_hash_insert(zonehashbylabel,
19511676Sjpk 		    (mod_hash_key_t)zone0.zone_slabel, (mod_hash_val_t)&zone0);
19521769Scarlsonj 	}
19531676Sjpk 	mutex_exit(&zonehash_lock);
19541676Sjpk 
19550Sstevel@tonic-gate 	/*
19560Sstevel@tonic-gate 	 * We avoid setting zone_kcred until now, since kcred is initialized
19570Sstevel@tonic-gate 	 * sometime after zone_zsd_init() and before zone_init().
19580Sstevel@tonic-gate 	 */
19590Sstevel@tonic-gate 	zone0.zone_kcred = kcred;
19600Sstevel@tonic-gate 	/*
19610Sstevel@tonic-gate 	 * The global zone is fully initialized (except for zone_rootvp which
19620Sstevel@tonic-gate 	 * will be set when the root filesystem is mounted).
19630Sstevel@tonic-gate 	 */
19640Sstevel@tonic-gate 	global_zone = &zone0;
19651166Sdstaff 
19661166Sdstaff 	/*
19671166Sdstaff 	 * Setup an event channel to send zone status change notifications on
19681166Sdstaff 	 */
19691166Sdstaff 	res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
19701166Sdstaff 	    EVCH_CREAT);
19711166Sdstaff 
19721166Sdstaff 	if (res)
19731166Sdstaff 		panic("Sysevent_evc_bind failed during zone setup.\n");
19743247Sgjelinek 
19750Sstevel@tonic-gate }
19760Sstevel@tonic-gate 
19770Sstevel@tonic-gate static void
19780Sstevel@tonic-gate zone_free(zone_t *zone)
19790Sstevel@tonic-gate {
19800Sstevel@tonic-gate 	ASSERT(zone != global_zone);
19810Sstevel@tonic-gate 	ASSERT(zone->zone_ntasks == 0);
19820Sstevel@tonic-gate 	ASSERT(zone->zone_nlwps == 0);
19830Sstevel@tonic-gate 	ASSERT(zone->zone_cred_ref == 0);
19840Sstevel@tonic-gate 	ASSERT(zone->zone_kcred == NULL);
19850Sstevel@tonic-gate 	ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
19860Sstevel@tonic-gate 	    zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
19870Sstevel@tonic-gate 
19883792Sakolb 	/*
19893792Sakolb 	 * Remove any zone caps.
19903792Sakolb 	 */
19913792Sakolb 	cpucaps_zone_remove(zone);
19923792Sakolb 
19933792Sakolb 	ASSERT(zone->zone_cpucap == NULL);
19943792Sakolb 
19950Sstevel@tonic-gate 	/* remove from deathrow list */
19960Sstevel@tonic-gate 	if (zone_status_get(zone) == ZONE_IS_DEAD) {
19970Sstevel@tonic-gate 		ASSERT(zone->zone_ref == 0);
19980Sstevel@tonic-gate 		mutex_enter(&zone_deathrow_lock);
19990Sstevel@tonic-gate 		list_remove(&zone_deathrow, zone);
20000Sstevel@tonic-gate 		mutex_exit(&zone_deathrow_lock);
20010Sstevel@tonic-gate 	}
20020Sstevel@tonic-gate 
20030Sstevel@tonic-gate 	zone_free_zsd(zone);
2004789Sahrens 	zone_free_datasets(zone);
20050Sstevel@tonic-gate 
20060Sstevel@tonic-gate 	if (zone->zone_rootvp != NULL)
20070Sstevel@tonic-gate 		VN_RELE(zone->zone_rootvp);
20080Sstevel@tonic-gate 	if (zone->zone_rootpath)
20090Sstevel@tonic-gate 		kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
20100Sstevel@tonic-gate 	if (zone->zone_name != NULL)
20110Sstevel@tonic-gate 		kmem_free(zone->zone_name, ZONENAME_MAX);
20121676Sjpk 	if (zone->zone_slabel != NULL)
20131676Sjpk 		label_rele(zone->zone_slabel);
20140Sstevel@tonic-gate 	if (zone->zone_nodename != NULL)
20150Sstevel@tonic-gate 		kmem_free(zone->zone_nodename, _SYS_NMLN);
20160Sstevel@tonic-gate 	if (zone->zone_domain != NULL)
20170Sstevel@tonic-gate 		kmem_free(zone->zone_domain, _SYS_NMLN);
20180Sstevel@tonic-gate 	if (zone->zone_privset != NULL)
20190Sstevel@tonic-gate 		kmem_free(zone->zone_privset, sizeof (priv_set_t));
20200Sstevel@tonic-gate 	if (zone->zone_rctls != NULL)
20210Sstevel@tonic-gate 		rctl_set_free(zone->zone_rctls);
20220Sstevel@tonic-gate 	if (zone->zone_bootargs != NULL)
20232267Sdp 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
20242267Sdp 	if (zone->zone_initname != NULL)
20252267Sdp 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
20260Sstevel@tonic-gate 	id_free(zoneid_space, zone->zone_id);
20270Sstevel@tonic-gate 	mutex_destroy(&zone->zone_lock);
20280Sstevel@tonic-gate 	cv_destroy(&zone->zone_cv);
20291676Sjpk 	rw_destroy(&zone->zone_mlps.mlpl_rwlock);
20300Sstevel@tonic-gate 	kmem_free(zone, sizeof (zone_t));
20310Sstevel@tonic-gate }
20320Sstevel@tonic-gate 
20330Sstevel@tonic-gate /*
20340Sstevel@tonic-gate  * See block comment at the top of this file for information about zone
20350Sstevel@tonic-gate  * status values.
20360Sstevel@tonic-gate  */
20370Sstevel@tonic-gate /*
20380Sstevel@tonic-gate  * Convenience function for setting zone status.
20390Sstevel@tonic-gate  */
20400Sstevel@tonic-gate static void
20410Sstevel@tonic-gate zone_status_set(zone_t *zone, zone_status_t status)
20420Sstevel@tonic-gate {
20431166Sdstaff 
20441166Sdstaff 	nvlist_t *nvl = NULL;
20450Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zone_status_lock));
20460Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
20470Sstevel@tonic-gate 	    status >= zone_status_get(zone));
20481166Sdstaff 
20491166Sdstaff 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
20501166Sdstaff 	    nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
20511166Sdstaff 	    nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
20522267Sdp 	    zone_status_table[status]) ||
20531166Sdstaff 	    nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
20542267Sdp 	    zone_status_table[zone->zone_status]) ||
20551166Sdstaff 	    nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
20561166Sdstaff 	    nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
20571166Sdstaff 	    sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
20582267Sdp 	    ZONE_EVENT_STATUS_SUBCLASS, "sun.com", "kernel", nvl, EVCH_SLEEP)) {
20591166Sdstaff #ifdef DEBUG
20601166Sdstaff 		(void) printf(
20611166Sdstaff 		    "Failed to allocate and send zone state change event.\n");
20621166Sdstaff #endif
20631166Sdstaff 	}
20641166Sdstaff 	nvlist_free(nvl);
20651166Sdstaff 
20660Sstevel@tonic-gate 	zone->zone_status = status;
20671166Sdstaff 
20680Sstevel@tonic-gate 	cv_broadcast(&zone->zone_cv);
20690Sstevel@tonic-gate }
20700Sstevel@tonic-gate 
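/*
 * Example (editorial sketch): the status transitions published above on
 * ZONE_EVENT_CHANNEL can be observed from userland with the libsysevent
 * channel interfaces.  The subscriber id "zone_watch" is arbitrary, the
 * header locations are assumptions, and extracting the ZONE_CB_* attribute
 * nvlist is left to the library's attribute accessors.
 *
 *	#include <libsysevent.h>
 *	#include <sys/zone.h>
 *	#include <stdio.h>
 *
 *	static int
 *	zone_status_handler(sysevent_t *ev, void *cookie)
 *	{
 *		(void) printf("zone status event: %s/%s\n",
 *		    sysevent_get_class_name(ev),
 *		    sysevent_get_subclass_name(ev));
 *		return (0);
 *	}
 *
 *	int
 *	watch_zone_status(void)
 *	{
 *		evchan_t *ch;
 *
 *		if (sysevent_evc_bind(ZONE_EVENT_CHANNEL, &ch, 0) != 0)
 *			return (-1);
 *		return (sysevent_evc_subscribe(ch, "zone_watch",
 *		    ZONE_EVENT_STATUS_CLASS, zone_status_handler, NULL, 0));
 *	}
 */
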
20710Sstevel@tonic-gate /*
20720Sstevel@tonic-gate  * Public function to retrieve the zone status.  The zone status may
20730Sstevel@tonic-gate  * change after it is retrieved.
20740Sstevel@tonic-gate  */
20750Sstevel@tonic-gate zone_status_t
20760Sstevel@tonic-gate zone_status_get(zone_t *zone)
20770Sstevel@tonic-gate {
20780Sstevel@tonic-gate 	return (zone->zone_status);
20790Sstevel@tonic-gate }
20800Sstevel@tonic-gate 
20810Sstevel@tonic-gate static int
20820Sstevel@tonic-gate zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
20830Sstevel@tonic-gate {
20842267Sdp 	char *bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
20852267Sdp 	int err = 0;
20862267Sdp 
20872267Sdp 	ASSERT(zone != global_zone);
20882267Sdp 	if ((err = copyinstr(zone_bootargs, bootargs, BOOTARGS_MAX, NULL)) != 0)
20892267Sdp 		goto done;	/* EFAULT or ENAMETOOLONG */
20902267Sdp 
20912267Sdp 	if (zone->zone_bootargs != NULL)
20922267Sdp 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
20932267Sdp 
20942267Sdp 	zone->zone_bootargs = kmem_alloc(strlen(bootargs) + 1, KM_SLEEP);
20952267Sdp 	(void) strcpy(zone->zone_bootargs, bootargs);
20962267Sdp 
20972267Sdp done:
20982267Sdp 	kmem_free(bootargs, BOOTARGS_MAX);
20992267Sdp 	return (err);
21002267Sdp }
21012267Sdp 
21022267Sdp static int
21034141Sedp zone_set_brand(zone_t *zone, const char *brand)
21044141Sedp {
21054141Sedp 	struct brand_attr *attrp;
21064141Sedp 	brand_t *bp;
21074141Sedp 
21084141Sedp 	attrp = kmem_alloc(sizeof (struct brand_attr), KM_SLEEP);
21094141Sedp 	if (copyin(brand, attrp, sizeof (struct brand_attr)) != 0) {
21104141Sedp 		kmem_free(attrp, sizeof (struct brand_attr));
21114141Sedp 		return (EFAULT);
21124141Sedp 	}
21134141Sedp 
21144141Sedp 	bp = brand_register_zone(attrp);
21154141Sedp 	kmem_free(attrp, sizeof (struct brand_attr));
21164141Sedp 	if (bp == NULL)
21174141Sedp 		return (EINVAL);
21184141Sedp 
21194141Sedp 	/*
21204141Sedp 	 * This is the only place where a zone can change its brand.
21214141Sedp 	 * We already need to hold zone_status_lock to check the zone
21224141Sedp 	 * status, so we'll just use that lock to serialize zone
21234141Sedp 	 * branding requests as well.
21244141Sedp 	 */
21254141Sedp 	mutex_enter(&zone_status_lock);
21264141Sedp 
21274141Sedp 	/* Re-branding is not allowed and the zone must not have booted yet */
21284141Sedp 	if ((ZONE_IS_BRANDED(zone)) ||
21294141Sedp 	    (zone_status_get(zone) >= ZONE_IS_BOOTING)) {
21304141Sedp 		mutex_exit(&zone_status_lock);
21314141Sedp 		brand_unregister_zone(bp);
21324141Sedp 		return (EINVAL);
21334141Sedp 	}
21344141Sedp 
21354888Seh208807 	/* set up the brand specific data */
21364141Sedp 	zone->zone_brand = bp;
21374888Seh208807 	ZBROP(zone)->b_init_brand_data(zone);
21384888Seh208807 
21394141Sedp 	mutex_exit(&zone_status_lock);
21404141Sedp 	return (0);
21414141Sedp }
21424141Sedp 
21434141Sedp static int
21442267Sdp zone_set_initname(zone_t *zone, const char *zone_initname)
21452267Sdp {
21462267Sdp 	char initname[INITNAME_SZ];
21470Sstevel@tonic-gate 	size_t len;
21482267Sdp 	int err = 0;
21492267Sdp 
21502267Sdp 	ASSERT(zone != global_zone);
21512267Sdp 	if ((err = copyinstr(zone_initname, initname, INITNAME_SZ, &len)) != 0)
21520Sstevel@tonic-gate 		return (err);	/* EFAULT or ENAMETOOLONG */
21532267Sdp 
21542267Sdp 	if (zone->zone_initname != NULL)
21552267Sdp 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
21562267Sdp 
21572267Sdp 	zone->zone_initname = kmem_alloc(strlen(initname) + 1, KM_SLEEP);
21582267Sdp 	(void) strcpy(zone->zone_initname, initname);
21590Sstevel@tonic-gate 	return (0);
21600Sstevel@tonic-gate }
21610Sstevel@tonic-gate 
21623247Sgjelinek static int
21633247Sgjelinek zone_set_phys_mcap(zone_t *zone, const uint64_t *zone_mcap)
21643247Sgjelinek {
21653247Sgjelinek 	uint64_t mcap;
21663247Sgjelinek 	int err = 0;
21673247Sgjelinek 
21683247Sgjelinek 	if ((err = copyin(zone_mcap, &mcap, sizeof (uint64_t))) == 0)
21693247Sgjelinek 		zone->zone_phys_mcap = mcap;
21703247Sgjelinek 
21713247Sgjelinek 	return (err);
21723247Sgjelinek }
21733247Sgjelinek 
21743247Sgjelinek static int
21753247Sgjelinek zone_set_sched_class(zone_t *zone, const char *new_class)
21763247Sgjelinek {
21773247Sgjelinek 	char sched_class[PC_CLNMSZ];
21783247Sgjelinek 	id_t classid;
21793247Sgjelinek 	int err;
21803247Sgjelinek 
21813247Sgjelinek 	ASSERT(zone != global_zone);
21823247Sgjelinek 	if ((err = copyinstr(new_class, sched_class, PC_CLNMSZ, NULL)) != 0)
21833247Sgjelinek 		return (err);	/* EFAULT or ENAMETOOLONG */
21843247Sgjelinek 
21853247Sgjelinek 	if (getcid(sched_class, &classid) != 0 || classid == syscid)
21863247Sgjelinek 		return (set_errno(EINVAL));
21873247Sgjelinek 	zone->zone_defaultcid = classid;
21883247Sgjelinek 	ASSERT(zone->zone_defaultcid > 0 &&
21893247Sgjelinek 	    zone->zone_defaultcid < loaded_classes);
21903247Sgjelinek 
21913247Sgjelinek 	return (0);
21923247Sgjelinek }
21933247Sgjelinek 
21940Sstevel@tonic-gate /*
21950Sstevel@tonic-gate  * Block indefinitely waiting for (zone_status >= status)
21960Sstevel@tonic-gate  */
21970Sstevel@tonic-gate void
21980Sstevel@tonic-gate zone_status_wait(zone_t *zone, zone_status_t status)
21990Sstevel@tonic-gate {
22000Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
22010Sstevel@tonic-gate 
22020Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
22030Sstevel@tonic-gate 	while (zone->zone_status < status) {
22040Sstevel@tonic-gate 		cv_wait(&zone->zone_cv, &zone_status_lock);
22050Sstevel@tonic-gate 	}
22060Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
22070Sstevel@tonic-gate }
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate /*
22100Sstevel@tonic-gate  * Private CPR-safe version of zone_status_wait().
22110Sstevel@tonic-gate  */
22120Sstevel@tonic-gate static void
22130Sstevel@tonic-gate zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
22140Sstevel@tonic-gate {
22150Sstevel@tonic-gate 	callb_cpr_t cprinfo;
22160Sstevel@tonic-gate 
22170Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
22180Sstevel@tonic-gate 
22190Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
22200Sstevel@tonic-gate 	    str);
22210Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
22220Sstevel@tonic-gate 	while (zone->zone_status < status) {
22230Sstevel@tonic-gate 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
22240Sstevel@tonic-gate 		cv_wait(&zone->zone_cv, &zone_status_lock);
22250Sstevel@tonic-gate 		CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
22260Sstevel@tonic-gate 	}
22270Sstevel@tonic-gate 	/*
22280Sstevel@tonic-gate 	 * zone_status_lock is implicitly released by the following.
22290Sstevel@tonic-gate 	 */
22300Sstevel@tonic-gate 	CALLB_CPR_EXIT(&cprinfo);
22310Sstevel@tonic-gate }
22320Sstevel@tonic-gate 
22330Sstevel@tonic-gate /*
22340Sstevel@tonic-gate  * Block until zone enters requested state or signal is received.  Return (0)
22350Sstevel@tonic-gate  * if signaled, non-zero otherwise.
22360Sstevel@tonic-gate  */
22370Sstevel@tonic-gate int
22380Sstevel@tonic-gate zone_status_wait_sig(zone_t *zone, zone_status_t status)
22390Sstevel@tonic-gate {
22400Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
22410Sstevel@tonic-gate 
22420Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
22430Sstevel@tonic-gate 	while (zone->zone_status < status) {
22440Sstevel@tonic-gate 		if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
22450Sstevel@tonic-gate 			mutex_exit(&zone_status_lock);
22460Sstevel@tonic-gate 			return (0);
22470Sstevel@tonic-gate 		}
22480Sstevel@tonic-gate 	}
22490Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
22500Sstevel@tonic-gate 	return (1);
22510Sstevel@tonic-gate }
22520Sstevel@tonic-gate 
22530Sstevel@tonic-gate /*
22540Sstevel@tonic-gate  * Block until the zone enters the requested state or the timeout expires,
22550Sstevel@tonic-gate  * whichever happens first.  Return (-1) if operation timed out, time remaining
22560Sstevel@tonic-gate  * otherwise.
22570Sstevel@tonic-gate  */
22580Sstevel@tonic-gate clock_t
22590Sstevel@tonic-gate zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
22600Sstevel@tonic-gate {
22610Sstevel@tonic-gate 	clock_t timeleft = 0;
22620Sstevel@tonic-gate 
22630Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
22640Sstevel@tonic-gate 
22650Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
22660Sstevel@tonic-gate 	while (zone->zone_status < status && timeleft != -1) {
22670Sstevel@tonic-gate 		timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
22680Sstevel@tonic-gate 	}
22690Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
22700Sstevel@tonic-gate 	return (timeleft);
22710Sstevel@tonic-gate }
22720Sstevel@tonic-gate 
22730Sstevel@tonic-gate /*
22740Sstevel@tonic-gate  * Block until the zone enters the requested state, the current process is
22750Sstevel@tonic-gate  * signaled,  or the timeout expires, whichever happens first.  Return (-1) if
22760Sstevel@tonic-gate  * operation timed out, 0 if signaled, time remaining otherwise.
22770Sstevel@tonic-gate  */
22780Sstevel@tonic-gate clock_t
22790Sstevel@tonic-gate zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
22800Sstevel@tonic-gate {
22810Sstevel@tonic-gate 	clock_t timeleft = tim - lbolt;
22820Sstevel@tonic-gate 
22830Sstevel@tonic-gate 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
22840Sstevel@tonic-gate 
22850Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
22860Sstevel@tonic-gate 	while (zone->zone_status < status) {
22870Sstevel@tonic-gate 		timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
22880Sstevel@tonic-gate 		    tim);
22890Sstevel@tonic-gate 		if (timeleft <= 0)
22900Sstevel@tonic-gate 			break;
22910Sstevel@tonic-gate 	}
22920Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
22930Sstevel@tonic-gate 	return (timeleft);
22940Sstevel@tonic-gate }
22950Sstevel@tonic-gate 
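/*
 * Example (editorial sketch): the 'tim' argument to the two timed waits
 * above is an absolute deadline in lbolt ticks, as cv_timedwait(9F)
 * expects (note the "tim - lbolt" above), not a relative timeout.  A
 * caller willing to wait about ten seconds for a zone to come up might do
 * the following; the ten second figure is illustrative.
 *
 *	clock_t deadline = lbolt + 10 * hz;
 *
 *	if (zone_status_timedwait(zone, deadline, ZONE_IS_RUNNING) == -1)
 *		cmn_err(CE_NOTE, "zone %s did not reach the running "
 *		    "state in time", zone->zone_name);
 */
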
22960Sstevel@tonic-gate /*
22970Sstevel@tonic-gate  * Zones have two reference counts: one for references from credential
22980Sstevel@tonic-gate  * structures (zone_cred_ref), and one (zone_ref) for everything else.
22990Sstevel@tonic-gate  * This is so we can allow a zone to be rebooted while there are still
23000Sstevel@tonic-gate  * outstanding cred references, since certain drivers cache dblks (which
23010Sstevel@tonic-gate  * implicitly results in cached creds).  We wait for zone_ref to drop to
23020Sstevel@tonic-gate  * 0 (actually 1), but not zone_cred_ref.  The zone structure itself is
23030Sstevel@tonic-gate  * later freed when the zone_cred_ref drops to 0, though nothing other
23040Sstevel@tonic-gate  * than the zone id and privilege set should be accessed once the zone
23050Sstevel@tonic-gate  * is "dead".
23060Sstevel@tonic-gate  *
23070Sstevel@tonic-gate  * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
23080Sstevel@tonic-gate  * to force halt/reboot to block waiting for the zone_cred_ref to drop
23090Sstevel@tonic-gate  * to 0.  This can be useful to flush out other sources of cached creds
23100Sstevel@tonic-gate  * that may be less innocuous than the driver case.
23110Sstevel@tonic-gate  */
23120Sstevel@tonic-gate 
23130Sstevel@tonic-gate int zone_wait_for_cred = 0;
23140Sstevel@tonic-gate 
23150Sstevel@tonic-gate static void
23160Sstevel@tonic-gate zone_hold_locked(zone_t *z)
23170Sstevel@tonic-gate {
23180Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&z->zone_lock));
23190Sstevel@tonic-gate 	z->zone_ref++;
23200Sstevel@tonic-gate 	ASSERT(z->zone_ref != 0);
23210Sstevel@tonic-gate }
23220Sstevel@tonic-gate 
23230Sstevel@tonic-gate void
23240Sstevel@tonic-gate zone_hold(zone_t *z)
23250Sstevel@tonic-gate {
23260Sstevel@tonic-gate 	mutex_enter(&z->zone_lock);
23270Sstevel@tonic-gate 	zone_hold_locked(z);
23280Sstevel@tonic-gate 	mutex_exit(&z->zone_lock);
23290Sstevel@tonic-gate }
23300Sstevel@tonic-gate 
23310Sstevel@tonic-gate /*
23320Sstevel@tonic-gate  * If the non-cred ref count drops to 1 and either the cred ref count
23330Sstevel@tonic-gate  * is 0 or we aren't waiting for cred references, the zone is ready to
23340Sstevel@tonic-gate  * be destroyed.
23350Sstevel@tonic-gate  */
23360Sstevel@tonic-gate #define	ZONE_IS_UNREF(zone)	((zone)->zone_ref == 1 && \
23370Sstevel@tonic-gate 	    (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))
23380Sstevel@tonic-gate 
23390Sstevel@tonic-gate void
23400Sstevel@tonic-gate zone_rele(zone_t *z)
23410Sstevel@tonic-gate {
23420Sstevel@tonic-gate 	boolean_t wakeup;
23430Sstevel@tonic-gate 
23440Sstevel@tonic-gate 	mutex_enter(&z->zone_lock);
23450Sstevel@tonic-gate 	ASSERT(z->zone_ref != 0);
23460Sstevel@tonic-gate 	z->zone_ref--;
23470Sstevel@tonic-gate 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
23480Sstevel@tonic-gate 		/* no more refs, free the structure */
23490Sstevel@tonic-gate 		mutex_exit(&z->zone_lock);
23500Sstevel@tonic-gate 		zone_free(z);
23510Sstevel@tonic-gate 		return;
23520Sstevel@tonic-gate 	}
23530Sstevel@tonic-gate 	/* signal zone_destroy so the zone can finish halting */
23540Sstevel@tonic-gate 	wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
23550Sstevel@tonic-gate 	mutex_exit(&z->zone_lock);
23560Sstevel@tonic-gate 
23570Sstevel@tonic-gate 	if (wakeup) {
23580Sstevel@tonic-gate 		/*
23590Sstevel@tonic-gate 		 * Grabbing zonehash_lock here effectively synchronizes with
23600Sstevel@tonic-gate 		 * zone_destroy() to avoid missed signals.
23610Sstevel@tonic-gate 		 */
23620Sstevel@tonic-gate 		mutex_enter(&zonehash_lock);
23630Sstevel@tonic-gate 		cv_broadcast(&zone_destroy_cv);
23640Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
23650Sstevel@tonic-gate 	}
23660Sstevel@tonic-gate }
23670Sstevel@tonic-gate 
23680Sstevel@tonic-gate void
23690Sstevel@tonic-gate zone_cred_hold(zone_t *z)
23700Sstevel@tonic-gate {
23710Sstevel@tonic-gate 	mutex_enter(&z->zone_lock);
23720Sstevel@tonic-gate 	z->zone_cred_ref++;
23730Sstevel@tonic-gate 	ASSERT(z->zone_cred_ref != 0);
23740Sstevel@tonic-gate 	mutex_exit(&z->zone_lock);
23750Sstevel@tonic-gate }
23760Sstevel@tonic-gate 
23770Sstevel@tonic-gate void
23780Sstevel@tonic-gate zone_cred_rele(zone_t *z)
23790Sstevel@tonic-gate {
23800Sstevel@tonic-gate 	boolean_t wakeup;
23810Sstevel@tonic-gate 
23820Sstevel@tonic-gate 	mutex_enter(&z->zone_lock);
23830Sstevel@tonic-gate 	ASSERT(z->zone_cred_ref != 0);
23840Sstevel@tonic-gate 	z->zone_cred_ref--;
23850Sstevel@tonic-gate 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
23860Sstevel@tonic-gate 		/* no more refs, free the structure */
23870Sstevel@tonic-gate 		mutex_exit(&z->zone_lock);
23880Sstevel@tonic-gate 		zone_free(z);
23890Sstevel@tonic-gate 		return;
23900Sstevel@tonic-gate 	}
23910Sstevel@tonic-gate 	/*
23920Sstevel@tonic-gate 	 * If zone_destroy is waiting for the cred references to drain
23930Sstevel@tonic-gate 	 * out, and they have, signal it.
23940Sstevel@tonic-gate 	 */
23950Sstevel@tonic-gate 	wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
23960Sstevel@tonic-gate 	    zone_status_get(z) >= ZONE_IS_DEAD);
23970Sstevel@tonic-gate 	mutex_exit(&z->zone_lock);
23980Sstevel@tonic-gate 
23990Sstevel@tonic-gate 	if (wakeup) {
24000Sstevel@tonic-gate 		/*
24010Sstevel@tonic-gate 		 * Grabbing zonehash_lock here effectively synchronizes with
24020Sstevel@tonic-gate 		 * zone_destroy() to avoid missed signals.
24030Sstevel@tonic-gate 		 */
24040Sstevel@tonic-gate 		mutex_enter(&zonehash_lock);
24050Sstevel@tonic-gate 		cv_broadcast(&zone_destroy_cv);
24060Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
24070Sstevel@tonic-gate 	}
24080Sstevel@tonic-gate }
24090Sstevel@tonic-gate 
24100Sstevel@tonic-gate void
24110Sstevel@tonic-gate zone_task_hold(zone_t *z)
24120Sstevel@tonic-gate {
24130Sstevel@tonic-gate 	mutex_enter(&z->zone_lock);
24140Sstevel@tonic-gate 	z->zone_ntasks++;
24150Sstevel@tonic-gate 	ASSERT(z->zone_ntasks != 0);
24160Sstevel@tonic-gate 	mutex_exit(&z->zone_lock);
24170Sstevel@tonic-gate }
24180Sstevel@tonic-gate 
24190Sstevel@tonic-gate void
24200Sstevel@tonic-gate zone_task_rele(zone_t *zone)
24210Sstevel@tonic-gate {
24220Sstevel@tonic-gate 	uint_t refcnt;
24230Sstevel@tonic-gate 
24240Sstevel@tonic-gate 	mutex_enter(&zone->zone_lock);
24250Sstevel@tonic-gate 	ASSERT(zone->zone_ntasks != 0);
24260Sstevel@tonic-gate 	refcnt = --zone->zone_ntasks;
24270Sstevel@tonic-gate 	if (refcnt > 1)	{	/* Common case */
24280Sstevel@tonic-gate 		mutex_exit(&zone->zone_lock);
24290Sstevel@tonic-gate 		return;
24300Sstevel@tonic-gate 	}
24310Sstevel@tonic-gate 	zone_hold_locked(zone);	/* so we can use the zone_t later */
24320Sstevel@tonic-gate 	mutex_exit(&zone->zone_lock);
24330Sstevel@tonic-gate 	if (refcnt == 1) {
24340Sstevel@tonic-gate 		/*
24350Sstevel@tonic-gate 		 * See if the zone is shutting down.
24360Sstevel@tonic-gate 		 */
24370Sstevel@tonic-gate 		mutex_enter(&zone_status_lock);
24380Sstevel@tonic-gate 		if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
24390Sstevel@tonic-gate 			goto out;
24400Sstevel@tonic-gate 		}
24410Sstevel@tonic-gate 
24420Sstevel@tonic-gate 		/*
24430Sstevel@tonic-gate 		 * Make sure the ntasks didn't change since we
24440Sstevel@tonic-gate 		 * dropped zone_lock.
24450Sstevel@tonic-gate 		 */
24460Sstevel@tonic-gate 		mutex_enter(&zone->zone_lock);
24470Sstevel@tonic-gate 		if (refcnt != zone->zone_ntasks) {
24480Sstevel@tonic-gate 			mutex_exit(&zone->zone_lock);
24490Sstevel@tonic-gate 			goto out;
24500Sstevel@tonic-gate 		}
24510Sstevel@tonic-gate 		mutex_exit(&zone->zone_lock);
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 		/*
24540Sstevel@tonic-gate 		 * No more user processes in the zone.  The zone is empty.
24550Sstevel@tonic-gate 		 */
24560Sstevel@tonic-gate 		zone_status_set(zone, ZONE_IS_EMPTY);
24570Sstevel@tonic-gate 		goto out;
24580Sstevel@tonic-gate 	}
24590Sstevel@tonic-gate 
24600Sstevel@tonic-gate 	ASSERT(refcnt == 0);
24610Sstevel@tonic-gate 	/*
24620Sstevel@tonic-gate 	 * zsched has exited; the zone is dead.
24630Sstevel@tonic-gate 	 */
24640Sstevel@tonic-gate 	zone->zone_zsched = NULL;		/* paranoia */
24650Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
24660Sstevel@tonic-gate 	zone_status_set(zone, ZONE_IS_DEAD);
24670Sstevel@tonic-gate out:
24680Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
24690Sstevel@tonic-gate 	zone_rele(zone);
24700Sstevel@tonic-gate }
24710Sstevel@tonic-gate 
24720Sstevel@tonic-gate zoneid_t
24730Sstevel@tonic-gate getzoneid(void)
24740Sstevel@tonic-gate {
24750Sstevel@tonic-gate 	return (curproc->p_zone->zone_id);
24760Sstevel@tonic-gate }
24770Sstevel@tonic-gate 
24780Sstevel@tonic-gate /*
24790Sstevel@tonic-gate  * Internal versions of zone_find_by_*().  These don't zone_hold() or
24800Sstevel@tonic-gate  * check the validity of a zone's state.
24810Sstevel@tonic-gate  */
24820Sstevel@tonic-gate static zone_t *
24830Sstevel@tonic-gate zone_find_all_by_id(zoneid_t zoneid)
24840Sstevel@tonic-gate {
24850Sstevel@tonic-gate 	mod_hash_val_t hv;
24860Sstevel@tonic-gate 	zone_t *zone = NULL;
24870Sstevel@tonic-gate 
24880Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zonehash_lock));
24890Sstevel@tonic-gate 
24900Sstevel@tonic-gate 	if (mod_hash_find(zonehashbyid,
24910Sstevel@tonic-gate 	    (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
24920Sstevel@tonic-gate 		zone = (zone_t *)hv;
24930Sstevel@tonic-gate 	return (zone);
24940Sstevel@tonic-gate }
24950Sstevel@tonic-gate 
24960Sstevel@tonic-gate static zone_t *
24971676Sjpk zone_find_all_by_label(const ts_label_t *label)
24981676Sjpk {
24991676Sjpk 	mod_hash_val_t hv;
25001676Sjpk 	zone_t *zone = NULL;
25011676Sjpk 
25021676Sjpk 	ASSERT(MUTEX_HELD(&zonehash_lock));
25031676Sjpk 
25041676Sjpk 	/*
25051676Sjpk 	 * zonehashbylabel is not maintained for unlabeled systems
25061676Sjpk 	 */
25071676Sjpk 	if (!is_system_labeled())
25081676Sjpk 		return (NULL);
25091676Sjpk 	if (mod_hash_find(zonehashbylabel, (mod_hash_key_t)label, &hv) == 0)
25101676Sjpk 		zone = (zone_t *)hv;
25111676Sjpk 	return (zone);
25121676Sjpk }
25131676Sjpk 
25141676Sjpk static zone_t *
25150Sstevel@tonic-gate zone_find_all_by_name(char *name)
25160Sstevel@tonic-gate {
25170Sstevel@tonic-gate 	mod_hash_val_t hv;
25180Sstevel@tonic-gate 	zone_t *zone = NULL;
25190Sstevel@tonic-gate 
25200Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zonehash_lock));
25210Sstevel@tonic-gate 
25220Sstevel@tonic-gate 	if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
25230Sstevel@tonic-gate 		zone = (zone_t *)hv;
25240Sstevel@tonic-gate 	return (zone);
25250Sstevel@tonic-gate }
25260Sstevel@tonic-gate 
25270Sstevel@tonic-gate /*
25280Sstevel@tonic-gate  * Public interface for looking up a zone by zoneid.  Only returns the zone if
25290Sstevel@tonic-gate  * it is fully initialized, and has not yet begun the zone_destroy() sequence.
25300Sstevel@tonic-gate  * Caller must call zone_rele() once it is done with the zone.
25310Sstevel@tonic-gate  *
25320Sstevel@tonic-gate  * The zone may begin the zone_destroy() sequence immediately after this
25330Sstevel@tonic-gate  * function returns, but may be safely used until zone_rele() is called.
25340Sstevel@tonic-gate  */
25350Sstevel@tonic-gate zone_t *
25360Sstevel@tonic-gate zone_find_by_id(zoneid_t zoneid)
25370Sstevel@tonic-gate {
25380Sstevel@tonic-gate 	zone_t *zone;
25390Sstevel@tonic-gate 	zone_status_t status;
25400Sstevel@tonic-gate 
25410Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
25420Sstevel@tonic-gate 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
25430Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
25440Sstevel@tonic-gate 		return (NULL);
25450Sstevel@tonic-gate 	}
25460Sstevel@tonic-gate 	status = zone_status_get(zone);
25470Sstevel@tonic-gate 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
25480Sstevel@tonic-gate 		/*
25490Sstevel@tonic-gate 		 * For all practical purposes the zone doesn't exist.
25500Sstevel@tonic-gate 		 */
25510Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
25520Sstevel@tonic-gate 		return (NULL);
25530Sstevel@tonic-gate 	}
25540Sstevel@tonic-gate 	zone_hold(zone);
25550Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
25560Sstevel@tonic-gate 	return (zone);
25570Sstevel@tonic-gate }
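
/*
 * A minimal sketch of the caller contract described above ("zid" is a
 * hypothetical zoneid_t supplied by the caller):
 *
 *	zone_t *zp;
 *
 *	if ((zp = zone_find_by_id(zid)) == NULL)
 *		return (EINVAL);	(no such zone, or not in a usable state)
 *	...use zp; the hold keeps it from being freed...
 *	zone_rele(zp);
 */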
25580Sstevel@tonic-gate 
25590Sstevel@tonic-gate /*
25601676Sjpk  * Similar to zone_find_by_id, but using zone label as the key.
25611676Sjpk  */
25621676Sjpk zone_t *
25631676Sjpk zone_find_by_label(const ts_label_t *label)
25641676Sjpk {
25651676Sjpk 	zone_t *zone;
25662110Srica 	zone_status_t status;
25671676Sjpk 
25681676Sjpk 	mutex_enter(&zonehash_lock);
25691676Sjpk 	if ((zone = zone_find_all_by_label(label)) == NULL) {
25701676Sjpk 		mutex_exit(&zonehash_lock);
25711676Sjpk 		return (NULL);
25721676Sjpk 	}
25732110Srica 
25742110Srica 	status = zone_status_get(zone);
25752110Srica 	if (status > ZONE_IS_DOWN) {
25761676Sjpk 		/*
25771676Sjpk 		 * For all practical purposes the zone doesn't exist.
25781676Sjpk 		 */
25792110Srica 		mutex_exit(&zonehash_lock);
25802110Srica 		return (NULL);
25811676Sjpk 	}
25822110Srica 	zone_hold(zone);
25831676Sjpk 	mutex_exit(&zonehash_lock);
25841676Sjpk 	return (zone);
25851676Sjpk }
25861676Sjpk 
25871676Sjpk /*
25880Sstevel@tonic-gate  * Similar to zone_find_by_id, but using zone name as the key.
25890Sstevel@tonic-gate  */
25900Sstevel@tonic-gate zone_t *
25910Sstevel@tonic-gate zone_find_by_name(char *name)
25920Sstevel@tonic-gate {
25930Sstevel@tonic-gate 	zone_t *zone;
25940Sstevel@tonic-gate 	zone_status_t status;
25950Sstevel@tonic-gate 
25960Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
25970Sstevel@tonic-gate 	if ((zone = zone_find_all_by_name(name)) == NULL) {
25980Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
25990Sstevel@tonic-gate 		return (NULL);
26000Sstevel@tonic-gate 	}
26010Sstevel@tonic-gate 	status = zone_status_get(zone);
26020Sstevel@tonic-gate 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
26030Sstevel@tonic-gate 		/*
26040Sstevel@tonic-gate 		 * For all practical purposes the zone doesn't exist.
26050Sstevel@tonic-gate 		 */
26060Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
26070Sstevel@tonic-gate 		return (NULL);
26080Sstevel@tonic-gate 	}
26090Sstevel@tonic-gate 	zone_hold(zone);
26100Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
26110Sstevel@tonic-gate 	return (zone);
26120Sstevel@tonic-gate }
26130Sstevel@tonic-gate 
26140Sstevel@tonic-gate /*
26150Sstevel@tonic-gate  * Similar to zone_find_by_id(), using the path as a key.  For instance,
26160Sstevel@tonic-gate  * if there is a zone "foo" rooted at /foo/root, and the path argument
26170Sstevel@tonic-gate  * is "/foo/root/proc", it will return the held zone_t corresponding to
26180Sstevel@tonic-gate  * zone "foo".
26190Sstevel@tonic-gate  *
26200Sstevel@tonic-gate  * zone_find_by_path() always returns a non-NULL value, since at the
26210Sstevel@tonic-gate  * very least every path will be contained in the global zone.
26220Sstevel@tonic-gate  *
26230Sstevel@tonic-gate  * As with the other zone_find_by_*() functions, the caller is
26240Sstevel@tonic-gate  * responsible for zone_rele()ing the return value of this function.
26250Sstevel@tonic-gate  */
26260Sstevel@tonic-gate zone_t *
26270Sstevel@tonic-gate zone_find_by_path(const char *path)
26280Sstevel@tonic-gate {
26290Sstevel@tonic-gate 	zone_t *zone;
26300Sstevel@tonic-gate 	zone_t *zret = NULL;
26310Sstevel@tonic-gate 	zone_status_t status;
26320Sstevel@tonic-gate 
26330Sstevel@tonic-gate 	if (path == NULL) {
26340Sstevel@tonic-gate 		/*
26350Sstevel@tonic-gate 		 * Call from rootconf().
26360Sstevel@tonic-gate 		 */
26370Sstevel@tonic-gate 		zone_hold(global_zone);
26380Sstevel@tonic-gate 		return (global_zone);
26390Sstevel@tonic-gate 	}
26400Sstevel@tonic-gate 	ASSERT(*path == '/');
26410Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
26420Sstevel@tonic-gate 	for (zone = list_head(&zone_active); zone != NULL;
26430Sstevel@tonic-gate 	    zone = list_next(&zone_active, zone)) {
26440Sstevel@tonic-gate 		if (ZONE_PATH_VISIBLE(path, zone))
26450Sstevel@tonic-gate 			zret = zone;
26460Sstevel@tonic-gate 	}
26470Sstevel@tonic-gate 	ASSERT(zret != NULL);
26480Sstevel@tonic-gate 	status = zone_status_get(zret);
26490Sstevel@tonic-gate 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
26500Sstevel@tonic-gate 		/*
26510Sstevel@tonic-gate 		 * Zone practically doesn't exist.
26520Sstevel@tonic-gate 		 */
26530Sstevel@tonic-gate 		zret = global_zone;
26540Sstevel@tonic-gate 	}
26550Sstevel@tonic-gate 	zone_hold(zret);
26560Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
26570Sstevel@tonic-gate 	return (zret);
26580Sstevel@tonic-gate }
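
/*
 * Continuing the example above, a sketch of a lookup by path for a zone
 * "foo" rooted at /foo/root:
 *
 *	zone_t *zp = zone_find_by_path("/foo/root/proc");
 *	...zp refers to zone "foo", or to the global zone if "foo" is not
 *	...in a usable state; in either case release the hold when done:
 *	zone_rele(zp);
 */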
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate /*
26610Sstevel@tonic-gate  * Get the number of cpus visible to this zone.  The system-wide global
26620Sstevel@tonic-gate  * 'ncpus' is returned if pools are disabled, the caller is in the
26630Sstevel@tonic-gate  * global zone, or a NULL zone argument is passed in.
26640Sstevel@tonic-gate  */
26650Sstevel@tonic-gate int
26660Sstevel@tonic-gate zone_ncpus_get(zone_t *zone)
26670Sstevel@tonic-gate {
26680Sstevel@tonic-gate 	int myncpus = zone == NULL ? 0 : zone->zone_ncpus;
26690Sstevel@tonic-gate 
26700Sstevel@tonic-gate 	return (myncpus != 0 ? myncpus : ncpus);
26710Sstevel@tonic-gate }
26720Sstevel@tonic-gate 
26730Sstevel@tonic-gate /*
26740Sstevel@tonic-gate  * Get the number of online cpus visible to this zone.  The system-wide
26750Sstevel@tonic-gate  * global 'ncpus_online' is returned if pools are disabled, the caller
26760Sstevel@tonic-gate  * is in the global zone, or a NULL zone argument is passed in.
26770Sstevel@tonic-gate  */
26780Sstevel@tonic-gate int
26790Sstevel@tonic-gate zone_ncpus_online_get(zone_t *zone)
26800Sstevel@tonic-gate {
26810Sstevel@tonic-gate 	int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;
26820Sstevel@tonic-gate 
26830Sstevel@tonic-gate 	return (myncpus_online != 0 ? myncpus_online : ncpus_online);
26840Sstevel@tonic-gate }
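
/*
 * A brief sketch of the fallback behavior of the two routines above:
 *
 *	int visible = zone_ncpus_online_get(curproc->p_zone);
 *
 * yields the zone's cached, pset-limited count when pools are in use and
 * the zone has been bound, and the system-wide ncpus_online otherwise.
 */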
26850Sstevel@tonic-gate 
26860Sstevel@tonic-gate /*
26870Sstevel@tonic-gate  * Return the pool to which the zone is currently bound.
26880Sstevel@tonic-gate  */
26890Sstevel@tonic-gate pool_t *
26900Sstevel@tonic-gate zone_pool_get(zone_t *zone)
26910Sstevel@tonic-gate {
26920Sstevel@tonic-gate 	ASSERT(pool_lock_held());
26930Sstevel@tonic-gate 
26940Sstevel@tonic-gate 	return (zone->zone_pool);
26950Sstevel@tonic-gate }
26960Sstevel@tonic-gate 
26970Sstevel@tonic-gate /*
26980Sstevel@tonic-gate  * Set the zone's pool pointer and update the zone's visibility to match
26990Sstevel@tonic-gate  * the resources in the new pool.
27000Sstevel@tonic-gate  */
27010Sstevel@tonic-gate void
27020Sstevel@tonic-gate zone_pool_set(zone_t *zone, pool_t *pool)
27030Sstevel@tonic-gate {
27040Sstevel@tonic-gate 	ASSERT(pool_lock_held());
27050Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
27060Sstevel@tonic-gate 
27070Sstevel@tonic-gate 	zone->zone_pool = pool;
27080Sstevel@tonic-gate 	zone_pset_set(zone, pool->pool_pset->pset_id);
27090Sstevel@tonic-gate }
27100Sstevel@tonic-gate 
27110Sstevel@tonic-gate /*
27120Sstevel@tonic-gate  * Return the cached value of the id of the processor set to which the
27130Sstevel@tonic-gate  * zone is currently bound.  The value will be ZONE_PS_INVAL if the pools
27140Sstevel@tonic-gate  * facility is disabled.
27150Sstevel@tonic-gate  */
27160Sstevel@tonic-gate psetid_t
27170Sstevel@tonic-gate zone_pset_get(zone_t *zone)
27180Sstevel@tonic-gate {
27190Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
27200Sstevel@tonic-gate 
27210Sstevel@tonic-gate 	return (zone->zone_psetid);
27220Sstevel@tonic-gate }
27230Sstevel@tonic-gate 
27240Sstevel@tonic-gate /*
27250Sstevel@tonic-gate  * Set the cached value of the id of the processor set to which the zone
27260Sstevel@tonic-gate  * is currently bound.  Also update the zone's visibility to match the
27270Sstevel@tonic-gate  * resources in the new processor set.
27280Sstevel@tonic-gate  */
27290Sstevel@tonic-gate void
27300Sstevel@tonic-gate zone_pset_set(zone_t *zone, psetid_t newpsetid)
27310Sstevel@tonic-gate {
27320Sstevel@tonic-gate 	psetid_t oldpsetid;
27330Sstevel@tonic-gate 
27340Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
27350Sstevel@tonic-gate 	oldpsetid = zone_pset_get(zone);
27360Sstevel@tonic-gate 
27370Sstevel@tonic-gate 	if (oldpsetid == newpsetid)
27380Sstevel@tonic-gate 		return;
27390Sstevel@tonic-gate 	/*
27400Sstevel@tonic-gate 	 * Global zone sees all.
27410Sstevel@tonic-gate 	 */
27420Sstevel@tonic-gate 	if (zone != global_zone) {
27430Sstevel@tonic-gate 		zone->zone_psetid = newpsetid;
27440Sstevel@tonic-gate 		if (newpsetid != ZONE_PS_INVAL)
27450Sstevel@tonic-gate 			pool_pset_visibility_add(newpsetid, zone);
27460Sstevel@tonic-gate 		if (oldpsetid != ZONE_PS_INVAL)
27470Sstevel@tonic-gate 			pool_pset_visibility_remove(oldpsetid, zone);
27480Sstevel@tonic-gate 	}
27490Sstevel@tonic-gate 	/*
27500Sstevel@tonic-gate 	 * If pools are being disabled, start using the global values
27510Sstevel@tonic-gate 	 * for ncpus and ncpus_online.
27520Sstevel@tonic-gate 	 */
27530Sstevel@tonic-gate 	if (newpsetid == ZONE_PS_INVAL) {
27540Sstevel@tonic-gate 		zone->zone_ncpus = 0;
27550Sstevel@tonic-gate 		zone->zone_ncpus_online = 0;
27560Sstevel@tonic-gate 	}
27570Sstevel@tonic-gate }
27580Sstevel@tonic-gate 
27590Sstevel@tonic-gate /*
27600Sstevel@tonic-gate  * Walk the list of active zones and issue the provided callback for
27610Sstevel@tonic-gate  * each of them.
27620Sstevel@tonic-gate  *
27630Sstevel@tonic-gate  * Caller must not be holding any locks that may be acquired under
27640Sstevel@tonic-gate  * zonehash_lock.  See comment at the beginning of the file for a list of
27650Sstevel@tonic-gate  * common locks and their interactions with zones.
27660Sstevel@tonic-gate  */
27670Sstevel@tonic-gate int
27680Sstevel@tonic-gate zone_walk(int (*cb)(zone_t *, void *), void *data)
27690Sstevel@tonic-gate {
27700Sstevel@tonic-gate 	zone_t *zone;
27710Sstevel@tonic-gate 	int ret = 0;
27720Sstevel@tonic-gate 	zone_status_t status;
27730Sstevel@tonic-gate 
27740Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
27750Sstevel@tonic-gate 	for (zone = list_head(&zone_active); zone != NULL;
27760Sstevel@tonic-gate 	    zone = list_next(&zone_active, zone)) {
27770Sstevel@tonic-gate 		/*
27780Sstevel@tonic-gate 		 * Skip zones that shouldn't be externally visible.
27790Sstevel@tonic-gate 		 */
27800Sstevel@tonic-gate 		status = zone_status_get(zone);
27810Sstevel@tonic-gate 		if (status < ZONE_IS_READY || status > ZONE_IS_DOWN)
27820Sstevel@tonic-gate 			continue;
27830Sstevel@tonic-gate 		/*
27840Sstevel@tonic-gate 		 * Bail immediately if any callback invocation returns a
27850Sstevel@tonic-gate 		 * non-zero value.
27860Sstevel@tonic-gate 		 */
27870Sstevel@tonic-gate 		ret = (*cb)(zone, data);
27880Sstevel@tonic-gate 		if (ret != 0)
27890Sstevel@tonic-gate 			break;
27900Sstevel@tonic-gate 	}
27910Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
27920Sstevel@tonic-gate 	return (ret);
27930Sstevel@tonic-gate }
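
/*
 * A minimal sketch of a zone_walk() consumer; "count_zones" is a
 * hypothetical callback that simply counts the visible zones:
 *
 *	static int
 *	count_zones(zone_t *zp, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t n = 0;
 *	(void) zone_walk(count_zones, &n);
 *
 * A non-zero return from the callback stops the walk and is passed back to
 * the caller of zone_walk().  Note that the callback runs with
 * zonehash_lock held, so it must not call back into zone_find_by_*() or
 * otherwise attempt to acquire zonehash_lock.
 */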
27940Sstevel@tonic-gate 
27950Sstevel@tonic-gate static int
27960Sstevel@tonic-gate zone_set_root(zone_t *zone, const char *upath)
27970Sstevel@tonic-gate {
27980Sstevel@tonic-gate 	vnode_t *vp;
27990Sstevel@tonic-gate 	int trycount;
28000Sstevel@tonic-gate 	int error = 0;
28010Sstevel@tonic-gate 	char *path;
28020Sstevel@tonic-gate 	struct pathname upn, pn;
28030Sstevel@tonic-gate 	size_t pathlen;
28040Sstevel@tonic-gate 
28050Sstevel@tonic-gate 	if ((error = pn_get((char *)upath, UIO_USERSPACE, &upn)) != 0)
28060Sstevel@tonic-gate 		return (error);
28070Sstevel@tonic-gate 
28080Sstevel@tonic-gate 	pn_alloc(&pn);
28090Sstevel@tonic-gate 
28100Sstevel@tonic-gate 	/* prevent infinite loop */
28110Sstevel@tonic-gate 	trycount = 10;
28120Sstevel@tonic-gate 	for (;;) {
28130Sstevel@tonic-gate 		if (--trycount <= 0) {
28140Sstevel@tonic-gate 			error = ESTALE;
28150Sstevel@tonic-gate 			goto out;
28160Sstevel@tonic-gate 		}
28170Sstevel@tonic-gate 
28180Sstevel@tonic-gate 		if ((error = lookuppn(&upn, &pn, FOLLOW, NULLVPP, &vp)) == 0) {
28190Sstevel@tonic-gate 			/*
28200Sstevel@tonic-gate 			 * VOP_ACCESS() may cover 'vp' with a new
28210Sstevel@tonic-gate 			 * filesystem, if 'vp' is an autoFS vnode.
28220Sstevel@tonic-gate 			 * Get the new 'vp' if so.
28230Sstevel@tonic-gate 			 */
28245331Samw 			if ((error =
28255331Samw 			    VOP_ACCESS(vp, VEXEC, 0, CRED(), NULL)) == 0 &&
28264417Seh208807 			    (!vn_ismntpt(vp) ||
28270Sstevel@tonic-gate 			    (error = traverse(&vp)) == 0)) {
28280Sstevel@tonic-gate 				pathlen = pn.pn_pathlen + 2;
28290Sstevel@tonic-gate 				path = kmem_alloc(pathlen, KM_SLEEP);
28300Sstevel@tonic-gate 				(void) strncpy(path, pn.pn_path,
28310Sstevel@tonic-gate 				    pn.pn_pathlen + 1);
28320Sstevel@tonic-gate 				path[pathlen - 2] = '/';
28330Sstevel@tonic-gate 				path[pathlen - 1] = '\0';
28340Sstevel@tonic-gate 				pn_free(&pn);
28350Sstevel@tonic-gate 				pn_free(&upn);
28360Sstevel@tonic-gate 
28370Sstevel@tonic-gate 				/* Success! */
28380Sstevel@tonic-gate 				break;
28390Sstevel@tonic-gate 			}
28400Sstevel@tonic-gate 			VN_RELE(vp);
28410Sstevel@tonic-gate 		}
28420Sstevel@tonic-gate 		if (error != ESTALE)
28430Sstevel@tonic-gate 			goto out;
28440Sstevel@tonic-gate 	}
28450Sstevel@tonic-gate 
28460Sstevel@tonic-gate 	ASSERT(error == 0);
28470Sstevel@tonic-gate 	zone->zone_rootvp = vp;		/* we hold a reference to vp */
28480Sstevel@tonic-gate 	zone->zone_rootpath = path;
28490Sstevel@tonic-gate 	zone->zone_rootpathlen = pathlen;
28501769Scarlsonj 	if (pathlen > 5 && strcmp(path + pathlen - 5, "/lu/") == 0)
28511769Scarlsonj 		zone->zone_flags |= ZF_IS_SCRATCH;
28520Sstevel@tonic-gate 	return (0);
28530Sstevel@tonic-gate 
28540Sstevel@tonic-gate out:
28550Sstevel@tonic-gate 	pn_free(&pn);
28560Sstevel@tonic-gate 	pn_free(&upn);
28570Sstevel@tonic-gate 	return (error);
28580Sstevel@tonic-gate }
28590Sstevel@tonic-gate 
28600Sstevel@tonic-gate #define	isalnum(c)	(((c) >= '0' && (c) <= '9') || \
28610Sstevel@tonic-gate 			((c) >= 'a' && (c) <= 'z') || \
28620Sstevel@tonic-gate 			((c) >= 'A' && (c) <= 'Z'))
28630Sstevel@tonic-gate 
28640Sstevel@tonic-gate static int
28650Sstevel@tonic-gate zone_set_name(zone_t *zone, const char *uname)
28660Sstevel@tonic-gate {
28670Sstevel@tonic-gate 	char *kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
28680Sstevel@tonic-gate 	size_t len;
28690Sstevel@tonic-gate 	int i, err;
28700Sstevel@tonic-gate 
28710Sstevel@tonic-gate 	if ((err = copyinstr(uname, kname, ZONENAME_MAX, &len)) != 0) {
28720Sstevel@tonic-gate 		kmem_free(kname, ZONENAME_MAX);
28730Sstevel@tonic-gate 		return (err);	/* EFAULT or ENAMETOOLONG */
28740Sstevel@tonic-gate 	}
28750Sstevel@tonic-gate 
28760Sstevel@tonic-gate 	/* must be less than ZONENAME_MAX */
28770Sstevel@tonic-gate 	if (len == ZONENAME_MAX && kname[ZONENAME_MAX - 1] != '\0') {
28780Sstevel@tonic-gate 		kmem_free(kname, ZONENAME_MAX);
28790Sstevel@tonic-gate 		return (EINVAL);
28800Sstevel@tonic-gate 	}
28810Sstevel@tonic-gate 
28820Sstevel@tonic-gate 	/*
28830Sstevel@tonic-gate 	 * Name must start with an alphanumeric and must contain only
28840Sstevel@tonic-gate 	 * alphanumerics, '-', '_' and '.'.
28850Sstevel@tonic-gate 	 */
28860Sstevel@tonic-gate 	if (!isalnum(kname[0])) {
28870Sstevel@tonic-gate 		kmem_free(kname, ZONENAME_MAX);
28880Sstevel@tonic-gate 		return (EINVAL);
28890Sstevel@tonic-gate 	}
28900Sstevel@tonic-gate 	for (i = 1; i < len - 1; i++) {
28910Sstevel@tonic-gate 		if (!isalnum(kname[i]) && kname[i] != '-' && kname[i] != '_' &&
28920Sstevel@tonic-gate 		    kname[i] != '.') {
28930Sstevel@tonic-gate 			kmem_free(kname, ZONENAME_MAX);
28940Sstevel@tonic-gate 			return (EINVAL);
28950Sstevel@tonic-gate 		}
28960Sstevel@tonic-gate 	}
28970Sstevel@tonic-gate 
28980Sstevel@tonic-gate 	zone->zone_name = kname;
28990Sstevel@tonic-gate 	return (0);
29000Sstevel@tonic-gate }
29010Sstevel@tonic-gate 
29020Sstevel@tonic-gate /*
29038662SJordan.Vaughan@Sun.com  * Gets the 32-bit hostid of the specified zone as an unsigned int.  If 'zonep'
29048662SJordan.Vaughan@Sun.com  * is NULL or it points to a zone with no hostid emulation, then the machine's
29058662SJordan.Vaughan@Sun.com  * hostid (i.e., the global zone's hostid) is returned.  This function returns
29068662SJordan.Vaughan@Sun.com  * zero if neither the zone nor the host machine (global zone) has a hostid.
29078662SJordan.Vaughan@Sun.com  * It returns HW_INVALID_HOSTID if it falls back to the machine's hostid and
29088662SJordan.Vaughan@Sun.com  * that hostid is invalid.
29098662SJordan.Vaughan@Sun.com  */
29108662SJordan.Vaughan@Sun.com uint32_t
29118662SJordan.Vaughan@Sun.com zone_get_hostid(zone_t *zonep)
29128662SJordan.Vaughan@Sun.com {
29138662SJordan.Vaughan@Sun.com 	unsigned long machine_hostid;
29148662SJordan.Vaughan@Sun.com 
29158662SJordan.Vaughan@Sun.com 	if (zonep == NULL || zonep->zone_hostid == HW_INVALID_HOSTID) {
29168662SJordan.Vaughan@Sun.com 		if (ddi_strtoul(hw_serial, NULL, 10, &machine_hostid) != 0)
29178662SJordan.Vaughan@Sun.com 			return (HW_INVALID_HOSTID);
29188662SJordan.Vaughan@Sun.com 		return ((uint32_t)machine_hostid);
29198662SJordan.Vaughan@Sun.com 	}
29208662SJordan.Vaughan@Sun.com 	return (zonep->zone_hostid);
29218662SJordan.Vaughan@Sun.com }
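
/*
 * A brief sketch of the fallback behavior described above:
 *
 *	uint32_t hostid = zone_get_hostid(curproc->p_zone);
 *
 * returns the current zone's emulated hostid if one has been configured,
 * and otherwise (or if a NULL zone pointer is passed) falls back to the
 * machine's hostid, which may be HW_INVALID_HOSTID if hw_serial cannot be
 * parsed.
 */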
29228662SJordan.Vaughan@Sun.com 
29238662SJordan.Vaughan@Sun.com /*
29240Sstevel@tonic-gate  * Similar to thread_create(), but makes sure the thread is in the appropriate
29250Sstevel@tonic-gate  * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
29260Sstevel@tonic-gate  */
29270Sstevel@tonic-gate /*ARGSUSED*/
29280Sstevel@tonic-gate kthread_t *
29290Sstevel@tonic-gate zthread_create(
29300Sstevel@tonic-gate     caddr_t stk,
29310Sstevel@tonic-gate     size_t stksize,
29320Sstevel@tonic-gate     void (*proc)(),
29330Sstevel@tonic-gate     void *arg,
29340Sstevel@tonic-gate     size_t len,
29350Sstevel@tonic-gate     pri_t pri)
29360Sstevel@tonic-gate {
29370Sstevel@tonic-gate 	kthread_t *t;
29380Sstevel@tonic-gate 	zone_t *zone = curproc->p_zone;
29390Sstevel@tonic-gate 	proc_t *pp = zone->zone_zsched;
29400Sstevel@tonic-gate 
29410Sstevel@tonic-gate 	zone_hold(zone);	/* Reference to be dropped when thread exits */
29420Sstevel@tonic-gate 
29430Sstevel@tonic-gate 	/*
29440Sstevel@tonic-gate 	 * No-one should be trying to create threads if the zone is shutting
29450Sstevel@tonic-gate 	 * down and there aren't any kernel threads around.  See comment
29460Sstevel@tonic-gate 	 * in zthread_exit().
29470Sstevel@tonic-gate 	 */
29480Sstevel@tonic-gate 	ASSERT(!(zone->zone_kthreads == NULL &&
29490Sstevel@tonic-gate 	    zone_status_get(zone) >= ZONE_IS_EMPTY));
29500Sstevel@tonic-gate 	/*
29510Sstevel@tonic-gate 	 * Create a thread, but don't let it run until we've finished setting
29520Sstevel@tonic-gate 	 * things up.
29530Sstevel@tonic-gate 	 */
29540Sstevel@tonic-gate 	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
29550Sstevel@tonic-gate 	ASSERT(t->t_forw == NULL);
29560Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
29570Sstevel@tonic-gate 	if (zone->zone_kthreads == NULL) {
29580Sstevel@tonic-gate 		t->t_forw = t->t_back = t;
29590Sstevel@tonic-gate 	} else {
29600Sstevel@tonic-gate 		kthread_t *tx = zone->zone_kthreads;
29610Sstevel@tonic-gate 
29620Sstevel@tonic-gate 		t->t_forw = tx;
29630Sstevel@tonic-gate 		t->t_back = tx->t_back;
29640Sstevel@tonic-gate 		tx->t_back->t_forw = t;
29650Sstevel@tonic-gate 		tx->t_back = t;
29660Sstevel@tonic-gate 	}
29670Sstevel@tonic-gate 	zone->zone_kthreads = t;
29680Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
29690Sstevel@tonic-gate 
29700Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
29710Sstevel@tonic-gate 	t->t_proc_flag |= TP_ZTHREAD;
29720Sstevel@tonic-gate 	project_rele(t->t_proj);
29730Sstevel@tonic-gate 	t->t_proj = project_hold(pp->p_task->tk_proj);
29740Sstevel@tonic-gate 
29750Sstevel@tonic-gate 	/*
29760Sstevel@tonic-gate 	 * Setup complete, let it run.
29770Sstevel@tonic-gate 	 */
29780Sstevel@tonic-gate 	thread_lock(t);
29790Sstevel@tonic-gate 	t->t_schedflag |= TS_ALLSTART;
29800Sstevel@tonic-gate 	setrun_locked(t);
29810Sstevel@tonic-gate 	thread_unlock(t);
29820Sstevel@tonic-gate 
29830Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
29840Sstevel@tonic-gate 
29850Sstevel@tonic-gate 	return (t);
29860Sstevel@tonic-gate }
29870Sstevel@tonic-gate 
29880Sstevel@tonic-gate /*
29890Sstevel@tonic-gate  * Similar to thread_exit().  Must be called by threads created via
29900Sstevel@tonic-gate  * zthread_create().
29910Sstevel@tonic-gate  */
29920Sstevel@tonic-gate void
29930Sstevel@tonic-gate zthread_exit(void)
29940Sstevel@tonic-gate {
29950Sstevel@tonic-gate 	kthread_t *t = curthread;
29960Sstevel@tonic-gate 	proc_t *pp = curproc;
29970Sstevel@tonic-gate 	zone_t *zone = pp->p_zone;
29980Sstevel@tonic-gate 
29990Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
30000Sstevel@tonic-gate 
30010Sstevel@tonic-gate 	/*
30020Sstevel@tonic-gate 	 * Reparent to p0
30030Sstevel@tonic-gate 	 */
30041075Sjosephb 	kpreempt_disable();
30050Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
30060Sstevel@tonic-gate 	t->t_proc_flag &= ~TP_ZTHREAD;
30070Sstevel@tonic-gate 	t->t_procp = &p0;
30080Sstevel@tonic-gate 	hat_thread_exit(t);
30090Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
30101075Sjosephb 	kpreempt_enable();
30110Sstevel@tonic-gate 
30120Sstevel@tonic-gate 	if (t->t_back == t) {
30130Sstevel@tonic-gate 		ASSERT(t->t_forw == t);
30140Sstevel@tonic-gate 		/*
30150Sstevel@tonic-gate 		 * If the zone is empty, once the thread count
30160Sstevel@tonic-gate 		 * goes to zero no further kernel threads can be
30170Sstevel@tonic-gate 		 * created.  This is because if the creator is a process
30180Sstevel@tonic-gate 		 * in the zone, then it must have exited before the zone
30190Sstevel@tonic-gate 		 * state could be set to ZONE_IS_EMPTY.
30200Sstevel@tonic-gate 		 * Otherwise, if the creator is a kernel thread in the
30210Sstevel@tonic-gate 		 * zone, the thread count is non-zero.
30220Sstevel@tonic-gate 		 *
30230Sstevel@tonic-gate 		 * This really means that non-zone kernel threads should
30240Sstevel@tonic-gate 		 * not create zone kernel threads.
30250Sstevel@tonic-gate 		 */
30260Sstevel@tonic-gate 		zone->zone_kthreads = NULL;
30270Sstevel@tonic-gate 		if (zone_status_get(zone) == ZONE_IS_EMPTY) {
30280Sstevel@tonic-gate 			zone_status_set(zone, ZONE_IS_DOWN);
30293792Sakolb 			/*
30303792Sakolb 			 * Remove any CPU caps on this zone.
30313792Sakolb 			 */
30323792Sakolb 			cpucaps_zone_remove(zone);
30330Sstevel@tonic-gate 		}
30340Sstevel@tonic-gate 	} else {
30350Sstevel@tonic-gate 		t->t_forw->t_back = t->t_back;
30360Sstevel@tonic-gate 		t->t_back->t_forw = t->t_forw;
30370Sstevel@tonic-gate 		if (zone->zone_kthreads == t)
30380Sstevel@tonic-gate 			zone->zone_kthreads = t->t_forw;
30390Sstevel@tonic-gate 	}
30400Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
30410Sstevel@tonic-gate 	zone_rele(zone);
30420Sstevel@tonic-gate 	thread_exit();
30430Sstevel@tonic-gate 	/* NOTREACHED */
30440Sstevel@tonic-gate }
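
/*
 * A minimal sketch of how the two routines above are paired; "my_worker"
 * and "my_arg" are hypothetical.  The creating thread must already be
 * running in the target zone, since zthread_create() parents the new
 * thread to curproc->p_zone's zsched:
 *
 *	static void
 *	my_worker(void *my_arg)
 *	{
 *		...do per-zone work...
 *		zthread_exit();
 *	}
 *
 *	kthread_t *t = zthread_create(NULL, 0, my_worker, my_arg, 0,
 *	    minclsyspri);
 *
 * The zone_hold() taken in zthread_create() is dropped by the matching
 * zone_rele() in zthread_exit(), so every thread created this way must
 * terminate via zthread_exit().
 */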
30450Sstevel@tonic-gate 
30460Sstevel@tonic-gate static void
30470Sstevel@tonic-gate zone_chdir(vnode_t *vp, vnode_t **vpp, proc_t *pp)
30480Sstevel@tonic-gate {
30490Sstevel@tonic-gate 	vnode_t *oldvp;
30500Sstevel@tonic-gate 
30510Sstevel@tonic-gate 	/* we're going to hold a reference here to the directory */
30520Sstevel@tonic-gate 	VN_HOLD(vp);
30530Sstevel@tonic-gate 
30540Sstevel@tonic-gate 	if (audit_active)	/* update abs cwd/root path see c2audit.c */
30550Sstevel@tonic-gate 		audit_chdirec(vp, vpp);
30560Sstevel@tonic-gate 
30570Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
30580Sstevel@tonic-gate 	oldvp = *vpp;
30590Sstevel@tonic-gate 	*vpp = vp;
30600Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
30610Sstevel@tonic-gate 	if (oldvp != NULL)
30620Sstevel@tonic-gate 		VN_RELE(oldvp);
30630Sstevel@tonic-gate }
30640Sstevel@tonic-gate 
30650Sstevel@tonic-gate /*
30660Sstevel@tonic-gate  * Convert an rctl value represented by an nvlist_t into an rctl_val_t.
30670Sstevel@tonic-gate  */
30680Sstevel@tonic-gate static int
30690Sstevel@tonic-gate nvlist2rctlval(nvlist_t *nvl, rctl_val_t *rv)
30700Sstevel@tonic-gate {
30710Sstevel@tonic-gate 	nvpair_t *nvp = NULL;
30720Sstevel@tonic-gate 	boolean_t priv_set = B_FALSE;
30730Sstevel@tonic-gate 	boolean_t limit_set = B_FALSE;
30740Sstevel@tonic-gate 	boolean_t action_set = B_FALSE;
30750Sstevel@tonic-gate 
30760Sstevel@tonic-gate 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
30770Sstevel@tonic-gate 		const char *name;
30780Sstevel@tonic-gate 		uint64_t ui64;
30790Sstevel@tonic-gate 
30800Sstevel@tonic-gate 		name = nvpair_name(nvp);
30810Sstevel@tonic-gate 		if (nvpair_type(nvp) != DATA_TYPE_UINT64)
30820Sstevel@tonic-gate 			return (EINVAL);
30830Sstevel@tonic-gate 		(void) nvpair_value_uint64(nvp, &ui64);
30840Sstevel@tonic-gate 		if (strcmp(name, "privilege") == 0) {
30850Sstevel@tonic-gate 			/*
30860Sstevel@tonic-gate 			 * Currently only privileged values are allowed, but
30870Sstevel@tonic-gate 			 * this may change in the future.
30880Sstevel@tonic-gate 			 */
30890Sstevel@tonic-gate 			if (ui64 != RCPRIV_PRIVILEGED)
30900Sstevel@tonic-gate 				return (EINVAL);
30910Sstevel@tonic-gate 			rv->rcv_privilege = ui64;
30920Sstevel@tonic-gate 			priv_set = B_TRUE;
30930Sstevel@tonic-gate 		} else if (strcmp(name, "limit") == 0) {
30940Sstevel@tonic-gate 			rv->rcv_value = ui64;
30950Sstevel@tonic-gate 			limit_set = B_TRUE;
30960Sstevel@tonic-gate 		} else if (strcmp(name, "action") == 0) {
30970Sstevel@tonic-gate 			if (ui64 != RCTL_LOCAL_NOACTION &&
30980Sstevel@tonic-gate 			    ui64 != RCTL_LOCAL_DENY)
30990Sstevel@tonic-gate 				return (EINVAL);
31000Sstevel@tonic-gate 			rv->rcv_flagaction = ui64;
31010Sstevel@tonic-gate 			action_set = B_TRUE;
31020Sstevel@tonic-gate 		} else {
31030Sstevel@tonic-gate 			return (EINVAL);
31040Sstevel@tonic-gate 		}
31050Sstevel@tonic-gate 	}
31060Sstevel@tonic-gate 
31070Sstevel@tonic-gate 	if (!(priv_set && limit_set && action_set))
31080Sstevel@tonic-gate 		return (EINVAL);
31090Sstevel@tonic-gate 	rv->rcv_action_signal = 0;
31100Sstevel@tonic-gate 	rv->rcv_action_recipient = NULL;
31110Sstevel@tonic-gate 	rv->rcv_action_recip_pid = -1;
31120Sstevel@tonic-gate 	rv->rcv_firing_time = 0;
31130Sstevel@tonic-gate 
31140Sstevel@tonic-gate 	return (0);
31150Sstevel@tonic-gate }
31160Sstevel@tonic-gate 
31172267Sdp /*
31182267Sdp  * Non-global zone version of start_init.
31192267Sdp  */
31200Sstevel@tonic-gate void
31212267Sdp zone_start_init(void)
31220Sstevel@tonic-gate {
31230Sstevel@tonic-gate 	proc_t *p = ttoproc(curthread);
31242712Snn35248 	zone_t *z = p->p_zone;
31252267Sdp 
31262267Sdp 	ASSERT(!INGLOBALZONE(curproc));
31270Sstevel@tonic-gate 
31280Sstevel@tonic-gate 	/*
31292712Snn35248 	 * For all purposes (ZONE_ATTR_INITPID and restart_init),
31302712Snn35248 	 * storing just the pid of init is sufficient.
31312712Snn35248 	 */
31322712Snn35248 	z->zone_proc_initpid = p->p_pid;
31332712Snn35248 
31342712Snn35248 	/*
31352267Sdp 	 * We maintain zone_boot_err so that we can return the cause of the
31362267Sdp 	 * failure back to the caller of the zone_boot syscall.
31370Sstevel@tonic-gate 	 */
31382267Sdp 	z->zone_boot_err = start_init_common();
31390Sstevel@tonic-gate 
31408364SJordan.Vaughan@Sun.com 	/*
31418364SJordan.Vaughan@Sun.com 	 * Prevent a booting zone from transitioning to the running state if
31428364SJordan.Vaughan@Sun.com 	 * the global zone is shutting down.
31438364SJordan.Vaughan@Sun.com 	 */
31440Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
31458364SJordan.Vaughan@Sun.com 	if (z->zone_boot_err != 0 || zone_status_get(global_zone) >=
31468364SJordan.Vaughan@Sun.com 	    ZONE_IS_SHUTTING_DOWN) {
31470Sstevel@tonic-gate 		/*
31480Sstevel@tonic-gate 		 * Make sure we are still in the booting state-- we could have
31490Sstevel@tonic-gate 		 * raced and already be shutting down, or even further along.
31500Sstevel@tonic-gate 		 */
31513792Sakolb 		if (zone_status_get(z) == ZONE_IS_BOOTING) {
31522712Snn35248 			zone_status_set(z, ZONE_IS_SHUTTING_DOWN);
31533792Sakolb 		}
31540Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
31550Sstevel@tonic-gate 		/* It's gone bad, dispose of the process */
31562712Snn35248 		if (proc_exit(CLD_EXITED, z->zone_boot_err) != 0) {
3157390Sraf 			mutex_enter(&p->p_lock);
3158390Sraf 			ASSERT(p->p_flag & SEXITLWPS);
31590Sstevel@tonic-gate 			lwp_exit();
31600Sstevel@tonic-gate 		}
31610Sstevel@tonic-gate 	} else {
31622712Snn35248 		if (zone_status_get(z) == ZONE_IS_BOOTING)
31632712Snn35248 			zone_status_set(z, ZONE_IS_RUNNING);
31640Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
31650Sstevel@tonic-gate 		/* cause the process to return to userland. */
31660Sstevel@tonic-gate 		lwp_rtt();
31670Sstevel@tonic-gate 	}
31680Sstevel@tonic-gate }
31690Sstevel@tonic-gate 
31700Sstevel@tonic-gate struct zsched_arg {
31710Sstevel@tonic-gate 	zone_t *zone;
31720Sstevel@tonic-gate 	nvlist_t *nvlist;
31730Sstevel@tonic-gate };
31740Sstevel@tonic-gate 
31750Sstevel@tonic-gate /*
31760Sstevel@tonic-gate  * Per-zone "sched" workalike.  The similarity to "sched" doesn't have
31770Sstevel@tonic-gate  * anything to do with scheduling, but rather with the fact that
31780Sstevel@tonic-gate  * per-zone kernel threads are parented to zsched, just like regular
31790Sstevel@tonic-gate  * kernel threads are parented to sched (p0).
31800Sstevel@tonic-gate  *
31810Sstevel@tonic-gate  * zsched is also responsible for launching init for the zone.
31820Sstevel@tonic-gate  */
31830Sstevel@tonic-gate static void
31840Sstevel@tonic-gate zsched(void *arg)
31850Sstevel@tonic-gate {
31860Sstevel@tonic-gate 	struct zsched_arg *za = arg;
31870Sstevel@tonic-gate 	proc_t *pp = curproc;
31880Sstevel@tonic-gate 	proc_t *initp = proc_init;
31890Sstevel@tonic-gate 	zone_t *zone = za->zone;
31900Sstevel@tonic-gate 	cred_t *cr, *oldcred;
31910Sstevel@tonic-gate 	rctl_set_t *set;
31920Sstevel@tonic-gate 	rctl_alloc_gp_t *gp;
31930Sstevel@tonic-gate 	contract_t *ct = NULL;
31940Sstevel@tonic-gate 	task_t *tk, *oldtk;
31950Sstevel@tonic-gate 	rctl_entity_p_t e;
31960Sstevel@tonic-gate 	kproject_t *pj;
31970Sstevel@tonic-gate 
31980Sstevel@tonic-gate 	nvlist_t *nvl = za->nvlist;
31990Sstevel@tonic-gate 	nvpair_t *nvp = NULL;
32000Sstevel@tonic-gate 
32013446Smrj 	bcopy("zsched", PTOU(pp)->u_psargs, sizeof ("zsched"));
32023446Smrj 	bcopy("zsched", PTOU(pp)->u_comm, sizeof ("zsched"));
32033446Smrj 	PTOU(pp)->u_argc = 0;
32043446Smrj 	PTOU(pp)->u_argv = NULL;
32053446Smrj 	PTOU(pp)->u_envp = NULL;
32060Sstevel@tonic-gate 	closeall(P_FINFO(pp));
32070Sstevel@tonic-gate 
32080Sstevel@tonic-gate 	/*
32090Sstevel@tonic-gate 	 * We are this zone's "zsched" process.  As the zone isn't generally
32100Sstevel@tonic-gate 	 * visible yet we don't need to grab any locks before initializing its
32110Sstevel@tonic-gate 	 * zone_zsched pointer.
32120Sstevel@tonic-gate 	 */
32130Sstevel@tonic-gate 	zone_hold(zone);  /* this hold is released by zone_destroy() */
32140Sstevel@tonic-gate 	zone->zone_zsched = pp;
32150Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
32160Sstevel@tonic-gate 	pp->p_zone = zone;
32170Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
32180Sstevel@tonic-gate 
32190Sstevel@tonic-gate 	/*
32200Sstevel@tonic-gate 	 * Disassociate process from its 'parent'; parent ourselves to init
32210Sstevel@tonic-gate 	 * (pid 1) and change other values as needed.
32220Sstevel@tonic-gate 	 */
32230Sstevel@tonic-gate 	sess_create();
32240Sstevel@tonic-gate 
32250Sstevel@tonic-gate 	mutex_enter(&pidlock);
32260Sstevel@tonic-gate 	proc_detach(pp);
32270Sstevel@tonic-gate 	pp->p_ppid = 1;
32280Sstevel@tonic-gate 	pp->p_flag |= SZONETOP;
32290Sstevel@tonic-gate 	pp->p_ancpid = 1;
32300Sstevel@tonic-gate 	pp->p_parent = initp;
32310Sstevel@tonic-gate 	pp->p_psibling = NULL;
32320Sstevel@tonic-gate 	if (initp->p_child)
32330Sstevel@tonic-gate 		initp->p_child->p_psibling = pp;
32340Sstevel@tonic-gate 	pp->p_sibling = initp->p_child;
32350Sstevel@tonic-gate 	initp->p_child = pp;
32360Sstevel@tonic-gate 
32370Sstevel@tonic-gate 	/* Decrement what newproc() incremented. */
32380Sstevel@tonic-gate 	upcount_dec(crgetruid(CRED()), GLOBAL_ZONEID);
32390Sstevel@tonic-gate 	/*
32400Sstevel@tonic-gate 	 * Our credentials are about to become kcred-like, so we don't care
32410Sstevel@tonic-gate 	 * about the caller's ruid.
32420Sstevel@tonic-gate 	 */
32430Sstevel@tonic-gate 	upcount_inc(crgetruid(kcred), zone->zone_id);
32440Sstevel@tonic-gate 	mutex_exit(&pidlock);
32450Sstevel@tonic-gate 
32460Sstevel@tonic-gate 	/*
32470Sstevel@tonic-gate 	 * getting out of global zone, so decrement lwp counts
32480Sstevel@tonic-gate 	 */
32490Sstevel@tonic-gate 	pj = pp->p_task->tk_proj;
32500Sstevel@tonic-gate 	mutex_enter(&global_zone->zone_nlwps_lock);
32510Sstevel@tonic-gate 	pj->kpj_nlwps -= pp->p_lwpcnt;
32520Sstevel@tonic-gate 	global_zone->zone_nlwps -= pp->p_lwpcnt;
32530Sstevel@tonic-gate 	mutex_exit(&global_zone->zone_nlwps_lock);
32540Sstevel@tonic-gate 
32550Sstevel@tonic-gate 	/*
32562768Ssl108498 	 * Decrement locked memory counts on old zone and project.
32572768Ssl108498 	 */
32583247Sgjelinek 	mutex_enter(&global_zone->zone_mem_lock);
32592768Ssl108498 	global_zone->zone_locked_mem -= pp->p_locked_mem;
32602768Ssl108498 	pj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
32613247Sgjelinek 	mutex_exit(&global_zone->zone_mem_lock);
32622768Ssl108498 
32632768Ssl108498 	/*
32640Sstevel@tonic-gate 	 * Create and join a new task in project '0' of this zone.
32650Sstevel@tonic-gate 	 *
32660Sstevel@tonic-gate 	 * We don't need to call holdlwps() since we know we're the only lwp in
32670Sstevel@tonic-gate 	 * this process.
32680Sstevel@tonic-gate 	 *
32690Sstevel@tonic-gate 	 * task_join() returns with p_lock held.
32700Sstevel@tonic-gate 	 */
32710Sstevel@tonic-gate 	tk = task_create(0, zone);
32720Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
32730Sstevel@tonic-gate 	oldtk = task_join(tk, 0);
32742768Ssl108498 
32752768Ssl108498 	pj = pp->p_task->tk_proj;
32762768Ssl108498 
32773247Sgjelinek 	mutex_enter(&zone->zone_mem_lock);
32782768Ssl108498 	zone->zone_locked_mem += pp->p_locked_mem;
32792768Ssl108498 	pj->kpj_data.kpd_locked_mem += pp->p_locked_mem;
32803247Sgjelinek 	mutex_exit(&zone->zone_mem_lock);
32810Sstevel@tonic-gate 
32820Sstevel@tonic-gate 	/*
32830Sstevel@tonic-gate 	 * Add lwp counts to zsched's zone, and increment the project's task
32840Sstevel@tonic-gate 	 * count due to the task created by the task_create()/task_join() above.
32850Sstevel@tonic-gate 	 */
32862768Ssl108498 
32870Sstevel@tonic-gate 	mutex_enter(&zone->zone_nlwps_lock);
32880Sstevel@tonic-gate 	pj->kpj_nlwps += pp->p_lwpcnt;
32890Sstevel@tonic-gate 	pj->kpj_ntasks += 1;
32900Sstevel@tonic-gate 	zone->zone_nlwps += pp->p_lwpcnt;
32910Sstevel@tonic-gate 	mutex_exit(&zone->zone_nlwps_lock);
32920Sstevel@tonic-gate 
32932768Ssl108498 	mutex_exit(&curproc->p_lock);
32942768Ssl108498 	mutex_exit(&cpu_lock);
32952768Ssl108498 	task_rele(oldtk);
32962768Ssl108498 
32970Sstevel@tonic-gate 	/*
32980Sstevel@tonic-gate 	 * The process was created by a process in the global zone, hence the
32990Sstevel@tonic-gate 	 * credentials are wrong.  We might as well have kcred-ish credentials.
33000Sstevel@tonic-gate 	 */
33010Sstevel@tonic-gate 	cr = zone->zone_kcred;
33020Sstevel@tonic-gate 	crhold(cr);
33030Sstevel@tonic-gate 	mutex_enter(&pp->p_crlock);
33040Sstevel@tonic-gate 	oldcred = pp->p_cred;
33050Sstevel@tonic-gate 	pp->p_cred = cr;
33060Sstevel@tonic-gate 	mutex_exit(&pp->p_crlock);
33070Sstevel@tonic-gate 	crfree(oldcred);
33080Sstevel@tonic-gate 
33090Sstevel@tonic-gate 	/*
33100Sstevel@tonic-gate 	 * Hold credentials again (for thread)
33110Sstevel@tonic-gate 	 */
33120Sstevel@tonic-gate 	crhold(cr);
33130Sstevel@tonic-gate 
33140Sstevel@tonic-gate 	/*
33150Sstevel@tonic-gate 	 * p_lwpcnt can't change since this is a kernel process.
33160Sstevel@tonic-gate 	 */
33170Sstevel@tonic-gate 	crset(pp, cr);
33180Sstevel@tonic-gate 
33190Sstevel@tonic-gate 	/*
33200Sstevel@tonic-gate 	 * Chroot
33210Sstevel@tonic-gate 	 */
33220Sstevel@tonic-gate 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_cdir, pp);
33230Sstevel@tonic-gate 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_rdir, pp);
33240Sstevel@tonic-gate 
33250Sstevel@tonic-gate 	/*
33260Sstevel@tonic-gate 	 * Initialize zone's rctl set.
33270Sstevel@tonic-gate 	 */
33280Sstevel@tonic-gate 	set = rctl_set_create();
33290Sstevel@tonic-gate 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
33300Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
33310Sstevel@tonic-gate 	e.rcep_p.zone = zone;
33320Sstevel@tonic-gate 	e.rcep_t = RCENTITY_ZONE;
33330Sstevel@tonic-gate 	zone->zone_rctls = rctl_set_init(RCENTITY_ZONE, pp, &e, set, gp);
33340Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
33350Sstevel@tonic-gate 	rctl_prealloc_destroy(gp);
33360Sstevel@tonic-gate 
33370Sstevel@tonic-gate 	/*
33380Sstevel@tonic-gate 	 * Apply the rctls passed in to zone_create().  This is basically a list
33390Sstevel@tonic-gate 	 * assignment: all of the old values are removed and the new ones
33400Sstevel@tonic-gate 	 * inserted.  That is, if an empty list is passed in, all values are
33410Sstevel@tonic-gate 	 * removed.
33420Sstevel@tonic-gate 	 */
33430Sstevel@tonic-gate 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
33440Sstevel@tonic-gate 		rctl_dict_entry_t *rde;
33450Sstevel@tonic-gate 		rctl_hndl_t hndl;
33460Sstevel@tonic-gate 		char *name;
33470Sstevel@tonic-gate 		nvlist_t **nvlarray;
33480Sstevel@tonic-gate 		uint_t i, nelem;
33490Sstevel@tonic-gate 		int error;	/* For ASSERT()s */
33500Sstevel@tonic-gate 
33510Sstevel@tonic-gate 		name = nvpair_name(nvp);
33520Sstevel@tonic-gate 		hndl = rctl_hndl_lookup(name);
33530Sstevel@tonic-gate 		ASSERT(hndl != -1);
33540Sstevel@tonic-gate 		rde = rctl_dict_lookup_hndl(hndl);
33550Sstevel@tonic-gate 		ASSERT(rde != NULL);
33560Sstevel@tonic-gate 
33570Sstevel@tonic-gate 		for (; /* ever */; ) {
33580Sstevel@tonic-gate 			rctl_val_t oval;
33590Sstevel@tonic-gate 
33600Sstevel@tonic-gate 			mutex_enter(&pp->p_lock);
33610Sstevel@tonic-gate 			error = rctl_local_get(hndl, NULL, &oval, pp);
33620Sstevel@tonic-gate 			mutex_exit(&pp->p_lock);
33630Sstevel@tonic-gate 			ASSERT(error == 0);	/* Can't fail for RCTL_FIRST */
33640Sstevel@tonic-gate 			ASSERT(oval.rcv_privilege != RCPRIV_BASIC);
33650Sstevel@tonic-gate 			if (oval.rcv_privilege == RCPRIV_SYSTEM)
33660Sstevel@tonic-gate 				break;
33670Sstevel@tonic-gate 			mutex_enter(&pp->p_lock);
33680Sstevel@tonic-gate 			error = rctl_local_delete(hndl, &oval, pp);
33690Sstevel@tonic-gate 			mutex_exit(&pp->p_lock);
33700Sstevel@tonic-gate 			ASSERT(error == 0);
33710Sstevel@tonic-gate 		}
33720Sstevel@tonic-gate 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
33730Sstevel@tonic-gate 		ASSERT(error == 0);
33740Sstevel@tonic-gate 		for (i = 0; i < nelem; i++) {
33750Sstevel@tonic-gate 			rctl_val_t *nvalp;
33760Sstevel@tonic-gate 
33770Sstevel@tonic-gate 			nvalp = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
33780Sstevel@tonic-gate 			error = nvlist2rctlval(nvlarray[i], nvalp);
33790Sstevel@tonic-gate 			ASSERT(error == 0);
33800Sstevel@tonic-gate 			/*
33810Sstevel@tonic-gate 			 * rctl_local_insert can fail if the value being
33820Sstevel@tonic-gate 			 * inserted is a duplicate; this is OK.
33830Sstevel@tonic-gate 			 */
33840Sstevel@tonic-gate 			mutex_enter(&pp->p_lock);
33850Sstevel@tonic-gate 			if (rctl_local_insert(hndl, nvalp, pp) != 0)
33860Sstevel@tonic-gate 				kmem_cache_free(rctl_val_cache, nvalp);
33870Sstevel@tonic-gate 			mutex_exit(&pp->p_lock);
33880Sstevel@tonic-gate 		}
33890Sstevel@tonic-gate 	}
33900Sstevel@tonic-gate 	/*
33910Sstevel@tonic-gate 	 * Tell the world that we're done setting up.
33920Sstevel@tonic-gate 	 *
33935880Snordmark 	 * At this point we want to set the zone status to ZONE_IS_INITIALIZED
33940Sstevel@tonic-gate 	 * and atomically set the zone's processor set visibility.  Once
33950Sstevel@tonic-gate 	 * we drop pool_lock() this zone will automatically get updated
33960Sstevel@tonic-gate 	 * to reflect any future changes to the pools configuration.
33975880Snordmark 	 *
33985880Snordmark 	 * Note that after we drop the locks below (zonehash_lock in
33995880Snordmark 	 * particular) other operations such as a zone_getattr call can
34005880Snordmark 	 * now proceed and observe the zone. That is the reason for doing a
34015880Snordmark 	 * state transition to the INITIALIZED state.
34020Sstevel@tonic-gate 	 */
34030Sstevel@tonic-gate 	pool_lock();
34040Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
34050Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
34060Sstevel@tonic-gate 	zone_uniqid(zone);
34070Sstevel@tonic-gate 	zone_zsd_configure(zone);
34080Sstevel@tonic-gate 	if (pool_state == POOL_ENABLED)
34090Sstevel@tonic-gate 		zone_pset_set(zone, pool_default->pool_pset->pset_id);
34100Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
34110Sstevel@tonic-gate 	ASSERT(zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
34125880Snordmark 	zone_status_set(zone, ZONE_IS_INITIALIZED);
34130Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
34140Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
34150Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
34160Sstevel@tonic-gate 	pool_unlock();
34170Sstevel@tonic-gate 
34185880Snordmark 	/* Now call the ZSD create callbacks for all registered keys */
34195880Snordmark 	zsd_apply_all_keys(zsd_apply_create, zone);
34205880Snordmark 
34215880Snordmark 	/* The callbacks are complete. Mark ZONE_IS_READY */
34225880Snordmark 	mutex_enter(&zone_status_lock);
34235880Snordmark 	ASSERT(zone_status_get(zone) == ZONE_IS_INITIALIZED);
34245880Snordmark 	zone_status_set(zone, ZONE_IS_READY);
34255880Snordmark 	mutex_exit(&zone_status_lock);
34265880Snordmark 
34270Sstevel@tonic-gate 	/*
34280Sstevel@tonic-gate 	 * Once we see the zone transition to the ZONE_IS_BOOTING state,
34290Sstevel@tonic-gate 	 * we launch init, and set the state to running.
34300Sstevel@tonic-gate 	 */
34310Sstevel@tonic-gate 	zone_status_wait_cpr(zone, ZONE_IS_BOOTING, "zsched");
34320Sstevel@tonic-gate 
34330Sstevel@tonic-gate 	if (zone_status_get(zone) == ZONE_IS_BOOTING) {
34340Sstevel@tonic-gate 		id_t cid;
34350Sstevel@tonic-gate 
34360Sstevel@tonic-gate 		/*
34370Sstevel@tonic-gate 		 * Ok, this is a little complicated.  We need to grab the
34380Sstevel@tonic-gate 		 * zone's pool's scheduling class ID; note that by now, we
34390Sstevel@tonic-gate 		 * are already bound to a pool if we need to be (zoneadmd
34400Sstevel@tonic-gate 		 * will have done that to us while we're in the READY
34410Sstevel@tonic-gate 		 * state).  *But* the scheduling class for the zone's 'init'
34420Sstevel@tonic-gate 		 * must be explicitly passed to newproc, which doesn't
34430Sstevel@tonic-gate 		 * respect pool bindings.
34440Sstevel@tonic-gate 		 *
34450Sstevel@tonic-gate 		 * We hold the pool_lock across the call to newproc() to
34460Sstevel@tonic-gate 		 * close the obvious race: the pool's scheduling class
34470Sstevel@tonic-gate 		 * could change before we manage to create the LWP with
34480Sstevel@tonic-gate 		 * classid 'cid'.
34490Sstevel@tonic-gate 		 */
34500Sstevel@tonic-gate 		pool_lock();
34513247Sgjelinek 		if (zone->zone_defaultcid > 0)
34523247Sgjelinek 			cid = zone->zone_defaultcid;
34533247Sgjelinek 		else
34543247Sgjelinek 			cid = pool_get_class(zone->zone_pool);
34550Sstevel@tonic-gate 		if (cid == -1)
34560Sstevel@tonic-gate 			cid = defaultcid;
34570Sstevel@tonic-gate 
34580Sstevel@tonic-gate 		/*
34590Sstevel@tonic-gate 		 * If this fails, zone_boot will ultimately fail.  The
34600Sstevel@tonic-gate 		 * state of the zone will be set to SHUTTING_DOWN-- userland
34610Sstevel@tonic-gate 		 * will have to tear down the zone, and fail, or try again.
34620Sstevel@tonic-gate 		 */
34632267Sdp 		if ((zone->zone_boot_err = newproc(zone_start_init, NULL, cid,
34640Sstevel@tonic-gate 		    minclsyspri - 1, &ct)) != 0) {
34650Sstevel@tonic-gate 			mutex_enter(&zone_status_lock);
34660Sstevel@tonic-gate 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
34670Sstevel@tonic-gate 			mutex_exit(&zone_status_lock);
34680Sstevel@tonic-gate 		}
34690Sstevel@tonic-gate 		pool_unlock();
34700Sstevel@tonic-gate 	}
34710Sstevel@tonic-gate 
34720Sstevel@tonic-gate 	/*
34730Sstevel@tonic-gate 	 * Wait for zone_destroy() to be called.  This is what we spend
34740Sstevel@tonic-gate 	 * most of our life doing.
34750Sstevel@tonic-gate 	 */
34760Sstevel@tonic-gate 	zone_status_wait_cpr(zone, ZONE_IS_DYING, "zsched");
34770Sstevel@tonic-gate 
34780Sstevel@tonic-gate 	if (ct)
34790Sstevel@tonic-gate 		/*
34800Sstevel@tonic-gate 		 * At this point the process contract should be empty.
34810Sstevel@tonic-gate 		 * (Though if it isn't, it's not the end of the world.)
34820Sstevel@tonic-gate 		 */
34830Sstevel@tonic-gate 		VERIFY(contract_abandon(ct, curproc, B_TRUE) == 0);
34840Sstevel@tonic-gate 
34850Sstevel@tonic-gate 	/*
34860Sstevel@tonic-gate 	 * Allow kcred to be freed when all referring processes
34870Sstevel@tonic-gate 	 * (including this one) go away.  We can't just do this in
34880Sstevel@tonic-gate 	 * zone_free because we need to wait for the zone_cred_ref to
34890Sstevel@tonic-gate 	 * drop to 0 before calling zone_free, and the existence of
34900Sstevel@tonic-gate 	 * zone_kcred will prevent that.  Thus, we call crfree here to
34910Sstevel@tonic-gate 	 * balance the crdup in zone_create.  The crhold calls earlier
34920Sstevel@tonic-gate 	 * in zsched will be dropped when the thread and process exit.
34930Sstevel@tonic-gate 	 */
34940Sstevel@tonic-gate 	crfree(zone->zone_kcred);
34950Sstevel@tonic-gate 	zone->zone_kcred = NULL;
34960Sstevel@tonic-gate 
34970Sstevel@tonic-gate 	exit(CLD_EXITED, 0);
34980Sstevel@tonic-gate }
34990Sstevel@tonic-gate 
35000Sstevel@tonic-gate /*
35010Sstevel@tonic-gate  * Helper function to determine if there are any submounts of the
35020Sstevel@tonic-gate  * provided path.  Used to make sure the zone doesn't "inherit" any
35030Sstevel@tonic-gate  * mounts from before it is created.
35040Sstevel@tonic-gate  */
35050Sstevel@tonic-gate static uint_t
35060Sstevel@tonic-gate zone_mount_count(const char *rootpath)
35070Sstevel@tonic-gate {
35080Sstevel@tonic-gate 	vfs_t *vfsp;
35090Sstevel@tonic-gate 	uint_t count = 0;
35100Sstevel@tonic-gate 	size_t rootpathlen = strlen(rootpath);
35110Sstevel@tonic-gate 
35120Sstevel@tonic-gate 	/*
35130Sstevel@tonic-gate 	 * Holding zonehash_lock prevents race conditions with
35140Sstevel@tonic-gate 	 * vfs_list_add()/vfs_list_remove() since we serialize with
35150Sstevel@tonic-gate 	 * zone_find_by_path().
35160Sstevel@tonic-gate 	 */
35170Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zonehash_lock));
35180Sstevel@tonic-gate 	/*
35190Sstevel@tonic-gate 	 * The rootpath must end with a '/'
35200Sstevel@tonic-gate 	 */
35210Sstevel@tonic-gate 	ASSERT(rootpath[rootpathlen - 1] == '/');
35220Sstevel@tonic-gate 
35230Sstevel@tonic-gate 	/*
35240Sstevel@tonic-gate 	 * This intentionally does not count the rootpath itself if that
35250Sstevel@tonic-gate 	 * happens to be a mount point.
35260Sstevel@tonic-gate 	 */
35270Sstevel@tonic-gate 	vfs_list_read_lock();
35280Sstevel@tonic-gate 	vfsp = rootvfs;
35290Sstevel@tonic-gate 	do {
35300Sstevel@tonic-gate 		if (strncmp(rootpath, refstr_value(vfsp->vfs_mntpt),
35310Sstevel@tonic-gate 		    rootpathlen) == 0)
35320Sstevel@tonic-gate 			count++;
35330Sstevel@tonic-gate 		vfsp = vfsp->vfs_next;
35340Sstevel@tonic-gate 	} while (vfsp != rootvfs);
35350Sstevel@tonic-gate 	vfs_list_unlock();
35360Sstevel@tonic-gate 	return (count);
35370Sstevel@tonic-gate }
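
/*
 * For example, given a (hypothetical) rootpath of "/export/foo/root/",
 * a mount table containing
 *
 *	/export/foo/root	(the rootpath itself; not counted)
 *	/export/foo/root/proc	(counted)
 *	/export/bar/root/tmp	(different prefix; not counted)
 *
 * yields a count of 1: only mountpoints strictly below the
 * trailing-'/' rootpath share its full prefix.
 */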
35380Sstevel@tonic-gate 
35390Sstevel@tonic-gate /*
35400Sstevel@tonic-gate  * Helper function to make sure that a zone created on 'rootpath'
35410Sstevel@tonic-gate  * wouldn't end up containing other zones' rootpaths.
35420Sstevel@tonic-gate  */
35430Sstevel@tonic-gate static boolean_t
35440Sstevel@tonic-gate zone_is_nested(const char *rootpath)
35450Sstevel@tonic-gate {
35460Sstevel@tonic-gate 	zone_t *zone;
35470Sstevel@tonic-gate 	size_t rootpathlen = strlen(rootpath);
35480Sstevel@tonic-gate 	size_t len;
35490Sstevel@tonic-gate 
35500Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&zonehash_lock));
35510Sstevel@tonic-gate 
35528799SDhanaraj.M@Sun.COM 	/*
35538799SDhanaraj.M@Sun.COM 	 * zone_set_root() appended '/' and '\0' to rootpath, so a
35538799SDhanaraj.M@Sun.COM 	 * rootpath of "//" means the zone root is "/", which would
35538799SDhanaraj.M@Sun.COM 	 * contain every other zone's rootpath; treat it as nested.
35548799SDhanaraj.M@Sun.COM 	 */
35558799SDhanaraj.M@Sun.COM 	if ((rootpathlen <= 3) && (rootpath[0] == '/') &&
35568799SDhanaraj.M@Sun.COM 	    (rootpath[1] == '/') && (rootpath[2] == '\0'))
35578799SDhanaraj.M@Sun.COM 		return (B_TRUE);
35588799SDhanaraj.M@Sun.COM 
35590Sstevel@tonic-gate 	for (zone = list_head(&zone_active); zone != NULL;
35600Sstevel@tonic-gate 	    zone = list_next(&zone_active, zone)) {
35610Sstevel@tonic-gate 		if (zone == global_zone)
35620Sstevel@tonic-gate 			continue;
35630Sstevel@tonic-gate 		len = strlen(zone->zone_rootpath);
35640Sstevel@tonic-gate 		if (strncmp(rootpath, zone->zone_rootpath,
35650Sstevel@tonic-gate 		    MIN(rootpathlen, len)) == 0)
35660Sstevel@tonic-gate 			return (B_TRUE);
35670Sstevel@tonic-gate 	}
35680Sstevel@tonic-gate 	return (B_FALSE);
35690Sstevel@tonic-gate }
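
/*
 * For example (hypothetical paths), a new rootpath of "/export/foo/" is
 * rejected if an existing zone is rooted at "/export/foo/z1/root/" (the
 * new zone would contain it), and a new rootpath of
 * "/export/foo/z1/root/opt/" is rejected as well (it would live inside
 * the existing zone).  The MIN()-length strncmp() above catches both
 * directions with a single comparison.
 */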
35700Sstevel@tonic-gate 
35710Sstevel@tonic-gate static int
3572813Sdp zone_set_privset(zone_t *zone, const priv_set_t *zone_privs,
3573813Sdp     size_t zone_privssz)
35740Sstevel@tonic-gate {
35750Sstevel@tonic-gate 	priv_set_t *privs;
35760Sstevel@tonic-gate 
3577813Sdp 	/*
3577813Sdp 	 * Check the size before allocating so an undersized set can't
3577813Sdp 	 * leak the allocation; return a plain errno, since the caller
3577813Sdp 	 * hands it to zone_create_error().
3577813Sdp 	 */
3577813Sdp 	if (zone_privssz < sizeof (priv_set_t))
3578813Sdp 		return (ENOMEM);
3579813Sdp 
35800Sstevel@tonic-gate 	privs = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
35800Sstevel@tonic-gate 	if (copyin(zone_privs, privs, sizeof (priv_set_t))) {
35810Sstevel@tonic-gate 		kmem_free(privs, sizeof (priv_set_t));
35820Sstevel@tonic-gate 		return (EFAULT);
35830Sstevel@tonic-gate 	}
35840Sstevel@tonic-gate 
35850Sstevel@tonic-gate 	zone->zone_privset = privs;
35860Sstevel@tonic-gate 	return (0);
35870Sstevel@tonic-gate }
35880Sstevel@tonic-gate 
35890Sstevel@tonic-gate /*
35900Sstevel@tonic-gate  * We make creative use of nvlists to pass in rctls from userland.  The list is
35910Sstevel@tonic-gate  * a list of the following structures:
35920Sstevel@tonic-gate  *
35930Sstevel@tonic-gate  * (name = rctl_name, value = nvpair_list_array)
35940Sstevel@tonic-gate  *
35950Sstevel@tonic-gate  * Where each element of the nvpair_list_array is of the form:
35960Sstevel@tonic-gate  *
35970Sstevel@tonic-gate  * [(name = "privilege", value = RCPRIV_PRIVILEGED),
35980Sstevel@tonic-gate  * 	(name = "limit", value = uint64_t),
35990Sstevel@tonic-gate  * 	(name = "action", value = (RCTL_LOCAL_NOACTION || RCTL_LOCAL_DENY))]
36000Sstevel@tonic-gate  */
36010Sstevel@tonic-gate static int
36020Sstevel@tonic-gate parse_rctls(caddr_t ubuf, size_t buflen, nvlist_t **nvlp)
36030Sstevel@tonic-gate {
36040Sstevel@tonic-gate 	nvpair_t *nvp = NULL;
36050Sstevel@tonic-gate 	nvlist_t *nvl = NULL;
36060Sstevel@tonic-gate 	char *kbuf;
36070Sstevel@tonic-gate 	int error;
36080Sstevel@tonic-gate 	rctl_val_t rv;
36090Sstevel@tonic-gate 
36100Sstevel@tonic-gate 	*nvlp = NULL;
36110Sstevel@tonic-gate 
36120Sstevel@tonic-gate 	if (buflen == 0)
36130Sstevel@tonic-gate 		return (0);
36140Sstevel@tonic-gate 
36150Sstevel@tonic-gate 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
36160Sstevel@tonic-gate 		return (ENOMEM);
36170Sstevel@tonic-gate 	if (copyin(ubuf, kbuf, buflen)) {
36180Sstevel@tonic-gate 		error = EFAULT;
36190Sstevel@tonic-gate 		goto out;
36200Sstevel@tonic-gate 	}
36210Sstevel@tonic-gate 	if (nvlist_unpack(kbuf, buflen, &nvl, KM_SLEEP) != 0) {
36220Sstevel@tonic-gate 		/*
36230Sstevel@tonic-gate 		 * nvl may have been allocated/free'd, but the value set to
36240Sstevel@tonic-gate 		 * non-NULL, so we reset it here.
36250Sstevel@tonic-gate 		 */
36260Sstevel@tonic-gate 		nvl = NULL;
36270Sstevel@tonic-gate 		error = EINVAL;
36280Sstevel@tonic-gate 		goto out;
36290Sstevel@tonic-gate 	}
36300Sstevel@tonic-gate 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
36310Sstevel@tonic-gate 		rctl_dict_entry_t *rde;
36320Sstevel@tonic-gate 		rctl_hndl_t hndl;
36330Sstevel@tonic-gate 		nvlist_t **nvlarray;
36340Sstevel@tonic-gate 		uint_t i, nelem;
36350Sstevel@tonic-gate 		char *name;
36360Sstevel@tonic-gate 
36370Sstevel@tonic-gate 		error = EINVAL;
36380Sstevel@tonic-gate 		name = nvpair_name(nvp);
36390Sstevel@tonic-gate 		if (strncmp(nvpair_name(nvp), "zone.", sizeof ("zone.") - 1)
36400Sstevel@tonic-gate 		    != 0 || nvpair_type(nvp) != DATA_TYPE_NVLIST_ARRAY) {
36410Sstevel@tonic-gate 			goto out;
36420Sstevel@tonic-gate 		}
36430Sstevel@tonic-gate 		if ((hndl = rctl_hndl_lookup(name)) == -1) {
36440Sstevel@tonic-gate 			goto out;
36450Sstevel@tonic-gate 		}
36460Sstevel@tonic-gate 		rde = rctl_dict_lookup_hndl(hndl);
36470Sstevel@tonic-gate 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
36480Sstevel@tonic-gate 		ASSERT(error == 0);
36490Sstevel@tonic-gate 		for (i = 0; i < nelem; i++) {
36500Sstevel@tonic-gate 			if (error = nvlist2rctlval(nvlarray[i], &rv))
36510Sstevel@tonic-gate 				goto out;
36520Sstevel@tonic-gate 		}
36530Sstevel@tonic-gate 		if (rctl_invalid_value(rde, &rv)) {
36540Sstevel@tonic-gate 			error = EINVAL;
36550Sstevel@tonic-gate 			goto out;
36560Sstevel@tonic-gate 		}
36570Sstevel@tonic-gate 	}
36580Sstevel@tonic-gate 	error = 0;
36590Sstevel@tonic-gate 	*nvlp = nvl;
36600Sstevel@tonic-gate out:
36610Sstevel@tonic-gate 	kmem_free(kbuf, buflen);
36620Sstevel@tonic-gate 	if (error && nvl != NULL)
36630Sstevel@tonic-gate 		nvlist_free(nvl);
36640Sstevel@tonic-gate 	return (error);
36650Sstevel@tonic-gate }
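
/*
 * A minimal sketch of how a userland caller (e.g. zoneadmd) might build
 * the packed buffer that parse_rctls() expects, using libnvpair; error
 * handling is omitted and the rctl name and limit are only examples:
 *
 *	nvlist_t *nvl, *val;
 *	char *buf = NULL;
 *	size_t buflen = 0;
 *
 *	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_alloc(&val, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_uint64(val, "privilege", RCPRIV_PRIVILEGED);
 *	(void) nvlist_add_uint64(val, "limit", 1000);
 *	(void) nvlist_add_uint64(val, "action", RCTL_LOCAL_DENY);
 *	(void) nvlist_add_nvlist_array(nvl, "zone.max-lwps", &val, 1);
 *	(void) nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_NATIVE, 0);
 *
 * 'buf' and 'buflen' then become the 'rctlbuf' and 'rctlbufsz'
 * arguments to zone_create() below.
 */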
36660Sstevel@tonic-gate 
36670Sstevel@tonic-gate int
36680Sstevel@tonic-gate zone_create_error(int er_error, int er_ext, int *er_out)
36680Sstevel@tonic-gate {
36690Sstevel@tonic-gate 	if (er_out != NULL) {
36700Sstevel@tonic-gate 		if (copyout(&er_ext, er_out, sizeof (int))) {
36710Sstevel@tonic-gate 			return (set_errno(EFAULT));
36720Sstevel@tonic-gate 		}
36730Sstevel@tonic-gate 	}
36740Sstevel@tonic-gate 	return (set_errno(er_error));
36750Sstevel@tonic-gate }
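
/*
 * For instance, zone_create() below reports "the rootpath still has
 * submounts" as
 *
 *	return (zone_create_error(EBUSY, ZE_AREMOUNTS, extended_error));
 *
 * so the caller sees errno EBUSY and, if it passed a non-NULL
 * extended_error pointer, the more specific ZE_AREMOUNTS code;
 * ZE_CHROOTED and ZE_LABELINUSE are reported the same way.
 */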
36760Sstevel@tonic-gate 
36771676Sjpk static int
36781676Sjpk zone_set_label(zone_t *zone, const bslabel_t *lab, uint32_t doi)
36791676Sjpk {
36801676Sjpk 	ts_label_t *tsl;
36811676Sjpk 	bslabel_t blab;
36821676Sjpk 
36831676Sjpk 	/* Get label from user */
36841676Sjpk 	if (copyin(lab, &blab, sizeof (blab)) != 0)
36851676Sjpk 		return (EFAULT);
36861676Sjpk 	tsl = labelalloc(&blab, doi, KM_NOSLEEP);
36871676Sjpk 	if (tsl == NULL)
36881676Sjpk 		return (ENOMEM);
36891676Sjpk 
36901676Sjpk 	zone->zone_slabel = tsl;
36911676Sjpk 	return (0);
36921676Sjpk }
36931676Sjpk 
36940Sstevel@tonic-gate /*
3695789Sahrens  * Parses a comma-separated list of ZFS datasets into the per-zone
3695789Sahrens  * zone_datasets list.
3696789Sahrens  */
3697789Sahrens static int
3698789Sahrens parse_zfs(zone_t *zone, caddr_t ubuf, size_t buflen)
3699789Sahrens {
3700789Sahrens 	char *kbuf;
3701789Sahrens 	char *dataset, *next;
3702789Sahrens 	zone_dataset_t *zd;
3703789Sahrens 	size_t len;
3704789Sahrens 
3705789Sahrens 	if (ubuf == NULL || buflen == 0)
3706789Sahrens 		return (0);
3707789Sahrens 
3708789Sahrens 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
3709789Sahrens 		return (ENOMEM);
3710789Sahrens 
3711789Sahrens 	if (copyin(ubuf, kbuf, buflen) != 0) {
3712789Sahrens 		kmem_free(kbuf, buflen);
3713789Sahrens 		return (EFAULT);
3714789Sahrens 	}
3715789Sahrens 
3716789Sahrens 	dataset = next = kbuf;
3717789Sahrens 	for (;;) {
3718789Sahrens 		zd = kmem_alloc(sizeof (zone_dataset_t), KM_SLEEP);
3719789Sahrens 
3720789Sahrens 		next = strchr(dataset, ',');
3721789Sahrens 
3722789Sahrens 		if (next == NULL)
3723789Sahrens 			len = strlen(dataset);
3724789Sahrens 		else
3725789Sahrens 			len = next - dataset;
3726789Sahrens 
3727789Sahrens 		zd->zd_dataset = kmem_alloc(len + 1, KM_SLEEP);
3728789Sahrens 		bcopy(dataset, zd->zd_dataset, len);
3729789Sahrens 		zd->zd_dataset[len] = '\0';
3730789Sahrens 
3731789Sahrens 		list_insert_head(&zone->zone_datasets, zd);
3732789Sahrens 
3733789Sahrens 		if (next == NULL)
3734789Sahrens 			break;
3735789Sahrens 
3736789Sahrens 		dataset = next + 1;
3737789Sahrens 	}
3738789Sahrens 
3739789Sahrens 	kmem_free(kbuf, buflen);
3740789Sahrens 	return (0);
3741789Sahrens }
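
/*
 * For example, a (hypothetical) buffer holding "tank/zonedata,tank/db"
 * with buflen = strlen("tank/zonedata,tank/db") + 1 produces two
 * zone_dataset_t entries on zone_datasets, "tank/db" and
 * "tank/zonedata" (each is inserted at the head, so they end up in
 * reverse order of appearance).
 */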
3742789Sahrens 
3743789Sahrens /*
37440Sstevel@tonic-gate  * System call to create/initialize a new zone named 'zone_name', rooted
37450Sstevel@tonic-gate  * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
37461676Sjpk  * initialized with the zone-wide rctls described in 'rctlbuf', granted
37461676Sjpk  * access to the ZFS datasets listed in 'zfsbuf', and labeled according
37471676Sjpk  * to 'match', 'doi', and 'label'.  'flags' carries creation flags such
37471676Sjpk  * as ZCF_NET_EXCL.
37480Sstevel@tonic-gate  *
37490Sstevel@tonic-gate  * If extended error is non-null, we may use it to return more detailed
37500Sstevel@tonic-gate  * error information.
37510Sstevel@tonic-gate  */
37520Sstevel@tonic-gate static zoneid_t
37530Sstevel@tonic-gate zone_create(const char *zone_name, const char *zone_root,
3754813Sdp     const priv_set_t *zone_privs, size_t zone_privssz,
3755813Sdp     caddr_t rctlbuf, size_t rctlbufsz,
37561676Sjpk     caddr_t zfsbuf, size_t zfsbufsz, int *extended_error,
37573448Sdh155122     int match, uint32_t doi, const bslabel_t *label,
37583448Sdh155122     int flags)
37590Sstevel@tonic-gate {
37600Sstevel@tonic-gate 	struct zsched_arg zarg;
37610Sstevel@tonic-gate 	nvlist_t *rctls = NULL;
37620Sstevel@tonic-gate 	proc_t *pp = curproc;
37630Sstevel@tonic-gate 	zone_t *zone, *ztmp;
37640Sstevel@tonic-gate 	zoneid_t zoneid;
37650Sstevel@tonic-gate 	int error;
37660Sstevel@tonic-gate 	int error2 = 0;
37670Sstevel@tonic-gate 	char *str;
37680Sstevel@tonic-gate 	cred_t *zkcr;
37691769Scarlsonj 	boolean_t insert_label_hash;
37700Sstevel@tonic-gate 
37710Sstevel@tonic-gate 	if (secpolicy_zone_config(CRED()) != 0)
37720Sstevel@tonic-gate 		return (set_errno(EPERM));
37730Sstevel@tonic-gate 
37740Sstevel@tonic-gate 	/* can't boot zone from within chroot environment */
37750Sstevel@tonic-gate 	if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
37760Sstevel@tonic-gate 		return (zone_create_error(ENOTSUP, ZE_CHROOTED,
3777813Sdp 		    extended_error));
37780Sstevel@tonic-gate 
37790Sstevel@tonic-gate 	zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
37800Sstevel@tonic-gate 	zoneid = zone->zone_id = id_alloc(zoneid_space);
37810Sstevel@tonic-gate 	zone->zone_status = ZONE_IS_UNINITIALIZED;
37820Sstevel@tonic-gate 	zone->zone_pool = pool_default;
37830Sstevel@tonic-gate 	zone->zone_pool_mod = gethrtime();
37840Sstevel@tonic-gate 	zone->zone_psetid = ZONE_PS_INVAL;
37850Sstevel@tonic-gate 	zone->zone_ncpus = 0;
37860Sstevel@tonic-gate 	zone->zone_ncpus_online = 0;
37872712Snn35248 	zone->zone_restart_init = B_TRUE;
37882712Snn35248 	zone->zone_brand = &native_brand;
37892712Snn35248 	zone->zone_initname = NULL;
37900Sstevel@tonic-gate 	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
37910Sstevel@tonic-gate 	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
37923247Sgjelinek 	mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
37930Sstevel@tonic-gate 	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
37940Sstevel@tonic-gate 	list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
37950Sstevel@tonic-gate 	    offsetof(struct zsd_entry, zsd_linkage));
3796789Sahrens 	list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
3797789Sahrens 	    offsetof(zone_dataset_t, zd_linkage));
37981676Sjpk 	rw_init(&zone->zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
37990Sstevel@tonic-gate 
38003448Sdh155122 	if (flags & ZCF_NET_EXCL) {
38013448Sdh155122 		zone->zone_flags |= ZF_NET_EXCL;
38023448Sdh155122 	}
38033448Sdh155122 
38040Sstevel@tonic-gate 	if ((error = zone_set_name(zone, zone_name)) != 0) {
38050Sstevel@tonic-gate 		zone_free(zone);
38060Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
38070Sstevel@tonic-gate 	}
38080Sstevel@tonic-gate 
38090Sstevel@tonic-gate 	if ((error = zone_set_root(zone, zone_root)) != 0) {
38100Sstevel@tonic-gate 		zone_free(zone);
38110Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
38120Sstevel@tonic-gate 	}
3813813Sdp 	if ((error = zone_set_privset(zone, zone_privs, zone_privssz)) != 0) {
38140Sstevel@tonic-gate 		zone_free(zone);
38150Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
38160Sstevel@tonic-gate 	}
38170Sstevel@tonic-gate 
38180Sstevel@tonic-gate 	/* initialize node name to be the same as zone name */
38190Sstevel@tonic-gate 	zone->zone_nodename = kmem_alloc(_SYS_NMLN, KM_SLEEP);
38200Sstevel@tonic-gate 	(void) strncpy(zone->zone_nodename, zone->zone_name, _SYS_NMLN);
38210Sstevel@tonic-gate 	zone->zone_nodename[_SYS_NMLN - 1] = '\0';
38220Sstevel@tonic-gate 
38230Sstevel@tonic-gate 	zone->zone_domain = kmem_alloc(_SYS_NMLN, KM_SLEEP);
38240Sstevel@tonic-gate 	zone->zone_domain[0] = '\0';
38258662SJordan.Vaughan@Sun.com 	zone->zone_hostid = HW_INVALID_HOSTID;
38260Sstevel@tonic-gate 	zone->zone_shares = 1;
38272677Sml93401 	zone->zone_shmmax = 0;
38282677Sml93401 	zone->zone_ipc.ipcq_shmmni = 0;
38292677Sml93401 	zone->zone_ipc.ipcq_semmni = 0;
38302677Sml93401 	zone->zone_ipc.ipcq_msgmni = 0;
38310Sstevel@tonic-gate 	zone->zone_bootargs = NULL;
38322267Sdp 	zone->zone_initname =
38332267Sdp 	    kmem_alloc(strlen(zone_default_initname) + 1, KM_SLEEP);
38342267Sdp 	(void) strcpy(zone->zone_initname, zone_default_initname);
38353247Sgjelinek 	zone->zone_nlwps = 0;
38363247Sgjelinek 	zone->zone_nlwps_ctl = INT_MAX;
38372768Ssl108498 	zone->zone_locked_mem = 0;
38382768Ssl108498 	zone->zone_locked_mem_ctl = UINT64_MAX;
38393247Sgjelinek 	zone->zone_max_swap = 0;
38403247Sgjelinek 	zone->zone_max_swap_ctl = UINT64_MAX;
38413247Sgjelinek 	zone->zone_lockedmem_kstat = NULL;
38423247Sgjelinek 	zone->zone_swapresv_kstat = NULL;
38430Sstevel@tonic-gate 
38440Sstevel@tonic-gate 	/*
38450Sstevel@tonic-gate 	 * Zsched initializes the rctls.
38460Sstevel@tonic-gate 	 */
38470Sstevel@tonic-gate 	zone->zone_rctls = NULL;
38480Sstevel@tonic-gate 
38490Sstevel@tonic-gate 	if ((error = parse_rctls(rctlbuf, rctlbufsz, &rctls)) != 0) {
38500Sstevel@tonic-gate 		zone_free(zone);
38510Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
38520Sstevel@tonic-gate 	}
38530Sstevel@tonic-gate 
3854789Sahrens 	if ((error = parse_zfs(zone, zfsbuf, zfsbufsz)) != 0) {
3855789Sahrens 		zone_free(zone);
3856789Sahrens 		return (set_errno(error));
3857789Sahrens 	}
3858789Sahrens 
38590Sstevel@tonic-gate 	/*
38601676Sjpk 	 * Read in the trusted system parameters:
38611676Sjpk 	 * match flag and sensitivity label.
38621676Sjpk 	 */
38631676Sjpk 	zone->zone_match = match;
38641769Scarlsonj 	if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
38654462Skp158701 		/* Fail if requested to set doi to anything but system's doi */
38664462Skp158701 		if (doi != 0 && doi != default_doi) {
38674462Skp158701 			zone_free(zone);
38684462Skp158701 			return (set_errno(EINVAL));
38694462Skp158701 		}
38704462Skp158701 		/* Always apply system's doi to the zone */
38714462Skp158701 		error = zone_set_label(zone, label, default_doi);
38721676Sjpk 		if (error != 0) {
38731676Sjpk 			zone_free(zone);
38741676Sjpk 			return (set_errno(error));
38751676Sjpk 		}
38761769Scarlsonj 		insert_label_hash = B_TRUE;
38771676Sjpk 	} else {
38781676Sjpk 		/* all zones get an admin_low label if system is not labeled */
38791676Sjpk 		zone->zone_slabel = l_admin_low;
38801676Sjpk 		label_hold(l_admin_low);
38811769Scarlsonj 		insert_label_hash = B_FALSE;
38821676Sjpk 	}
38831676Sjpk 
38841676Sjpk 	/*
38850Sstevel@tonic-gate 	 * Stop all lwps since that's what normally happens as part of fork().
38860Sstevel@tonic-gate 	 * This needs to happen before we grab any locks to avoid deadlock
38870Sstevel@tonic-gate 	 * (another lwp in the process could be waiting for the held lock).
38880Sstevel@tonic-gate 	 */
38890Sstevel@tonic-gate 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK)) {
38900Sstevel@tonic-gate 		zone_free(zone);
38910Sstevel@tonic-gate 		if (rctls)
38920Sstevel@tonic-gate 			nvlist_free(rctls);
38930Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
38940Sstevel@tonic-gate 	}
38950Sstevel@tonic-gate 
38960Sstevel@tonic-gate 	if (block_mounts() == 0) {
38970Sstevel@tonic-gate 		mutex_enter(&pp->p_lock);
38980Sstevel@tonic-gate 		if (curthread != pp->p_agenttp)
38990Sstevel@tonic-gate 			continuelwps(pp);
39000Sstevel@tonic-gate 		mutex_exit(&pp->p_lock);
39010Sstevel@tonic-gate 		zone_free(zone);
39020Sstevel@tonic-gate 		if (rctls)
39030Sstevel@tonic-gate 			nvlist_free(rctls);
39040Sstevel@tonic-gate 		return (zone_create_error(error, 0, extended_error));
39050Sstevel@tonic-gate 	}
39060Sstevel@tonic-gate 
39070Sstevel@tonic-gate 	/*
39080Sstevel@tonic-gate 	 * Set up credential for kernel access.  After this, any errors
39090Sstevel@tonic-gate 	 * should go through the dance in errout rather than calling
39100Sstevel@tonic-gate 	 * zone_free directly.
39110Sstevel@tonic-gate 	 */
39120Sstevel@tonic-gate 	zone->zone_kcred = crdup(kcred);
39130Sstevel@tonic-gate 	crsetzone(zone->zone_kcred, zone);
39140Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_PPRIV(zone->zone_kcred));
39150Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_EPRIV(zone->zone_kcred));
39160Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_IPRIV(zone->zone_kcred));
39170Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_LPRIV(zone->zone_kcred));
39180Sstevel@tonic-gate 
39190Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
39200Sstevel@tonic-gate 	/*
39210Sstevel@tonic-gate 	 * Make sure zone doesn't already exist.
39221676Sjpk 	 *
39231676Sjpk 	 * If the system and zone are labeled,
39241676Sjpk 	 * make sure no other zone exists that has the same label.
39250Sstevel@tonic-gate 	 */
39261676Sjpk 	if ((ztmp = zone_find_all_by_name(zone->zone_name)) != NULL ||
39271769Scarlsonj 	    (insert_label_hash &&
39281676Sjpk 	    (ztmp = zone_find_all_by_label(zone->zone_slabel)) != NULL)) {
39290Sstevel@tonic-gate 		zone_status_t status;
39300Sstevel@tonic-gate 
39310Sstevel@tonic-gate 		status = zone_status_get(ztmp);
39320Sstevel@tonic-gate 		if (status == ZONE_IS_READY || status == ZONE_IS_RUNNING)
39330Sstevel@tonic-gate 			error = EEXIST;
39340Sstevel@tonic-gate 		else
39350Sstevel@tonic-gate 			error = EBUSY;
39364791Ston 
39374791Ston 		if (insert_label_hash)
39384791Ston 			error2 = ZE_LABELINUSE;
39394791Ston 
39400Sstevel@tonic-gate 		goto errout;
39410Sstevel@tonic-gate 	}
39420Sstevel@tonic-gate 
39430Sstevel@tonic-gate 	/*
39440Sstevel@tonic-gate 	 * Don't allow zone creations which would cause one zone's rootpath to
39450Sstevel@tonic-gate 	 * be accessible from that of another (non-global) zone.
39460Sstevel@tonic-gate 	 */
39470Sstevel@tonic-gate 	if (zone_is_nested(zone->zone_rootpath)) {
39480Sstevel@tonic-gate 		error = EBUSY;
39490Sstevel@tonic-gate 		goto errout;
39500Sstevel@tonic-gate 	}
39510Sstevel@tonic-gate 
39520Sstevel@tonic-gate 	ASSERT(zonecount != 0);		/* check for leaks */
39530Sstevel@tonic-gate 	if (zonecount + 1 > maxzones) {
39540Sstevel@tonic-gate 		error = ENOMEM;
39550Sstevel@tonic-gate 		goto errout;
39560Sstevel@tonic-gate 	}
39570Sstevel@tonic-gate 
39580Sstevel@tonic-gate 	if (zone_mount_count(zone->zone_rootpath) != 0) {
39590Sstevel@tonic-gate 		error = EBUSY;
39600Sstevel@tonic-gate 		error2 = ZE_AREMOUNTS;
39610Sstevel@tonic-gate 		goto errout;
39620Sstevel@tonic-gate 	}
39630Sstevel@tonic-gate 
39640Sstevel@tonic-gate 	/*
39650Sstevel@tonic-gate 	 * Zone is still incomplete, but we need to drop all locks while
39660Sstevel@tonic-gate 	 * zsched() initializes this zone's kernel process.  We
39670Sstevel@tonic-gate 	 * optimistically add the zone to the hashtable and associated
39680Sstevel@tonic-gate 	 * lists so a parallel zone_create() doesn't try to create the
39690Sstevel@tonic-gate 	 * same zone.
39700Sstevel@tonic-gate 	 */
39710Sstevel@tonic-gate 	zonecount++;
39720Sstevel@tonic-gate 	(void) mod_hash_insert(zonehashbyid,
39730Sstevel@tonic-gate 	    (mod_hash_key_t)(uintptr_t)zone->zone_id,
39740Sstevel@tonic-gate 	    (mod_hash_val_t)(uintptr_t)zone);
39750Sstevel@tonic-gate 	str = kmem_alloc(strlen(zone->zone_name) + 1, KM_SLEEP);
39760Sstevel@tonic-gate 	(void) strcpy(str, zone->zone_name);
39770Sstevel@tonic-gate 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)str,
39780Sstevel@tonic-gate 	    (mod_hash_val_t)(uintptr_t)zone);
39791769Scarlsonj 	if (insert_label_hash) {
39801676Sjpk 		(void) mod_hash_insert(zonehashbylabel,
39811676Sjpk 		    (mod_hash_key_t)zone->zone_slabel, (mod_hash_val_t)zone);
39821769Scarlsonj 		zone->zone_flags |= ZF_HASHED_LABEL;
39831676Sjpk 	}
39841676Sjpk 
39850Sstevel@tonic-gate 	/*
39860Sstevel@tonic-gate 	 * Insert into active list.  At this point there are no 'hold's
39870Sstevel@tonic-gate 	 * on the zone, but everyone else knows not to use it, so we can
39880Sstevel@tonic-gate 	 * continue to use it.  zsched() will do a zone_hold() if the
39890Sstevel@tonic-gate 	 * newproc() is successful.
39900Sstevel@tonic-gate 	 */
39910Sstevel@tonic-gate 	list_insert_tail(&zone_active, zone);
39920Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
39930Sstevel@tonic-gate 
39940Sstevel@tonic-gate 	zarg.zone = zone;
39950Sstevel@tonic-gate 	zarg.nvlist = rctls;
39960Sstevel@tonic-gate 	/*
39970Sstevel@tonic-gate 	 * The process, task, and project rctls are probably wrong;
39980Sstevel@tonic-gate 	 * we need an interface to get the default values of all rctls,
39990Sstevel@tonic-gate 	 * and initialize zsched appropriately.  I'm not sure that that
40000Sstevel@tonic-gate 	 * makes much of a difference, though.
40010Sstevel@tonic-gate 	 */
40020Sstevel@tonic-gate 	if (error = newproc(zsched, (void *)&zarg, syscid, minclsyspri, NULL)) {
40030Sstevel@tonic-gate 		/*
40040Sstevel@tonic-gate 		 * We need to undo all globally visible state.
40050Sstevel@tonic-gate 		 */
40060Sstevel@tonic-gate 		mutex_enter(&zonehash_lock);
40070Sstevel@tonic-gate 		list_remove(&zone_active, zone);
40081769Scarlsonj 		if (zone->zone_flags & ZF_HASHED_LABEL) {
40091676Sjpk 			ASSERT(zone->zone_slabel != NULL);
40101676Sjpk 			(void) mod_hash_destroy(zonehashbylabel,
40111676Sjpk 			    (mod_hash_key_t)zone->zone_slabel);
40121676Sjpk 		}
40130Sstevel@tonic-gate 		(void) mod_hash_destroy(zonehashbyname,
40140Sstevel@tonic-gate 		    (mod_hash_key_t)(uintptr_t)zone->zone_name);
40150Sstevel@tonic-gate 		(void) mod_hash_destroy(zonehashbyid,
40160Sstevel@tonic-gate 		    (mod_hash_key_t)(uintptr_t)zone->zone_id);
40170Sstevel@tonic-gate 		ASSERT(zonecount > 1);
40180Sstevel@tonic-gate 		zonecount--;
40190Sstevel@tonic-gate 		goto errout;
40200Sstevel@tonic-gate 	}
40210Sstevel@tonic-gate 
40220Sstevel@tonic-gate 	/*
40230Sstevel@tonic-gate 	 * Zone creation can't fail from now on.
40240Sstevel@tonic-gate 	 */
40250Sstevel@tonic-gate 
40260Sstevel@tonic-gate 	/*
40273247Sgjelinek 	 * Create zone kstats
40283247Sgjelinek 	 */
40293247Sgjelinek 	zone_kstat_create(zone);
40303247Sgjelinek 
40313247Sgjelinek 	/*
40320Sstevel@tonic-gate 	 * Let the other lwps continue.
40330Sstevel@tonic-gate 	 */
40340Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
40350Sstevel@tonic-gate 	if (curthread != pp->p_agenttp)
40360Sstevel@tonic-gate 		continuelwps(pp);
40370Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
40380Sstevel@tonic-gate 
40390Sstevel@tonic-gate 	/*
40400Sstevel@tonic-gate 	 * Wait for zsched to finish initializing the zone.
40410Sstevel@tonic-gate 	 */
40420Sstevel@tonic-gate 	zone_status_wait(zone, ZONE_IS_READY);
40430Sstevel@tonic-gate 	/*
40440Sstevel@tonic-gate 	 * The zone is fully visible, so we can let mounts progress.
40450Sstevel@tonic-gate 	 */
40460Sstevel@tonic-gate 	resume_mounts();
40470Sstevel@tonic-gate 	if (rctls)
40480Sstevel@tonic-gate 		nvlist_free(rctls);
40490Sstevel@tonic-gate 
40500Sstevel@tonic-gate 	return (zoneid);
40510Sstevel@tonic-gate 
40520Sstevel@tonic-gate errout:
40530Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
40540Sstevel@tonic-gate 	/*
40550Sstevel@tonic-gate 	 * Let the other lwps continue.
40560Sstevel@tonic-gate 	 */
40570Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
40580Sstevel@tonic-gate 	if (curthread != pp->p_agenttp)
40590Sstevel@tonic-gate 		continuelwps(pp);
40600Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
40610Sstevel@tonic-gate 
40620Sstevel@tonic-gate 	resume_mounts();
40630Sstevel@tonic-gate 	if (rctls)
40640Sstevel@tonic-gate 		nvlist_free(rctls);
40650Sstevel@tonic-gate 	/*
40660Sstevel@tonic-gate 	 * There is currently one reference to the zone, a cred_ref from
40670Sstevel@tonic-gate 	 * zone_kcred.  To free the zone, we call crfree, which will call
40680Sstevel@tonic-gate 	 * zone_cred_rele, which will call zone_free.
40690Sstevel@tonic-gate 	 */
40700Sstevel@tonic-gate 	ASSERT(zone->zone_cred_ref == 1);	/* for zone_kcred */
40710Sstevel@tonic-gate 	ASSERT(zone->zone_kcred->cr_ref == 1);
40720Sstevel@tonic-gate 	ASSERT(zone->zone_ref == 0);
40730Sstevel@tonic-gate 	zkcr = zone->zone_kcred;
40740Sstevel@tonic-gate 	zone->zone_kcred = NULL;
40750Sstevel@tonic-gate 	crfree(zkcr);				/* triggers call to zone_free */
40760Sstevel@tonic-gate 	return (zone_create_error(error, error2, extended_error));
40770Sstevel@tonic-gate }
40780Sstevel@tonic-gate 
40790Sstevel@tonic-gate /*
40800Sstevel@tonic-gate  * Cause the zone to boot.  This is pretty simple, since we let zoneadmd do
40812267Sdp  * the heavy lifting.  The program launched at the "top" of the zone is
40822267Sdp  * no longer passed in here; it is configured beforehand through the
40832267Sdp  * ZONE_ATTR_INITNAME attribute and defaults to zone_default_initname.
40840Sstevel@tonic-gate  */
40850Sstevel@tonic-gate static int
40862267Sdp zone_boot(zoneid_t zoneid)
40870Sstevel@tonic-gate {
40880Sstevel@tonic-gate 	int err;
40890Sstevel@tonic-gate 	zone_t *zone;
40900Sstevel@tonic-gate 
40910Sstevel@tonic-gate 	if (secpolicy_zone_config(CRED()) != 0)
40920Sstevel@tonic-gate 		return (set_errno(EPERM));
40930Sstevel@tonic-gate 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
40940Sstevel@tonic-gate 		return (set_errno(EINVAL));
40950Sstevel@tonic-gate 
40960Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
40970Sstevel@tonic-gate 	/*
40980Sstevel@tonic-gate 	 * Look for zone under hash lock to prevent races with calls to
40990Sstevel@tonic-gate 	 * zone_shutdown, zone_destroy, etc.
41000Sstevel@tonic-gate 	 */
41010Sstevel@tonic-gate 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
41020Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
41030Sstevel@tonic-gate 		return (set_errno(EINVAL));
41040Sstevel@tonic-gate 	}
41050Sstevel@tonic-gate 
41060Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
41070Sstevel@tonic-gate 	if (zone_status_get(zone) != ZONE_IS_READY) {
41080Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
41090Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
41100Sstevel@tonic-gate 		return (set_errno(EINVAL));
41110Sstevel@tonic-gate 	}
41120Sstevel@tonic-gate 	zone_status_set(zone, ZONE_IS_BOOTING);
41130Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
41140Sstevel@tonic-gate 
41150Sstevel@tonic-gate 	zone_hold(zone);	/* so we can use the zone_t later */
41160Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
41170Sstevel@tonic-gate 
41180Sstevel@tonic-gate 	if (zone_status_wait_sig(zone, ZONE_IS_RUNNING) == 0) {
41190Sstevel@tonic-gate 		zone_rele(zone);
41200Sstevel@tonic-gate 		return (set_errno(EINTR));
41210Sstevel@tonic-gate 	}
41220Sstevel@tonic-gate 
41230Sstevel@tonic-gate 	/*
41240Sstevel@tonic-gate 	 * Boot (starting init) might have failed, in which case the zone
41250Sstevel@tonic-gate 	 * will go to the SHUTTING_DOWN state; an appropriate errno will
41260Sstevel@tonic-gate 	 * be placed in zone->zone_boot_err, and so we return that.
41270Sstevel@tonic-gate 	 */
41280Sstevel@tonic-gate 	err = zone->zone_boot_err;
41290Sstevel@tonic-gate 	zone_rele(zone);
41300Sstevel@tonic-gate 	return (err ? set_errno(err) : 0);
41310Sstevel@tonic-gate }
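
/*
 * Taken together with zone_create() above and the calls that follow,
 * the typical life cycle as driven by zoneadmd looks roughly like this
 * (a sketch for orientation only; see each function for the exact
 * state checks):
 *
 *	zoneid = zone_create(...);		UNINITIALIZED -> READY
 *	zone_setattr(zoneid, ZONE_ATTR_INITNAME, ...);	(optional)
 *	zone_setattr(zoneid, ZONE_ATTR_BOOTARGS, ...);	(optional)
 *	zone_boot(zoneid);			READY -> BOOTING -> RUNNING
 *	...
 *	zone_shutdown(zoneid);		RUNNING -> SHUTTING_DOWN ... -> DOWN
 *	zone_destroy(zoneid);			DOWN -> DYING -> DEAD
 */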
41320Sstevel@tonic-gate 
41330Sstevel@tonic-gate /*
41340Sstevel@tonic-gate  * Kills all user processes in the zone, waiting for them all to exit
41350Sstevel@tonic-gate  * before returning.
41360Sstevel@tonic-gate  */
41370Sstevel@tonic-gate static int
41380Sstevel@tonic-gate zone_empty(zone_t *zone)
41390Sstevel@tonic-gate {
41400Sstevel@tonic-gate 	int waitstatus;
41410Sstevel@tonic-gate 
41420Sstevel@tonic-gate 	/*
41430Sstevel@tonic-gate 	 * We need to drop zonehash_lock before killing all
41440Sstevel@tonic-gate 	 * processes, otherwise we'll deadlock with zone_find_*
41450Sstevel@tonic-gate 	 * which can be called from the exit path.
41460Sstevel@tonic-gate 	 */
41470Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
41480Sstevel@tonic-gate 	while ((waitstatus = zone_status_timedwait_sig(zone, lbolt + hz,
41490Sstevel@tonic-gate 	    ZONE_IS_EMPTY)) == -1) {
41500Sstevel@tonic-gate 		killall(zone->zone_id);
41510Sstevel@tonic-gate 	}
41520Sstevel@tonic-gate 	/*
41530Sstevel@tonic-gate 	 * return EINTR if we were signaled
41540Sstevel@tonic-gate 	 */
41550Sstevel@tonic-gate 	if (waitstatus == 0)
41560Sstevel@tonic-gate 		return (EINTR);
41570Sstevel@tonic-gate 	return (0);
41580Sstevel@tonic-gate }
41590Sstevel@tonic-gate 
41600Sstevel@tonic-gate /*
41611676Sjpk  * This function implements the policy for zone visibility.
41621676Sjpk  *
41631676Sjpk  * In standard Solaris, a non-global zone can only see itself.
41641676Sjpk  *
41651676Sjpk  * In Trusted Extensions, a labeled zone can lookup any zone whose label
41661676Sjpk  * it dominates. For this test, the label of the global zone is treated as
41671676Sjpk  * admin_high so it is special-cased instead of being checked for dominance.
41681676Sjpk  *
41691676Sjpk  * Returns true if zone attributes are viewable, false otherwise.
41701676Sjpk  */
41711676Sjpk static boolean_t
41721676Sjpk zone_list_access(zone_t *zone)
41731676Sjpk {
41741676Sjpk 
41751676Sjpk 	if (curproc->p_zone == global_zone ||
41761676Sjpk 	    curproc->p_zone == zone) {
41771676Sjpk 		return (B_TRUE);
41781769Scarlsonj 	} else if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
41791676Sjpk 		bslabel_t *curproc_label;
41801676Sjpk 		bslabel_t *zone_label;
41811676Sjpk 
41821676Sjpk 		curproc_label = label2bslabel(curproc->p_zone->zone_slabel);
41831676Sjpk 		zone_label = label2bslabel(zone->zone_slabel);
41841676Sjpk 
41851676Sjpk 		if (zone->zone_id != GLOBAL_ZONEID &&
41861676Sjpk 		    bldominates(curproc_label, zone_label)) {
41871676Sjpk 			return (B_TRUE);
41881676Sjpk 		} else {
41891676Sjpk 			return (B_FALSE);
41901676Sjpk 		}
41911676Sjpk 	} else {
41921676Sjpk 		return (B_FALSE);
41931676Sjpk 	}
41941676Sjpk }
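
/*
 * Dominance here is the usual MLS ordering: label A dominates label B
 * when A's classification is at least B's and A's compartment set is a
 * superset of B's.  So, for a hypothetical pair of labeled zones, a
 * process in a zone running at "SECRET A B" could look up a zone
 * labeled "SECRET A", but not one labeled "SECRET A C" or a zone at a
 * higher classification.
 */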
41951676Sjpk 
41961676Sjpk /*
41970Sstevel@tonic-gate  * Systemcall to start the zone's halt sequence.  By the time this
41980Sstevel@tonic-gate  * function successfully returns, all user processes and kernel threads
41990Sstevel@tonic-gate  * executing in it will have exited, ZSD shutdown callbacks executed,
42000Sstevel@tonic-gate  * and the zone status set to ZONE_IS_DOWN.
42010Sstevel@tonic-gate  *
42020Sstevel@tonic-gate  * It is possible that the call will interrupt itself if the caller is the
42030Sstevel@tonic-gate  * parent of any process running in the zone, and doesn't have SIGCHLD blocked.
42040Sstevel@tonic-gate  */
42050Sstevel@tonic-gate static int
42060Sstevel@tonic-gate zone_shutdown(zoneid_t zoneid)
42070Sstevel@tonic-gate {
42080Sstevel@tonic-gate 	int error;
42090Sstevel@tonic-gate 	zone_t *zone;
42100Sstevel@tonic-gate 	zone_status_t status;
42110Sstevel@tonic-gate 
42120Sstevel@tonic-gate 	if (secpolicy_zone_config(CRED()) != 0)
42130Sstevel@tonic-gate 		return (set_errno(EPERM));
42140Sstevel@tonic-gate 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
42150Sstevel@tonic-gate 		return (set_errno(EINVAL));
42160Sstevel@tonic-gate 
42170Sstevel@tonic-gate 	/*
42180Sstevel@tonic-gate 	 * Block mounts so that VFS_MOUNT() can get an accurate view of
42190Sstevel@tonic-gate 	 * the zone's status with regard to ZONE_IS_SHUTTING_DOWN.
42200Sstevel@tonic-gate 	 *
42210Sstevel@tonic-gate 	 * e.g. NFS can fail the mount if it determines that the zone
42220Sstevel@tonic-gate 	 * has already begun the shutdown sequence.
42230Sstevel@tonic-gate 	 */
42240Sstevel@tonic-gate 	if (block_mounts() == 0)
42250Sstevel@tonic-gate 		return (set_errno(EINTR));
42260Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
42270Sstevel@tonic-gate 	/*
42280Sstevel@tonic-gate 	 * Look for zone under hash lock to prevent races with other
42290Sstevel@tonic-gate 	 * calls to zone_shutdown and zone_destroy.
42300Sstevel@tonic-gate 	 */
42310Sstevel@tonic-gate 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
42320Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
42330Sstevel@tonic-gate 		resume_mounts();
42340Sstevel@tonic-gate 		return (set_errno(EINVAL));
42350Sstevel@tonic-gate 	}
42360Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
42370Sstevel@tonic-gate 	status = zone_status_get(zone);
42380Sstevel@tonic-gate 	/*
42390Sstevel@tonic-gate 	 * Fail if the zone isn't fully initialized yet.
42400Sstevel@tonic-gate 	 */
42410Sstevel@tonic-gate 	if (status < ZONE_IS_READY) {
42420Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
42430Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
42440Sstevel@tonic-gate 		resume_mounts();
42450Sstevel@tonic-gate 		return (set_errno(EINVAL));
42460Sstevel@tonic-gate 	}
42470Sstevel@tonic-gate 	/*
42480Sstevel@tonic-gate 	 * If conditions required for zone_shutdown() to return have been met,
42490Sstevel@tonic-gate 	 * return success.
42500Sstevel@tonic-gate 	 */
42510Sstevel@tonic-gate 	if (status >= ZONE_IS_DOWN) {
42520Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
42530Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
42540Sstevel@tonic-gate 		resume_mounts();
42550Sstevel@tonic-gate 		return (0);
42560Sstevel@tonic-gate 	}
42570Sstevel@tonic-gate 	/*
42580Sstevel@tonic-gate 	 * If zone_shutdown() hasn't been called before, go through the motions.
42590Sstevel@tonic-gate 	 * If it has, there's nothing to do but wait for the kernel threads to
42600Sstevel@tonic-gate 	 * drain.
42610Sstevel@tonic-gate 	 */
42620Sstevel@tonic-gate 	if (status < ZONE_IS_EMPTY) {
42630Sstevel@tonic-gate 		uint_t ntasks;
42640Sstevel@tonic-gate 
42650Sstevel@tonic-gate 		mutex_enter(&zone->zone_lock);
42660Sstevel@tonic-gate 		if ((ntasks = zone->zone_ntasks) != 1) {
42670Sstevel@tonic-gate 			/*
42680Sstevel@tonic-gate 			 * There's still stuff running.
42690Sstevel@tonic-gate 			 */
42700Sstevel@tonic-gate 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
42710Sstevel@tonic-gate 		}
42720Sstevel@tonic-gate 		mutex_exit(&zone->zone_lock);
42730Sstevel@tonic-gate 		if (ntasks == 1) {
42740Sstevel@tonic-gate 			/*
42750Sstevel@tonic-gate 			 * The only way to create another task is through
42760Sstevel@tonic-gate 			 * zone_enter(), which will block until we drop
42770Sstevel@tonic-gate 			 * zonehash_lock.  The zone is empty.
42780Sstevel@tonic-gate 			 */
42790Sstevel@tonic-gate 			if (zone->zone_kthreads == NULL) {
42800Sstevel@tonic-gate 				/*
42810Sstevel@tonic-gate 				 * Skip ahead to ZONE_IS_DOWN
42820Sstevel@tonic-gate 				 */
42830Sstevel@tonic-gate 				zone_status_set(zone, ZONE_IS_DOWN);
42840Sstevel@tonic-gate 			} else {
42850Sstevel@tonic-gate 				zone_status_set(zone, ZONE_IS_EMPTY);
42860Sstevel@tonic-gate 			}
42870Sstevel@tonic-gate 		}
42880Sstevel@tonic-gate 	}
42890Sstevel@tonic-gate 	zone_hold(zone);	/* so we can use the zone_t later */
42900Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
42910Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
42920Sstevel@tonic-gate 	resume_mounts();
42930Sstevel@tonic-gate 
42940Sstevel@tonic-gate 	if (error = zone_empty(zone)) {
42950Sstevel@tonic-gate 		zone_rele(zone);
42960Sstevel@tonic-gate 		return (set_errno(error));
42970Sstevel@tonic-gate 	}
42980Sstevel@tonic-gate 	/*
42990Sstevel@tonic-gate 	 * After the zone status goes to ZONE_IS_DOWN this zone will no
43000Sstevel@tonic-gate 	 * longer be notified of changes to the pools configuration, so
43010Sstevel@tonic-gate 	 * in order to not end up with a stale pool pointer, we point
43020Sstevel@tonic-gate 	 * ourselves at the default pool and remove all resource
43030Sstevel@tonic-gate 	 * visibility.  This is especially important as the zone_t may
43040Sstevel@tonic-gate 	 * languish on the deathrow for a very long time waiting for
43050Sstevel@tonic-gate 	 * creds to drain out.
43060Sstevel@tonic-gate 	 *
43070Sstevel@tonic-gate 	 * This rebinding of the zone can happen multiple times
43080Sstevel@tonic-gate 	 * (presumably due to interrupted or parallel systemcalls)
43090Sstevel@tonic-gate 	 * without any adverse effects.
43100Sstevel@tonic-gate 	 */
43110Sstevel@tonic-gate 	if (pool_lock_intr() != 0) {
43120Sstevel@tonic-gate 		zone_rele(zone);
43130Sstevel@tonic-gate 		return (set_errno(EINTR));
43140Sstevel@tonic-gate 	}
43150Sstevel@tonic-gate 	if (pool_state == POOL_ENABLED) {
43160Sstevel@tonic-gate 		mutex_enter(&cpu_lock);
43170Sstevel@tonic-gate 		zone_pool_set(zone, pool_default);
43180Sstevel@tonic-gate 		/*
43190Sstevel@tonic-gate 		 * The zone no longer needs to be able to see any cpus.
43200Sstevel@tonic-gate 		 */
43210Sstevel@tonic-gate 		zone_pset_set(zone, ZONE_PS_INVAL);
43220Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
43230Sstevel@tonic-gate 	}
43240Sstevel@tonic-gate 	pool_unlock();
43250Sstevel@tonic-gate 
43260Sstevel@tonic-gate 	/*
43270Sstevel@tonic-gate 	 * ZSD shutdown callbacks can be executed multiple times, hence
43280Sstevel@tonic-gate 	 * it is safe to not be holding any locks across this call.
43290Sstevel@tonic-gate 	 */
43300Sstevel@tonic-gate 	zone_zsd_callbacks(zone, ZSD_SHUTDOWN);
43310Sstevel@tonic-gate 
43320Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
43330Sstevel@tonic-gate 	if (zone->zone_kthreads == NULL && zone_status_get(zone) < ZONE_IS_DOWN)
43340Sstevel@tonic-gate 		zone_status_set(zone, ZONE_IS_DOWN);
43350Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
43360Sstevel@tonic-gate 
43370Sstevel@tonic-gate 	/*
43380Sstevel@tonic-gate 	 * Wait for kernel threads to drain.
43390Sstevel@tonic-gate 	 */
43400Sstevel@tonic-gate 	if (!zone_status_wait_sig(zone, ZONE_IS_DOWN)) {
43410Sstevel@tonic-gate 		zone_rele(zone);
43420Sstevel@tonic-gate 		return (set_errno(EINTR));
43430Sstevel@tonic-gate 	}
43442712Snn35248 
43453671Ssl108498 	/*
43463671Ssl108498 	 * The zone can become down/destroyable even if the above wait
43473671Ssl108498 	 * returns EINTR, so any code added here may never execute.
43483671Ssl108498 	 * (i.e. don't add code here)
43493671Ssl108498 	 */
43502712Snn35248 
43510Sstevel@tonic-gate 	zone_rele(zone);
43520Sstevel@tonic-gate 	return (0);
43530Sstevel@tonic-gate }
43540Sstevel@tonic-gate 
43550Sstevel@tonic-gate /*
43560Sstevel@tonic-gate  * Systemcall entry point to finalize the zone halt process.  The caller
43572677Sml93401  * must have already successfully called zone_shutdown().
43580Sstevel@tonic-gate  *
43590Sstevel@tonic-gate  * Upon successful completion, the zone will have been fully destroyed:
43600Sstevel@tonic-gate  * zsched will have exited, destructor callbacks executed, and the zone
43610Sstevel@tonic-gate  * removed from the list of active zones.
43620Sstevel@tonic-gate  */
43630Sstevel@tonic-gate static int
43640Sstevel@tonic-gate zone_destroy(zoneid_t zoneid)
43650Sstevel@tonic-gate {
43660Sstevel@tonic-gate 	uint64_t uniqid;
43670Sstevel@tonic-gate 	zone_t *zone;
43680Sstevel@tonic-gate 	zone_status_t status;
43690Sstevel@tonic-gate 
43700Sstevel@tonic-gate 	if (secpolicy_zone_config(CRED()) != 0)
43710Sstevel@tonic-gate 		return (set_errno(EPERM));
43720Sstevel@tonic-gate 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
43730Sstevel@tonic-gate 		return (set_errno(EINVAL));
43740Sstevel@tonic-gate 
43750Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
43760Sstevel@tonic-gate 	/*
43770Sstevel@tonic-gate 	 * Look for zone under hash lock to prevent races with other
43780Sstevel@tonic-gate 	 * calls to zone_destroy.
43790Sstevel@tonic-gate 	 */
43800Sstevel@tonic-gate 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
43810Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
43820Sstevel@tonic-gate 		return (set_errno(EINVAL));
43830Sstevel@tonic-gate 	}
43840Sstevel@tonic-gate 
43850Sstevel@tonic-gate 	if (zone_mount_count(zone->zone_rootpath) != 0) {
43860Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
43870Sstevel@tonic-gate 		return (set_errno(EBUSY));
43880Sstevel@tonic-gate 	}
43890Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
43900Sstevel@tonic-gate 	status = zone_status_get(zone);
43910Sstevel@tonic-gate 	if (status < ZONE_IS_DOWN) {
43920Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
43930Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
43940Sstevel@tonic-gate 		return (set_errno(EBUSY));
43950Sstevel@tonic-gate 	} else if (status == ZONE_IS_DOWN) {
43960Sstevel@tonic-gate 		zone_status_set(zone, ZONE_IS_DYING); /* Tell zsched to exit */
43970Sstevel@tonic-gate 	}
43980Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
43990Sstevel@tonic-gate 	zone_hold(zone);
44000Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
44010Sstevel@tonic-gate 
44020Sstevel@tonic-gate 	/*
44030Sstevel@tonic-gate 	 * wait for zsched to exit
44040Sstevel@tonic-gate 	 */
44050Sstevel@tonic-gate 	zone_status_wait(zone, ZONE_IS_DEAD);
44060Sstevel@tonic-gate 	zone_zsd_callbacks(zone, ZSD_DESTROY);
44073448Sdh155122 	zone->zone_netstack = NULL;
44080Sstevel@tonic-gate 	uniqid = zone->zone_uniqid;
44090Sstevel@tonic-gate 	zone_rele(zone);
44100Sstevel@tonic-gate 	zone = NULL;	/* potentially free'd */
44110Sstevel@tonic-gate 
44120Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
44130Sstevel@tonic-gate 	for (; /* ever */; ) {
44140Sstevel@tonic-gate 		boolean_t unref;
44150Sstevel@tonic-gate 
44160Sstevel@tonic-gate 		if ((zone = zone_find_all_by_id(zoneid)) == NULL ||
44170Sstevel@tonic-gate 		    zone->zone_uniqid != uniqid) {
44180Sstevel@tonic-gate 			/*
44190Sstevel@tonic-gate 			 * The zone has gone away.  Necessary conditions
44200Sstevel@tonic-gate 			 * are met, so we return success.
44210Sstevel@tonic-gate 			 */
44220Sstevel@tonic-gate 			mutex_exit(&zonehash_lock);
44230Sstevel@tonic-gate 			return (0);
44240Sstevel@tonic-gate 		}
44250Sstevel@tonic-gate 		mutex_enter(&zone->zone_lock);
44260Sstevel@tonic-gate 		unref = ZONE_IS_UNREF(zone);
44270Sstevel@tonic-gate 		mutex_exit(&zone->zone_lock);
44280Sstevel@tonic-gate 		if (unref) {
44290Sstevel@tonic-gate 			/*
44300Sstevel@tonic-gate 			 * There is only one reference to the zone -- that
44310Sstevel@tonic-gate 			 * added when the zone was added to the hashtables --
44320Sstevel@tonic-gate 			 * and things will remain this way until we drop
44330Sstevel@tonic-gate 			 * zonehash_lock... we can go ahead and cleanup the
44340Sstevel@tonic-gate 			 * zone.
44350Sstevel@tonic-gate 			 */
44360Sstevel@tonic-gate 			break;
44370Sstevel@tonic-gate 		}
44380Sstevel@tonic-gate 
44390Sstevel@tonic-gate 		if (cv_wait_sig(&zone_destroy_cv, &zonehash_lock) == 0) {
44400Sstevel@tonic-gate 			/* Signaled */
44410Sstevel@tonic-gate 			mutex_exit(&zonehash_lock);
44420Sstevel@tonic-gate 			return (set_errno(EINTR));
44430Sstevel@tonic-gate 		}
44440Sstevel@tonic-gate 
44450Sstevel@tonic-gate 	}
44460Sstevel@tonic-gate 
44473792Sakolb 	/*
44483792Sakolb 	 * Remove CPU cap for this zone now since we're not going to
44493792Sakolb 	 * fail below this point.
44503792Sakolb 	 */
44513792Sakolb 	cpucaps_zone_remove(zone);
44523792Sakolb 
44533792Sakolb 	/* Get rid of the zone's kstats */
44543247Sgjelinek 	zone_kstat_delete(zone);
44553247Sgjelinek 
44564888Seh208807 	/* free brand specific data */
44574888Seh208807 	if (ZONE_IS_BRANDED(zone))
44584888Seh208807 		ZBROP(zone)->b_free_brand_data(zone);
44594888Seh208807 
44603671Ssl108498 	/* Say goodbye to brand framework. */
44613671Ssl108498 	brand_unregister_zone(zone->zone_brand);
44623671Ssl108498 
44630Sstevel@tonic-gate 	/*
44640Sstevel@tonic-gate 	 * It is now safe to let the zone be recreated; remove it from the
44650Sstevel@tonic-gate 	 * lists.  The memory will not be freed until the last cred
44660Sstevel@tonic-gate 	 * reference goes away.
44670Sstevel@tonic-gate 	 */
44680Sstevel@tonic-gate 	ASSERT(zonecount > 1);	/* must be > 1; can't destroy global zone */
44690Sstevel@tonic-gate 	zonecount--;
44700Sstevel@tonic-gate 	/* remove from active list and hash tables */
44710Sstevel@tonic-gate 	list_remove(&zone_active, zone);
44720Sstevel@tonic-gate 	(void) mod_hash_destroy(zonehashbyname,
44730Sstevel@tonic-gate 	    (mod_hash_key_t)zone->zone_name);
44740Sstevel@tonic-gate 	(void) mod_hash_destroy(zonehashbyid,
44750Sstevel@tonic-gate 	    (mod_hash_key_t)(uintptr_t)zone->zone_id);
44761769Scarlsonj 	if (zone->zone_flags & ZF_HASHED_LABEL)
44771676Sjpk 		(void) mod_hash_destroy(zonehashbylabel,
44781676Sjpk 		    (mod_hash_key_t)zone->zone_slabel);
44790Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
44800Sstevel@tonic-gate 
4481766Scarlsonj 	/*
4482766Scarlsonj 	 * Release the root vnode; we're not using it anymore, and no
4483766Scarlsonj 	 * other thread that might access it should still exist.
4484766Scarlsonj 	 */
4485766Scarlsonj 	if (zone->zone_rootvp != NULL) {
4486766Scarlsonj 		VN_RELE(zone->zone_rootvp);
4487766Scarlsonj 		zone->zone_rootvp = NULL;
4488766Scarlsonj 	}
4489766Scarlsonj 
44900Sstevel@tonic-gate 	/* add to deathrow list */
44910Sstevel@tonic-gate 	mutex_enter(&zone_deathrow_lock);
44920Sstevel@tonic-gate 	list_insert_tail(&zone_deathrow, zone);
44930Sstevel@tonic-gate 	mutex_exit(&zone_deathrow_lock);
44940Sstevel@tonic-gate 
44950Sstevel@tonic-gate 	/*
44960Sstevel@tonic-gate 	 * Drop last reference (which was added by zsched()), this will
44970Sstevel@tonic-gate 	 * free the zone unless there are outstanding cred references.
44980Sstevel@tonic-gate 	 */
44990Sstevel@tonic-gate 	zone_rele(zone);
45000Sstevel@tonic-gate 	return (0);
45010Sstevel@tonic-gate }
45020Sstevel@tonic-gate 
45030Sstevel@tonic-gate /*
45040Sstevel@tonic-gate  * Systemcall entry point for zone_getattr(2).
45050Sstevel@tonic-gate  */
45060Sstevel@tonic-gate static ssize_t
45070Sstevel@tonic-gate zone_getattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
45080Sstevel@tonic-gate {
45090Sstevel@tonic-gate 	size_t size;
45100Sstevel@tonic-gate 	int error = 0, err;
45110Sstevel@tonic-gate 	zone_t *zone;
45120Sstevel@tonic-gate 	char *zonepath;
45132267Sdp 	char *outstr;
45140Sstevel@tonic-gate 	zone_status_t zone_status;
45150Sstevel@tonic-gate 	pid_t initpid;
45163792Sakolb 	boolean_t global = (curzone == global_zone);
45173792Sakolb 	boolean_t inzone = (curzone->zone_id == zoneid);
45183448Sdh155122 	ushort_t flags;
45190Sstevel@tonic-gate 
45200Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
45210Sstevel@tonic-gate 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
45220Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
45230Sstevel@tonic-gate 		return (set_errno(EINVAL));
45240Sstevel@tonic-gate 	}
45250Sstevel@tonic-gate 	zone_status = zone_status_get(zone);
45265880Snordmark 	if (zone_status < ZONE_IS_INITIALIZED) {
45270Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
45280Sstevel@tonic-gate 		return (set_errno(EINVAL));
45290Sstevel@tonic-gate 	}
45300Sstevel@tonic-gate 	zone_hold(zone);
45310Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
45320Sstevel@tonic-gate 
45330Sstevel@tonic-gate 	/*
45341676Sjpk 	 * If not in the global zone, don't show information about other zones,
45351676Sjpk 	 * unless the system is labeled and the local zone's label dominates
45361676Sjpk 	 * the other zone.
45370Sstevel@tonic-gate 	 */
45381676Sjpk 	if (!zone_list_access(zone)) {
45390Sstevel@tonic-gate 		zone_rele(zone);
45400Sstevel@tonic-gate 		return (set_errno(EINVAL));
45410Sstevel@tonic-gate 	}
45420Sstevel@tonic-gate 
45430Sstevel@tonic-gate 	switch (attr) {
45440Sstevel@tonic-gate 	case ZONE_ATTR_ROOT:
45450Sstevel@tonic-gate 		if (global) {
45460Sstevel@tonic-gate 			/*
45470Sstevel@tonic-gate 			 * Copy the path to trim the trailing "/" (except for
45480Sstevel@tonic-gate 			 * the global zone).
45490Sstevel@tonic-gate 			 */
45500Sstevel@tonic-gate 			if (zone != global_zone)
45510Sstevel@tonic-gate 				size = zone->zone_rootpathlen - 1;
45520Sstevel@tonic-gate 			else
45530Sstevel@tonic-gate 				size = zone->zone_rootpathlen;
45540Sstevel@tonic-gate 			zonepath = kmem_alloc(size, KM_SLEEP);
45550Sstevel@tonic-gate 			bcopy(zone->zone_rootpath, zonepath, size);
45560Sstevel@tonic-gate 			zonepath[size - 1] = '\0';
45570Sstevel@tonic-gate 		} else {
45583792Sakolb 			if (inzone || !is_system_labeled()) {
45591676Sjpk 				/*
45611676Sjpk 				 * If the query is about the current zone
45621676Sjpk 				 * or the system is not labeled, just
45631676Sjpk 				 * return a faked-up path for the current
45631676Sjpk 				 * zone.
45631676Sjpk 				 * just return faked-up path for current zone.
45641676Sjpk 				 */
45651676Sjpk 				zonepath = "/";
45661676Sjpk 				size = 2;
45671676Sjpk 			} else {
45681676Sjpk 				/*
45691676Sjpk 				 * Return related path for current zone.
45701676Sjpk 				 */
45711676Sjpk 				int prefix_len = strlen(zone_prefix);
45721676Sjpk 				int zname_len = strlen(zone->zone_name);
45731676Sjpk 
45741676Sjpk 				size = prefix_len + zname_len + 1;
45751676Sjpk 				zonepath = kmem_alloc(size, KM_SLEEP);
45761676Sjpk 				bcopy(zone_prefix, zonepath, prefix_len);
45771676Sjpk 				bcopy(zone->zone_name, zonepath +
45782267Sdp 				    prefix_len, zname_len);
45791676Sjpk 				zonepath[size - 1] = '\0';
45801676Sjpk 			}
45810Sstevel@tonic-gate 		}
45820Sstevel@tonic-gate 		if (bufsize > size)
45830Sstevel@tonic-gate 			bufsize = size;
45840Sstevel@tonic-gate 		if (buf != NULL) {
45850Sstevel@tonic-gate 			err = copyoutstr(zonepath, buf, bufsize, NULL);
45860Sstevel@tonic-gate 			if (err != 0 && err != ENAMETOOLONG)
45870Sstevel@tonic-gate 				error = EFAULT;
45880Sstevel@tonic-gate 		}
45893792Sakolb 		if (global || (is_system_labeled() && !inzone))
45900Sstevel@tonic-gate 			kmem_free(zonepath, size);
45910Sstevel@tonic-gate 		break;
45920Sstevel@tonic-gate 
45930Sstevel@tonic-gate 	case ZONE_ATTR_NAME:
45940Sstevel@tonic-gate 		size = strlen(zone->zone_name) + 1;
45950Sstevel@tonic-gate 		if (bufsize > size)
45960Sstevel@tonic-gate 			bufsize = size;
45970Sstevel@tonic-gate 		if (buf != NULL) {
45980Sstevel@tonic-gate 			err = copyoutstr(zone->zone_name, buf, bufsize, NULL);
45990Sstevel@tonic-gate 			if (err != 0 && err != ENAMETOOLONG)
46000Sstevel@tonic-gate 				error = EFAULT;
46010Sstevel@tonic-gate 		}
46020Sstevel@tonic-gate 		break;
46030Sstevel@tonic-gate 
46040Sstevel@tonic-gate 	case ZONE_ATTR_STATUS:
46050Sstevel@tonic-gate 		/*
46060Sstevel@tonic-gate 		 * Since we're not holding zonehash_lock, the zone status
46070Sstevel@tonic-gate 		 * may be anything; leave it up to userland to sort it out.
46080Sstevel@tonic-gate 		 */
46090Sstevel@tonic-gate 		size = sizeof (zone_status);
46100Sstevel@tonic-gate 		if (bufsize > size)
46110Sstevel@tonic-gate 			bufsize = size;
46120Sstevel@tonic-gate 		zone_status = zone_status_get(zone);
46130Sstevel@tonic-gate 		if (buf != NULL &&
46140Sstevel@tonic-gate 		    copyout(&zone_status, buf, bufsize) != 0)
46150Sstevel@tonic-gate 			error = EFAULT;
46160Sstevel@tonic-gate 		break;
46173448Sdh155122 	case ZONE_ATTR_FLAGS:
46183448Sdh155122 		size = sizeof (zone->zone_flags);
46193448Sdh155122 		if (bufsize > size)
46203448Sdh155122 			bufsize = size;
46213448Sdh155122 		flags = zone->zone_flags;
46223448Sdh155122 		if (buf != NULL &&
46233448Sdh155122 		    copyout(&flags, buf, bufsize) != 0)
46243448Sdh155122 			error = EFAULT;
46253448Sdh155122 		break;
46260Sstevel@tonic-gate 	case ZONE_ATTR_PRIVSET:
46270Sstevel@tonic-gate 		size = sizeof (priv_set_t);
46280Sstevel@tonic-gate 		if (bufsize > size)
46290Sstevel@tonic-gate 			bufsize = size;
46300Sstevel@tonic-gate 		if (buf != NULL &&
46310Sstevel@tonic-gate 		    copyout(zone->zone_privset, buf, bufsize) != 0)
46320Sstevel@tonic-gate 			error = EFAULT;
46330Sstevel@tonic-gate 		break;
46340Sstevel@tonic-gate 	case ZONE_ATTR_UNIQID:
46350Sstevel@tonic-gate 		size = sizeof (zone->zone_uniqid);
46360Sstevel@tonic-gate 		if (bufsize > size)
46370Sstevel@tonic-gate 			bufsize = size;
46380Sstevel@tonic-gate 		if (buf != NULL &&
46390Sstevel@tonic-gate 		    copyout(&zone->zone_uniqid, buf, bufsize) != 0)
46400Sstevel@tonic-gate 			error = EFAULT;
46410Sstevel@tonic-gate 		break;
46420Sstevel@tonic-gate 	case ZONE_ATTR_POOLID:
46430Sstevel@tonic-gate 		{
46440Sstevel@tonic-gate 			pool_t *pool;
46450Sstevel@tonic-gate 			poolid_t poolid;
46460Sstevel@tonic-gate 
46470Sstevel@tonic-gate 			if (pool_lock_intr() != 0) {
46480Sstevel@tonic-gate 				error = EINTR;
46490Sstevel@tonic-gate 				break;
46500Sstevel@tonic-gate 			}
46510Sstevel@tonic-gate 			pool = zone_pool_get(zone);
46520Sstevel@tonic-gate 			poolid = pool->pool_id;
46530Sstevel@tonic-gate 			pool_unlock();
46540Sstevel@tonic-gate 			size = sizeof (poolid);
46550Sstevel@tonic-gate 			if (bufsize > size)
46560Sstevel@tonic-gate 				bufsize = size;
46570Sstevel@tonic-gate 			if (buf != NULL && copyout(&poolid, buf, size) != 0)
46580Sstevel@tonic-gate 				error = EFAULT;
46590Sstevel@tonic-gate 		}
46600Sstevel@tonic-gate 		break;
46611676Sjpk 	case ZONE_ATTR_SLBL:
46621676Sjpk 		size = sizeof (bslabel_t);
46631676Sjpk 		if (bufsize > size)
46641676Sjpk 			bufsize = size;
46651676Sjpk 		if (zone->zone_slabel == NULL)
46661676Sjpk 			error = EINVAL;
46671676Sjpk 		else if (buf != NULL &&
46681676Sjpk 		    copyout(label2bslabel(zone->zone_slabel), buf,
46691676Sjpk 		    bufsize) != 0)
46701676Sjpk 			error = EFAULT;
46711676Sjpk 		break;
46720Sstevel@tonic-gate 	case ZONE_ATTR_INITPID:
46730Sstevel@tonic-gate 		size = sizeof (initpid);
46740Sstevel@tonic-gate 		if (bufsize > size)
46750Sstevel@tonic-gate 			bufsize = size;
46760Sstevel@tonic-gate 		initpid = zone->zone_proc_initpid;
46770Sstevel@tonic-gate 		if (initpid == -1) {
46780Sstevel@tonic-gate 			error = ESRCH;
46790Sstevel@tonic-gate 			break;
46800Sstevel@tonic-gate 		}
46810Sstevel@tonic-gate 		if (buf != NULL &&
46820Sstevel@tonic-gate 		    copyout(&initpid, buf, bufsize) != 0)
46830Sstevel@tonic-gate 			error = EFAULT;
46840Sstevel@tonic-gate 		break;
46852712Snn35248 	case ZONE_ATTR_BRAND:
46862712Snn35248 		size = strlen(zone->zone_brand->b_name) + 1;
46872712Snn35248 
46882712Snn35248 		if (bufsize > size)
46892712Snn35248 			bufsize = size;
46902712Snn35248 		if (buf != NULL) {
46912712Snn35248 			err = copyoutstr(zone->zone_brand->b_name, buf,
46922712Snn35248 			    bufsize, NULL);
46932712Snn35248 			if (err != 0 && err != ENAMETOOLONG)
46942712Snn35248 				error = EFAULT;
46952712Snn35248 		}
46962712Snn35248 		break;
46972267Sdp 	case ZONE_ATTR_INITNAME:
46982267Sdp 		size = strlen(zone->zone_initname) + 1;
46992267Sdp 		if (bufsize > size)
47002267Sdp 			bufsize = size;
47012267Sdp 		if (buf != NULL) {
47022267Sdp 			err = copyoutstr(zone->zone_initname, buf, bufsize,
47032267Sdp 			    NULL);
47042267Sdp 			if (err != 0 && err != ENAMETOOLONG)
47052267Sdp 				error = EFAULT;
47062267Sdp 		}
47072267Sdp 		break;
47082267Sdp 	case ZONE_ATTR_BOOTARGS:
47092267Sdp 		if (zone->zone_bootargs == NULL)
47102267Sdp 			outstr = "";
47112267Sdp 		else
47122267Sdp 			outstr = zone->zone_bootargs;
47132267Sdp 		size = strlen(outstr) + 1;
47142267Sdp 		if (bufsize > size)
47152267Sdp 			bufsize = size;
47162267Sdp 		if (buf != NULL) {
47172267Sdp 			err = copyoutstr(outstr, buf, bufsize, NULL);
47182267Sdp 			if (err != 0 && err != ENAMETOOLONG)
47192267Sdp 				error = EFAULT;
47202267Sdp 		}
47212267Sdp 		break;
47223247Sgjelinek 	case ZONE_ATTR_PHYS_MCAP:
47233247Sgjelinek 		size = sizeof (zone->zone_phys_mcap);
47243247Sgjelinek 		if (bufsize > size)
47253247Sgjelinek 			bufsize = size;
47263247Sgjelinek 		if (buf != NULL &&
47273247Sgjelinek 		    copyout(&zone->zone_phys_mcap, buf, bufsize) != 0)
47283247Sgjelinek 			error = EFAULT;
47293247Sgjelinek 		break;
47303247Sgjelinek 	case ZONE_ATTR_SCHED_CLASS:
47313247Sgjelinek 		mutex_enter(&class_lock);
47323247Sgjelinek 
47333247Sgjelinek 		if (zone->zone_defaultcid >= loaded_classes)
47343247Sgjelinek 			outstr = "";
47353247Sgjelinek 		else
47363247Sgjelinek 			outstr = sclass[zone->zone_defaultcid].cl_name;
47373247Sgjelinek 		size = strlen(outstr) + 1;
47383247Sgjelinek 		if (bufsize > size)
47393247Sgjelinek 			bufsize = size;
47403247Sgjelinek 		if (buf != NULL) {
47413247Sgjelinek 			err = copyoutstr(outstr, buf, bufsize, NULL);
47423247Sgjelinek 			if (err != 0 && err != ENAMETOOLONG)
47433247Sgjelinek 				error = EFAULT;
47443247Sgjelinek 		}
47453247Sgjelinek 
47463247Sgjelinek 		mutex_exit(&class_lock);
47473247Sgjelinek 		break;
47488662SJordan.Vaughan@Sun.com 	case ZONE_ATTR_HOSTID:
47498662SJordan.Vaughan@Sun.com 		if (zone->zone_hostid != HW_INVALID_HOSTID &&
47508662SJordan.Vaughan@Sun.com 		    bufsize == sizeof (zone->zone_hostid)) {
47518662SJordan.Vaughan@Sun.com 			size = sizeof (zone->zone_hostid);
47528662SJordan.Vaughan@Sun.com 			if (buf != NULL && copyout(&zone->zone_hostid, buf,
47538662SJordan.Vaughan@Sun.com 			    bufsize) != 0)
47548662SJordan.Vaughan@Sun.com 				error = EFAULT;
47558662SJordan.Vaughan@Sun.com 		} else {
47568662SJordan.Vaughan@Sun.com 			error = EINVAL;
47578662SJordan.Vaughan@Sun.com 		}
47588662SJordan.Vaughan@Sun.com 		break;
47590Sstevel@tonic-gate 	default:
47602712Snn35248 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone)) {
47612712Snn35248 			size = bufsize;
47622712Snn35248 			error = ZBROP(zone)->b_getattr(zone, attr, buf, &size);
47632712Snn35248 		} else {
47642712Snn35248 			error = EINVAL;
47652712Snn35248 		}
47660Sstevel@tonic-gate 	}
47670Sstevel@tonic-gate 	zone_rele(zone);
47680Sstevel@tonic-gate 
47690Sstevel@tonic-gate 	if (error)
47700Sstevel@tonic-gate 		return (set_errno(error));
47710Sstevel@tonic-gate 	return ((ssize_t)size);
47720Sstevel@tonic-gate }
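
/*
 * Illustrative only (not part of the original source): a minimal userland
 * sketch of the probe-then-fetch pattern the handler above supports.  It
 * assumes the libc wrapper zone_getattr() declared in <zone.h> mirrors
 * this syscall's signature; treat that as an assumption, not a guarantee.
 * A NULL buf returns the attribute size without copying any data, and a
 * second call with a buffer at least that large fetches the value.
 *
 *	#include <zone.h>
 *	#include <stdlib.h>
 *
 *	char *
 *	get_bootargs(zoneid_t zid)
 *	{
 *		ssize_t sz;
 *		char *buf;
 *
 *		if ((sz = zone_getattr(zid, ZONE_ATTR_BOOTARGS, NULL, 0)) < 0)
 *			return (NULL);
 *		if ((buf = malloc(sz)) == NULL)
 *			return (NULL);
 *		if (zone_getattr(zid, ZONE_ATTR_BOOTARGS, buf, sz) < 0) {
 *			free(buf);
 *			return (NULL);
 *		}
 *		return (buf);
 *	}
 */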
47730Sstevel@tonic-gate 
47740Sstevel@tonic-gate /*
47752267Sdp  * Systemcall entry point for zone_setattr(2).
47762267Sdp  */
47772267Sdp /*ARGSUSED*/
47782267Sdp static int
47792267Sdp zone_setattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
47802267Sdp {
47812267Sdp 	zone_t *zone;
47822267Sdp 	zone_status_t zone_status;
47832267Sdp 	int err;
47842267Sdp 
47852267Sdp 	if (secpolicy_zone_config(CRED()) != 0)
47862267Sdp 		return (set_errno(EPERM));
47872267Sdp 
47882267Sdp 	/*
47893247Sgjelinek 	 * Only the ZONE_ATTR_PHYS_MCAP attribute can be set on the
47903247Sgjelinek 	 * global zone.
47912267Sdp 	 */
47923247Sgjelinek 	if (zoneid == GLOBAL_ZONEID && attr != ZONE_ATTR_PHYS_MCAP) {
47932267Sdp 		return (set_errno(EINVAL));
47942267Sdp 	}
47952267Sdp 
47962267Sdp 	mutex_enter(&zonehash_lock);
47972267Sdp 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
47982267Sdp 		mutex_exit(&zonehash_lock);
47992267Sdp 		return (set_errno(EINVAL));
48002267Sdp 	}
48012267Sdp 	zone_hold(zone);
48022267Sdp 	mutex_exit(&zonehash_lock);
48032267Sdp 
48043247Sgjelinek 	/*
48053247Sgjelinek 	 * At present most attributes can only be set on non-running,
48063247Sgjelinek 	 * non-global zones.
48073247Sgjelinek 	 */
48082267Sdp 	zone_status = zone_status_get(zone);
48093247Sgjelinek 	if (attr != ZONE_ATTR_PHYS_MCAP && zone_status > ZONE_IS_READY) {
48102267Sdp 		err = EINVAL;
48102267Sdp 		goto done;
48102267Sdp 	}
48112267Sdp 
48122267Sdp 	switch (attr) {
48132267Sdp 	case ZONE_ATTR_INITNAME:
48142267Sdp 		err = zone_set_initname(zone, (const char *)buf);
48152267Sdp 		break;
48162267Sdp 	case ZONE_ATTR_BOOTARGS:
48172267Sdp 		err = zone_set_bootargs(zone, (const char *)buf);
48182267Sdp 		break;
48192712Snn35248 	case ZONE_ATTR_BRAND:
48204141Sedp 		err = zone_set_brand(zone, (const char *)buf);
48212712Snn35248 		break;
48223247Sgjelinek 	case ZONE_ATTR_PHYS_MCAP:
48233247Sgjelinek 		err = zone_set_phys_mcap(zone, (const uint64_t *)buf);
48243247Sgjelinek 		break;
48253247Sgjelinek 	case ZONE_ATTR_SCHED_CLASS:
48263247Sgjelinek 		err = zone_set_sched_class(zone, (const char *)buf);
48273247Sgjelinek 		break;
48288662SJordan.Vaughan@Sun.com 	case ZONE_ATTR_HOSTID:
48298662SJordan.Vaughan@Sun.com 		if (bufsize == sizeof (zone->zone_hostid)) {
48308662SJordan.Vaughan@Sun.com 			if (copyin(buf, &zone->zone_hostid, bufsize) == 0)
48318662SJordan.Vaughan@Sun.com 				err = 0;
48328662SJordan.Vaughan@Sun.com 			else
48338662SJordan.Vaughan@Sun.com 				err = EFAULT;
48348662SJordan.Vaughan@Sun.com 		} else {
48358662SJordan.Vaughan@Sun.com 			err = EINVAL;
48368662SJordan.Vaughan@Sun.com 		}
48378662SJordan.Vaughan@Sun.com 		break;
48382267Sdp 	default:
48392712Snn35248 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone))
48402712Snn35248 			err = ZBROP(zone)->b_setattr(zone, attr, buf, bufsize);
48412712Snn35248 		else
48422712Snn35248 			err = EINVAL;
48432267Sdp 	}
48442267Sdp 
48452267Sdp done:
48462267Sdp 	zone_rele(zone);
48472267Sdp 	return (err != 0 ? set_errno(err) : 0);
48482267Sdp }
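
/*
 * Illustrative only (not part of the original source): a minimal userland
 * sketch, assuming the libc wrapper zone_setattr() declared in <zone.h>
 * mirrors the syscall above.  Boot arguments can only be set by a
 * sufficiently privileged global-zone process while the target zone is no
 * further along than ZONE_IS_READY.
 *
 *	#include <zone.h>
 *	#include <string.h>
 *
 *	int
 *	set_bootargs(zoneid_t zid, const char *args)
 *	{
 *		return (zone_setattr(zid, ZONE_ATTR_BOOTARGS,
 *		    (void *)args, strlen(args) + 1));
 *	}
 */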
48492267Sdp 
48502267Sdp /*
48510Sstevel@tonic-gate  * Return zero if the process has at least one vnode mapped into its
48520Sstevel@tonic-gate  * address space which shouldn't be allowed to change zones.
48533247Sgjelinek  *
48543247Sgjelinek  * Also return zero if the process has any shared mappings which reserve
48553247Sgjelinek  * swap.  This is because the counting for zone.max-swap does not allow swap
48565331Samw  * reservation to be shared between zones.  Zone swap reservation is counted
48573247Sgjelinek  * on zone->zone_max_swap.
48580Sstevel@tonic-gate  */
48590Sstevel@tonic-gate static int
48600Sstevel@tonic-gate as_can_change_zones(void)
48610Sstevel@tonic-gate {
48620Sstevel@tonic-gate 	proc_t *pp = curproc;
48630Sstevel@tonic-gate 	struct seg *seg;
48640Sstevel@tonic-gate 	struct as *as = pp->p_as;
48650Sstevel@tonic-gate 	vnode_t *vp;
48660Sstevel@tonic-gate 	int allow = 1;
48670Sstevel@tonic-gate 
48680Sstevel@tonic-gate 	ASSERT(pp->p_as != &kas);
48693247Sgjelinek 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
48700Sstevel@tonic-gate 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
48713247Sgjelinek 
48723247Sgjelinek 		/*
48733247Sgjelinek 		 * Cannot enter zone with shared anon memory which
48743247Sgjelinek 		 * reserves swap.  See comment above.
48753247Sgjelinek 		 */
48763247Sgjelinek 		if (seg_can_change_zones(seg) == B_FALSE) {
48773247Sgjelinek 			allow = 0;
48783247Sgjelinek 			break;
48793247Sgjelinek 		}
48800Sstevel@tonic-gate 		/*
48810Sstevel@tonic-gate 		 * if we can't get a backing vnode for this segment then skip
48820Sstevel@tonic-gate 		 * it.
48830Sstevel@tonic-gate 		 */
48840Sstevel@tonic-gate 		vp = NULL;
48850Sstevel@tonic-gate 		if (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL)
48860Sstevel@tonic-gate 			continue;
48870Sstevel@tonic-gate 		if (!vn_can_change_zones(vp)) { /* bail on first match */
48880Sstevel@tonic-gate 			allow = 0;
48890Sstevel@tonic-gate 			break;
48900Sstevel@tonic-gate 		}
48910Sstevel@tonic-gate 	}
48923247Sgjelinek 	AS_LOCK_EXIT(as, &as->a_lock);
48930Sstevel@tonic-gate 	return (allow);
48940Sstevel@tonic-gate }
48950Sstevel@tonic-gate 
48960Sstevel@tonic-gate /*
48973247Sgjelinek  * Count swap reserved by curproc's address space
48983247Sgjelinek  */
48993247Sgjelinek static size_t
49003247Sgjelinek as_swresv(void)
49013247Sgjelinek {
49023247Sgjelinek 	proc_t *pp = curproc;
49033247Sgjelinek 	struct seg *seg;
49043247Sgjelinek 	struct as *as = pp->p_as;
49053247Sgjelinek 	size_t swap = 0;
49063247Sgjelinek 
49073247Sgjelinek 	ASSERT(pp->p_as != &kas);
49083247Sgjelinek 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
49093247Sgjelinek 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg))
49103247Sgjelinek 		swap += seg_swresv(seg);
49113247Sgjelinek 
49123247Sgjelinek 	return (swap);
49133247Sgjelinek }
49143247Sgjelinek 
49153247Sgjelinek /*
49160Sstevel@tonic-gate  * Systemcall entry point for zone_enter().
49170Sstevel@tonic-gate  *
49180Sstevel@tonic-gate  * The current process is injected into said zone.  In the process
49190Sstevel@tonic-gate  * it will change its project membership, privileges, rootdir/cwd,
49200Sstevel@tonic-gate  * zone-wide rctls, and pool association to match those of the zone.
49210Sstevel@tonic-gate  *
49220Sstevel@tonic-gate  * The first zone_enter() called while the zone is in the ZONE_IS_READY
49230Sstevel@tonic-gate  * state will transition it to ZONE_IS_RUNNING.  Processes may only
49240Sstevel@tonic-gate  * enter a zone that is "ready" or "running".
49250Sstevel@tonic-gate  */
49260Sstevel@tonic-gate static int
49270Sstevel@tonic-gate zone_enter(zoneid_t zoneid)
49280Sstevel@tonic-gate {
49290Sstevel@tonic-gate 	zone_t *zone;
49300Sstevel@tonic-gate 	vnode_t *vp;
49310Sstevel@tonic-gate 	proc_t *pp = curproc;
49320Sstevel@tonic-gate 	contract_t *ct;
49330Sstevel@tonic-gate 	cont_process_t *ctp;
49340Sstevel@tonic-gate 	task_t *tk, *oldtk;
49350Sstevel@tonic-gate 	kproject_t *zone_proj0;
49360Sstevel@tonic-gate 	cred_t *cr, *newcr;
49370Sstevel@tonic-gate 	pool_t *oldpool, *newpool;
49380Sstevel@tonic-gate 	sess_t *sp;
49390Sstevel@tonic-gate 	uid_t uid;
49400Sstevel@tonic-gate 	zone_status_t status;
49410Sstevel@tonic-gate 	int err = 0;
49420Sstevel@tonic-gate 	rctl_entity_p_t e;
49433247Sgjelinek 	size_t swap;
49443792Sakolb 	kthread_id_t t;
49450Sstevel@tonic-gate 
49460Sstevel@tonic-gate 	if (secpolicy_zone_config(CRED()) != 0)
49470Sstevel@tonic-gate 		return (set_errno(EPERM));
49480Sstevel@tonic-gate 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
49490Sstevel@tonic-gate 		return (set_errno(EINVAL));
49500Sstevel@tonic-gate 
49510Sstevel@tonic-gate 	/*
49520Sstevel@tonic-gate 	 * Stop all lwps so we don't need to hold a lock to look at
49530Sstevel@tonic-gate 	 * curproc->p_zone.  This needs to happen before we grab any
49540Sstevel@tonic-gate 	 * locks to avoid deadlock (another lwp in the process could
49550Sstevel@tonic-gate 	 * be waiting for the held lock).
49560Sstevel@tonic-gate 	 */
49570Sstevel@tonic-gate 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK))
49580Sstevel@tonic-gate 		return (set_errno(EINTR));
49590Sstevel@tonic-gate 
49600Sstevel@tonic-gate 	/*
49610Sstevel@tonic-gate 	 * Make sure we're not changing zones with files open or mapped in
49620Sstevel@tonic-gate 	 * to our address space which shouldn't be changing zones.
49630Sstevel@tonic-gate 	 */
49640Sstevel@tonic-gate 	if (!files_can_change_zones()) {
49650Sstevel@tonic-gate 		err = EBADF;
49660Sstevel@tonic-gate 		goto out;
49670Sstevel@tonic-gate 	}
49680Sstevel@tonic-gate 	if (!as_can_change_zones()) {
49690Sstevel@tonic-gate 		err = EFAULT;
49700Sstevel@tonic-gate 		goto out;
49710Sstevel@tonic-gate 	}
49720Sstevel@tonic-gate 
49730Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
49740Sstevel@tonic-gate 	if (pp->p_zone != global_zone) {
49750Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
49760Sstevel@tonic-gate 		err = EINVAL;
49770Sstevel@tonic-gate 		goto out;
49780Sstevel@tonic-gate 	}
49790Sstevel@tonic-gate 
49800Sstevel@tonic-gate 	zone = zone_find_all_by_id(zoneid);
49810Sstevel@tonic-gate 	if (zone == NULL) {
49820Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
49830Sstevel@tonic-gate 		err = EINVAL;
49840Sstevel@tonic-gate 		goto out;
49850Sstevel@tonic-gate 	}
49860Sstevel@tonic-gate 
49870Sstevel@tonic-gate 	/*
49880Sstevel@tonic-gate 	 * To prevent processes in a zone from holding contracts on
49890Sstevel@tonic-gate 	 * extrazonal resources, and to avoid process contract
49900Sstevel@tonic-gate 	 * memberships which span zones, contract holders and processes
49910Sstevel@tonic-gate 	 * which aren't the sole members of their encapsulating process
49920Sstevel@tonic-gate 	 * contracts are not allowed to zone_enter.
49930Sstevel@tonic-gate 	 */
49940Sstevel@tonic-gate 	ctp = pp->p_ct_process;
49950Sstevel@tonic-gate 	ct = &ctp->conp_contract;
49960Sstevel@tonic-gate 	mutex_enter(&ct->ct_lock);
49970Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
49980Sstevel@tonic-gate 	if ((avl_numnodes(&pp->p_ct_held) != 0) || (ctp->conp_nmembers != 1)) {
49990Sstevel@tonic-gate 		mutex_exit(&pp->p_lock);
50000Sstevel@tonic-gate 		mutex_exit(&ct->ct_lock);
50010Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
50020Sstevel@tonic-gate 		err = EINVAL;
50030Sstevel@tonic-gate 		goto out;
50040Sstevel@tonic-gate 	}
50050Sstevel@tonic-gate 
50060Sstevel@tonic-gate 	/*
50070Sstevel@tonic-gate 	 * Moreover, we don't allow processes whose encapsulating
50080Sstevel@tonic-gate 	 * process contracts have inherited extrazonal contracts.
50090Sstevel@tonic-gate 	 * While it would be easier to eliminate all process contracts
50100Sstevel@tonic-gate 	 * with inherited contracts, we need to be able to give a
50110Sstevel@tonic-gate 	 * restarted init (or other zone-penetrating process) its
50120Sstevel@tonic-gate 	 * predecessor's contracts.
50130Sstevel@tonic-gate 	 */
50140Sstevel@tonic-gate 	if (ctp->conp_ninherited != 0) {
50150Sstevel@tonic-gate 		contract_t *next;
50160Sstevel@tonic-gate 		for (next = list_head(&ctp->conp_inherited); next;
50170Sstevel@tonic-gate 		    next = list_next(&ctp->conp_inherited, next)) {
50180Sstevel@tonic-gate 			if (contract_getzuniqid(next) != zone->zone_uniqid) {
50190Sstevel@tonic-gate 				mutex_exit(&pp->p_lock);
50200Sstevel@tonic-gate 				mutex_exit(&ct->ct_lock);
50210Sstevel@tonic-gate 				mutex_exit(&zonehash_lock);
50220Sstevel@tonic-gate 				err = EINVAL;
50230Sstevel@tonic-gate 				goto out;
50240Sstevel@tonic-gate 			}
50250Sstevel@tonic-gate 		}
50260Sstevel@tonic-gate 	}
50276073Sacruz 
50280Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
50290Sstevel@tonic-gate 	mutex_exit(&ct->ct_lock);
50300Sstevel@tonic-gate 
50310Sstevel@tonic-gate 	status = zone_status_get(zone);
50320Sstevel@tonic-gate 	if (status < ZONE_IS_READY || status >= ZONE_IS_SHUTTING_DOWN) {
50330Sstevel@tonic-gate 		/*
50340Sstevel@tonic-gate 		 * Can't join
50350Sstevel@tonic-gate 		 */
50360Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
50370Sstevel@tonic-gate 		err = EINVAL;
50380Sstevel@tonic-gate 		goto out;
50390Sstevel@tonic-gate 	}
50400Sstevel@tonic-gate 
50410Sstevel@tonic-gate 	/*
50420Sstevel@tonic-gate 	 * Make sure new priv set is within the permitted set for caller
50430Sstevel@tonic-gate 	 */
50440Sstevel@tonic-gate 	if (!priv_issubset(zone->zone_privset, &CR_OPPRIV(CRED()))) {
50450Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
50460Sstevel@tonic-gate 		err = EPERM;
50470Sstevel@tonic-gate 		goto out;
50480Sstevel@tonic-gate 	}
50490Sstevel@tonic-gate 	/*
50500Sstevel@tonic-gate 	 * We want to momentarily drop zonehash_lock while we optimistically
50510Sstevel@tonic-gate 	 * bind curproc to the pool it should be running in.  This is safe
50520Sstevel@tonic-gate 	 * since the zone can't disappear (we have a hold on it).
50530Sstevel@tonic-gate 	 */
50540Sstevel@tonic-gate 	zone_hold(zone);
50550Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
50560Sstevel@tonic-gate 
50570Sstevel@tonic-gate 	/*
50580Sstevel@tonic-gate 	 * Grab pool_lock to keep the pools configuration from changing
50590Sstevel@tonic-gate 	 * and to stop ourselves from getting rebound to another pool
50600Sstevel@tonic-gate 	 * until we join the zone.
50610Sstevel@tonic-gate 	 */
50620Sstevel@tonic-gate 	if (pool_lock_intr() != 0) {
50630Sstevel@tonic-gate 		zone_rele(zone);
50640Sstevel@tonic-gate 		err = EINTR;
50650Sstevel@tonic-gate 		goto out;
50660Sstevel@tonic-gate 	}
50670Sstevel@tonic-gate 	ASSERT(secpolicy_pool(CRED()) == 0);
50680Sstevel@tonic-gate 	/*
50690Sstevel@tonic-gate 	 * Bind ourselves to the pool currently associated with the zone.
50700Sstevel@tonic-gate 	 */
50710Sstevel@tonic-gate 	oldpool = curproc->p_pool;
50720Sstevel@tonic-gate 	newpool = zone_pool_get(zone);
50730Sstevel@tonic-gate 	if (pool_state == POOL_ENABLED && newpool != oldpool &&
50740Sstevel@tonic-gate 	    (err = pool_do_bind(newpool, P_PID, P_MYID,
50750Sstevel@tonic-gate 	    POOL_BIND_ALL)) != 0) {
50760Sstevel@tonic-gate 		pool_unlock();
50770Sstevel@tonic-gate 		zone_rele(zone);
50780Sstevel@tonic-gate 		goto out;
50790Sstevel@tonic-gate 	}
50800Sstevel@tonic-gate 
50810Sstevel@tonic-gate 	/*
50820Sstevel@tonic-gate 	 * Grab cpu_lock now; we'll need it later when we call
50830Sstevel@tonic-gate 	 * task_join().
50840Sstevel@tonic-gate 	 */
50850Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
50860Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
50870Sstevel@tonic-gate 	/*
50880Sstevel@tonic-gate 	 * Make sure the zone hasn't moved on since we dropped zonehash_lock.
50890Sstevel@tonic-gate 	 */
50900Sstevel@tonic-gate 	if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
50910Sstevel@tonic-gate 		/*
50920Sstevel@tonic-gate 		 * Can't join anymore.
50930Sstevel@tonic-gate 		 */
50940Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
50950Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
50960Sstevel@tonic-gate 		if (pool_state == POOL_ENABLED &&
50970Sstevel@tonic-gate 		    newpool != oldpool)
50980Sstevel@tonic-gate 			(void) pool_do_bind(oldpool, P_PID, P_MYID,
50990Sstevel@tonic-gate 			    POOL_BIND_ALL);
51000Sstevel@tonic-gate 		pool_unlock();
51010Sstevel@tonic-gate 		zone_rele(zone);
51020Sstevel@tonic-gate 		err = EINVAL;
51030Sstevel@tonic-gate 		goto out;
51040Sstevel@tonic-gate 	}
51050Sstevel@tonic-gate 
51063247Sgjelinek 	/*
51073247Sgjelinek 	 * a_lock must be held while transferring locked memory and swap
51083247Sgjelinek 	 * reservation from the global zone to the non-global zone because
51093247Sgjelinek 	 * asynchronous faults on the process's address space can lock
51103247Sgjelinek 	 * memory and reserve swap via MCL_FUTURE and MAP_NORESERVE
51113247Sgjelinek 	 * segments respectively.
51123247Sgjelinek 	 */
51133247Sgjelinek 	AS_LOCK_ENTER(pp->p_as, &pp->p_as->a_lock, RW_WRITER);
51143247Sgjelinek 	swap = as_swresv();
51150Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
51160Sstevel@tonic-gate 	zone_proj0 = zone->zone_zsched->p_task->tk_proj;
51170Sstevel@tonic-gate 	/* verify that we do not exceed any task or lwp limits */
51180Sstevel@tonic-gate 	mutex_enter(&zone->zone_nlwps_lock);
51190Sstevel@tonic-gate 	/* add new lwps to zone and zone's proj0 */
51200Sstevel@tonic-gate 	zone_proj0->kpj_nlwps += pp->p_lwpcnt;
51210Sstevel@tonic-gate 	zone->zone_nlwps += pp->p_lwpcnt;
51220Sstevel@tonic-gate 	/* add 1 task to zone's proj0 */
51230Sstevel@tonic-gate 	zone_proj0->kpj_ntasks += 1;
51240Sstevel@tonic-gate 	mutex_exit(&zone->zone_nlwps_lock);
51250Sstevel@tonic-gate 
51263247Sgjelinek 	mutex_enter(&zone->zone_mem_lock);
51272768Ssl108498 	zone->zone_locked_mem += pp->p_locked_mem;
51282768Ssl108498 	zone_proj0->kpj_data.kpd_locked_mem += pp->p_locked_mem;
51293247Sgjelinek 	zone->zone_max_swap += swap;
51303247Sgjelinek 	mutex_exit(&zone->zone_mem_lock);
51312768Ssl108498 
51323916Skrishna 	mutex_enter(&(zone_proj0->kpj_data.kpd_crypto_lock));
51333916Skrishna 	zone_proj0->kpj_data.kpd_crypto_mem += pp->p_crypto_mem;
51343916Skrishna 	mutex_exit(&(zone_proj0->kpj_data.kpd_crypto_lock));
51353916Skrishna 
51360Sstevel@tonic-gate 	/* remove lwps from proc's old zone and old project */
51370Sstevel@tonic-gate 	mutex_enter(&pp->p_zone->zone_nlwps_lock);
51380Sstevel@tonic-gate 	pp->p_zone->zone_nlwps -= pp->p_lwpcnt;
51390Sstevel@tonic-gate 	pp->p_task->tk_proj->kpj_nlwps -= pp->p_lwpcnt;
51400Sstevel@tonic-gate 	mutex_exit(&pp->p_zone->zone_nlwps_lock);
51410Sstevel@tonic-gate 
51423247Sgjelinek 	mutex_enter(&pp->p_zone->zone_mem_lock);
51432768Ssl108498 	pp->p_zone->zone_locked_mem -= pp->p_locked_mem;
51442768Ssl108498 	pp->p_task->tk_proj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
51453247Sgjelinek 	pp->p_zone->zone_max_swap -= swap;
51463247Sgjelinek 	mutex_exit(&pp->p_zone->zone_mem_lock);
51472768Ssl108498 
51483916Skrishna 	mutex_enter(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
51493916Skrishna 	pp->p_task->tk_proj->kpj_data.kpd_crypto_mem -= pp->p_crypto_mem;
51503916Skrishna 	mutex_exit(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
51513916Skrishna 
5152*9121SVamsi.Krishna@Sun.COM 	pp->p_flag |= SZONETOP;
5153*9121SVamsi.Krishna@Sun.COM 	pp->p_zone = zone;
51542768Ssl108498 	mutex_exit(&pp->p_lock);
51553247Sgjelinek 	AS_LOCK_EXIT(pp->p_as, &pp->p_as->a_lock);
51562768Ssl108498 
51570Sstevel@tonic-gate 	/*
51580Sstevel@tonic-gate 	 * Joining the zone cannot fail from now on.
51590Sstevel@tonic-gate 	 *
51600Sstevel@tonic-gate 	 * This means that a lot of the following code can be commonized and
51610Sstevel@tonic-gate 	 * shared with zsched().
51620Sstevel@tonic-gate 	 */
51630Sstevel@tonic-gate 
51640Sstevel@tonic-gate 	/*
51656073Sacruz 	 * If the process contract fmri was inherited, we need to
51666073Sacruz 	 * flag this so that any contract status will not leak
51676073Sacruz 	 * extra zone information (svc_fmri in this case).
51686073Sacruz 	 */
51696073Sacruz 	if (ctp->conp_svc_ctid != ct->ct_id) {
51706073Sacruz 		mutex_enter(&ct->ct_lock);
51716073Sacruz 		ctp->conp_svc_zone_enter = ct->ct_id;
51726073Sacruz 		mutex_exit(&ct->ct_lock);
51736073Sacruz 	}
51746073Sacruz 
51756073Sacruz 	/*
51760Sstevel@tonic-gate 	 * Reset the encapsulating process contract's zone.
51770Sstevel@tonic-gate 	 */
51780Sstevel@tonic-gate 	ASSERT(ct->ct_mzuniqid == GLOBAL_ZONEUNIQID);
51790Sstevel@tonic-gate 	contract_setzuniqid(ct, zone->zone_uniqid);
51800Sstevel@tonic-gate 
51810Sstevel@tonic-gate 	/*
51820Sstevel@tonic-gate 	 * Create a new task and associate the process with the project keyed
51830Sstevel@tonic-gate 	 * by (projid,zoneid).
51840Sstevel@tonic-gate 	 *
51850Sstevel@tonic-gate 	 * We might as well be in project 0; the global zone's projid doesn't
51860Sstevel@tonic-gate 	 * make much sense in a zone anyhow.
51870Sstevel@tonic-gate 	 *
51880Sstevel@tonic-gate 	 * This also increments zone_ntasks, and returns with p_lock held.
51890Sstevel@tonic-gate 	 */
51900Sstevel@tonic-gate 	tk = task_create(0, zone);
51910Sstevel@tonic-gate 	oldtk = task_join(tk, 0);
51920Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
51930Sstevel@tonic-gate 
51940Sstevel@tonic-gate 	/*
51950Sstevel@tonic-gate 	 * call RCTLOP_SET functions on this proc
51960Sstevel@tonic-gate 	 */
51970Sstevel@tonic-gate 	e.rcep_p.zone = zone;
51980Sstevel@tonic-gate 	e.rcep_t = RCENTITY_ZONE;
51990Sstevel@tonic-gate 	(void) rctl_set_dup(NULL, NULL, pp, &e, zone->zone_rctls, NULL,
52000Sstevel@tonic-gate 	    RCD_CALLBACK);
52010Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
52020Sstevel@tonic-gate 
52030Sstevel@tonic-gate 	/*
52040Sstevel@tonic-gate 	 * We don't need to hold any of zsched's locks here; not only do we know
52050Sstevel@tonic-gate 	 * the process and zone aren't going away, we know its session isn't
52060Sstevel@tonic-gate 	 * changing either.
52070Sstevel@tonic-gate 	 *
52080Sstevel@tonic-gate 	 * By joining zsched's session here, we mimic the behavior in the
52090Sstevel@tonic-gate 	 * global zone of init's sid being the pid of sched.  We extend this
52100Sstevel@tonic-gate 	 * to all zlogin-like zone_enter()'ing processes as well.
52110Sstevel@tonic-gate 	 */
52120Sstevel@tonic-gate 	mutex_enter(&pidlock);
52130Sstevel@tonic-gate 	sp = zone->zone_zsched->p_sessp;
52142712Snn35248 	sess_hold(zone->zone_zsched);
52150Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
52160Sstevel@tonic-gate 	pgexit(pp);
52172712Snn35248 	sess_rele(pp->p_sessp, B_TRUE);
52180Sstevel@tonic-gate 	pp->p_sessp = sp;
52190Sstevel@tonic-gate 	pgjoin(pp, zone->zone_zsched->p_pidp);
52203247Sgjelinek 
52213247Sgjelinek 	/*
52223792Sakolb 	 * If any threads are scheduled to be placed on the zone wait queue,
52233792Sakolb 	 * they should abandon the idea since the wait queue is changing.
52243792Sakolb 	 * We need to be holding pidlock & p_lock to do this.
52253792Sakolb 	 */
52263792Sakolb 	if ((t = pp->p_tlist) != NULL) {
52273792Sakolb 		do {
52283792Sakolb 			thread_lock(t);
52293792Sakolb 			/*
52303792Sakolb 			 * Kick this thread so that it doesn't sit
52313792Sakolb 			 * on the wrong wait queue.
52323792Sakolb 			 */
52333792Sakolb 			if (ISWAITING(t))
52343792Sakolb 				setrun_locked(t);
52353792Sakolb 
52363792Sakolb 			if (t->t_schedflag & TS_ANYWAITQ)
52373792Sakolb 				t->t_schedflag &= ~ TS_ANYWAITQ;
52383792Sakolb 
52393792Sakolb 			thread_unlock(t);
52403792Sakolb 		} while ((t = t->t_forw) != pp->p_tlist);
52413792Sakolb 	}
52423792Sakolb 
52433792Sakolb 	/*
52443247Sgjelinek 	 * If there is a default scheduling class for the zone and it is not
52453247Sgjelinek 	 * the class we are currently in, change all of the threads in the
52463247Sgjelinek 	 * process to the new class.  We need to be holding pidlock & p_lock
52473247Sgjelinek 	 * when we call parmsset so this is a good place to do it.
52483247Sgjelinek 	 */
52493247Sgjelinek 	if (zone->zone_defaultcid > 0 &&
52503247Sgjelinek 	    zone->zone_defaultcid != curthread->t_cid) {
52513247Sgjelinek 		pcparms_t pcparms;
52523247Sgjelinek 
52533247Sgjelinek 		pcparms.pc_cid = zone->zone_defaultcid;
52543247Sgjelinek 		pcparms.pc_clparms[0] = 0;
52553247Sgjelinek 
52563247Sgjelinek 		/*
52573247Sgjelinek 		 * If setting the class fails, we still want to enter the zone.
52583247Sgjelinek 		 */
52593247Sgjelinek 		if ((t = pp->p_tlist) != NULL) {
52603247Sgjelinek 			do {
52613247Sgjelinek 				(void) parmsset(&pcparms, t);
52623247Sgjelinek 			} while ((t = t->t_forw) != pp->p_tlist);
52633247Sgjelinek 		}
52643247Sgjelinek 	}
52653247Sgjelinek 
52660Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
52670Sstevel@tonic-gate 	mutex_exit(&pidlock);
52680Sstevel@tonic-gate 
52690Sstevel@tonic-gate 	mutex_exit(&zonehash_lock);
52700Sstevel@tonic-gate 	/*
52710Sstevel@tonic-gate 	 * We're firmly in the zone; let pools progress.
52720Sstevel@tonic-gate 	 */
52730Sstevel@tonic-gate 	pool_unlock();
52740Sstevel@tonic-gate 	task_rele(oldtk);
52750Sstevel@tonic-gate 	/*
52760Sstevel@tonic-gate 	 * We don't need to retain a hold on the zone since we already
52770Sstevel@tonic-gate 	 * incremented zone_ntasks, so the zone isn't going anywhere.
52780Sstevel@tonic-gate 	 */
52790Sstevel@tonic-gate 	zone_rele(zone);
52800Sstevel@tonic-gate 
52810Sstevel@tonic-gate 	/*
52820Sstevel@tonic-gate 	 * Chroot
52830Sstevel@tonic-gate 	 */
52840Sstevel@tonic-gate 	vp = zone->zone_rootvp;
52850Sstevel@tonic-gate 	zone_chdir(vp, &PTOU(pp)->u_cdir, pp);
52860Sstevel@tonic-gate 	zone_chdir(vp, &PTOU(pp)->u_rdir, pp);
52870Sstevel@tonic-gate 
52880Sstevel@tonic-gate 	/*
52890Sstevel@tonic-gate 	 * Change process credentials
52900Sstevel@tonic-gate 	 */
52910Sstevel@tonic-gate 	newcr = cralloc();
52920Sstevel@tonic-gate 	mutex_enter(&pp->p_crlock);
52930Sstevel@tonic-gate 	cr = pp->p_cred;
52940Sstevel@tonic-gate 	crcopy_to(cr, newcr);
52950Sstevel@tonic-gate 	crsetzone(newcr, zone);
52960Sstevel@tonic-gate 	pp->p_cred = newcr;
52970Sstevel@tonic-gate 
52980Sstevel@tonic-gate 	/*
52990Sstevel@tonic-gate 	 * Restrict all process privilege sets to zone limit
53000Sstevel@tonic-gate 	 */
53010Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_PPRIV(newcr));
53020Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_EPRIV(newcr));
53030Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_IPRIV(newcr));
53040Sstevel@tonic-gate 	priv_intersect(zone->zone_privset, &CR_LPRIV(newcr));
53050Sstevel@tonic-gate 	mutex_exit(&pp->p_crlock);
53060Sstevel@tonic-gate 	crset(pp, newcr);
53070Sstevel@tonic-gate 
53080Sstevel@tonic-gate 	/*
53090Sstevel@tonic-gate 	 * Adjust upcount to reflect zone entry.
53100Sstevel@tonic-gate 	 */
53110Sstevel@tonic-gate 	uid = crgetruid(newcr);
53120Sstevel@tonic-gate 	mutex_enter(&pidlock);
53130Sstevel@tonic-gate 	upcount_dec(uid, GLOBAL_ZONEID);
53140Sstevel@tonic-gate 	upcount_inc(uid, zoneid);
53150Sstevel@tonic-gate 	mutex_exit(&pidlock);
53160Sstevel@tonic-gate 
53170Sstevel@tonic-gate 	/*
53180Sstevel@tonic-gate 	 * Set up core file path and content.
53190Sstevel@tonic-gate 	 */
53200Sstevel@tonic-gate 	set_core_defaults();
53210Sstevel@tonic-gate 
53220Sstevel@tonic-gate out:
53230Sstevel@tonic-gate 	/*
53240Sstevel@tonic-gate 	 * Let the other lwps continue.
53250Sstevel@tonic-gate 	 */
53260Sstevel@tonic-gate 	mutex_enter(&pp->p_lock);
53270Sstevel@tonic-gate 	if (curthread != pp->p_agenttp)
53280Sstevel@tonic-gate 		continuelwps(pp);
53290Sstevel@tonic-gate 	mutex_exit(&pp->p_lock);
53300Sstevel@tonic-gate 
53310Sstevel@tonic-gate 	return (err != 0 ? set_errno(err) : 0);
53320Sstevel@tonic-gate }
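
/*
 * Illustrative only (not part of the original source): the zlogin-style
 * sequence a privileged global-zone process would use to enter a zone,
 * assuming the libc wrappers getzoneidbyname() and zone_enter() declared
 * in <zone.h> mirror the syscall above.  After a successful zone_enter()
 * the process is fully inside the zone (new credentials, root directory,
 * task, and pool binding), so the usual next step is to exec a program
 * from the zone's own file system.
 *
 *	#include <zone.h>
 *	#include <unistd.h>
 *
 *	int
 *	run_in_zone(const char *zname, char *const argv[])
 *	{
 *		zoneid_t zid;
 *
 *		if ((zid = getzoneidbyname(zname)) == -1)
 *			return (-1);
 *		if (zone_enter(zid) != 0)
 *			return (-1);
 *		return (execv(argv[0], argv));
 *	}
 */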
53330Sstevel@tonic-gate 
53340Sstevel@tonic-gate /*
53350Sstevel@tonic-gate  * Systemcall entry point for zone_list(2).
53360Sstevel@tonic-gate  *
53370Sstevel@tonic-gate  * Processes running in a (non-global) zone only see themselves.
53381676Sjpk  * On labeled systems, they see all zones whose label they dominate.
53390Sstevel@tonic-gate  */
53400Sstevel@tonic-gate static int
53410Sstevel@tonic-gate zone_list(zoneid_t *zoneidlist, uint_t *numzones)
53420Sstevel@tonic-gate {
53430Sstevel@tonic-gate 	zoneid_t *zoneids;
53441769Scarlsonj 	zone_t *zone, *myzone;
53450Sstevel@tonic-gate 	uint_t user_nzones, real_nzones;
53461676Sjpk 	uint_t domi_nzones;
53471676Sjpk 	int error;
53480Sstevel@tonic-gate 
53490Sstevel@tonic-gate 	if (copyin(numzones, &user_nzones, sizeof (uint_t)) != 0)
53500Sstevel@tonic-gate 		return (set_errno(EFAULT));
53510Sstevel@tonic-gate 
53521769Scarlsonj 	myzone = curproc->p_zone;
53531769Scarlsonj 	if (myzone != global_zone) {
53541676Sjpk 		bslabel_t *mybslab;
53551676Sjpk 
53561676Sjpk 		if (!is_system_labeled()) {
53571676Sjpk 			/* just return current zone */
53581676Sjpk 			real_nzones = domi_nzones = 1;
53591676Sjpk 			zoneids = kmem_alloc(sizeof (zoneid_t), KM_SLEEP);
53601769Scarlsonj 			zoneids[0] = myzone->zone_id;
53611676Sjpk 		} else {
53621676Sjpk 			/* return all zones that are dominated */
53631676Sjpk 			mutex_enter(&zonehash_lock);
53641676Sjpk 			real_nzones = zonecount;
53651676Sjpk 			domi_nzones = 0;
53661676Sjpk 			if (real_nzones > 0) {
53671676Sjpk 				zoneids = kmem_alloc(real_nzones *
53681676Sjpk 				    sizeof (zoneid_t), KM_SLEEP);
53691769Scarlsonj 				mybslab = label2bslabel(myzone->zone_slabel);
53701676Sjpk 				for (zone = list_head(&zone_active);
53711676Sjpk 				    zone != NULL;
53721676Sjpk 				    zone = list_next(&zone_active, zone)) {
53731676Sjpk 					if (zone->zone_id == GLOBAL_ZONEID)
53741676Sjpk 						continue;
53751769Scarlsonj 					if (zone != myzone &&
53761769Scarlsonj 					    (zone->zone_flags & ZF_IS_SCRATCH))
53771769Scarlsonj 						continue;
53781769Scarlsonj 					/*
53791769Scarlsonj 					 * Note that a label always dominates
53801769Scarlsonj 					 * itself, so myzone is always included
53811769Scarlsonj 					 * in the list.
53821769Scarlsonj 					 */
53831676Sjpk 					if (bldominates(mybslab,
53841676Sjpk 					    label2bslabel(zone->zone_slabel))) {
53851676Sjpk 						zoneids[domi_nzones++] =
53861676Sjpk 						    zone->zone_id;
53871676Sjpk 					}
53881676Sjpk 				}
53891676Sjpk 			}
53901676Sjpk 			mutex_exit(&zonehash_lock);
53911676Sjpk 		}
53920Sstevel@tonic-gate 	} else {
53930Sstevel@tonic-gate 		mutex_enter(&zonehash_lock);
53940Sstevel@tonic-gate 		real_nzones = zonecount;
53951676Sjpk 		domi_nzones = 0;
53961676Sjpk 		if (real_nzones > 0) {
53970Sstevel@tonic-gate 			zoneids = kmem_alloc(real_nzones * sizeof (zoneid_t),
53980Sstevel@tonic-gate 			    KM_SLEEP);
53990Sstevel@tonic-gate 			for (zone = list_head(&zone_active); zone != NULL;
54000Sstevel@tonic-gate 			    zone = list_next(&zone_active, zone))
54011676Sjpk 				zoneids[domi_nzones++] = zone->zone_id;
54021676Sjpk 			ASSERT(domi_nzones == real_nzones);
54030Sstevel@tonic-gate 		}
54040Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
54050Sstevel@tonic-gate 	}
54060Sstevel@tonic-gate 
54071676Sjpk 	/*
54081676Sjpk 	 * If the user has allocated space for fewer entries than we found,
54091676Sjpk 	 * return only up to that limit.  Either way, report exactly how many
54101676Sjpk 	 * we found.
54111676Sjpk 	 */
54121676Sjpk 	if (domi_nzones < user_nzones)
54131676Sjpk 		user_nzones = domi_nzones;
54141676Sjpk 	error = 0;
54151676Sjpk 	if (copyout(&domi_nzones, numzones, sizeof (uint_t)) != 0) {
54160Sstevel@tonic-gate 		error = EFAULT;
54171676Sjpk 	} else if (zoneidlist != NULL && user_nzones != 0) {
54180Sstevel@tonic-gate 		if (copyout(zoneids, zoneidlist,
54190Sstevel@tonic-gate 		    user_nzones * sizeof (zoneid_t)) != 0)
54200Sstevel@tonic-gate 			error = EFAULT;
54210Sstevel@tonic-gate 	}
54220Sstevel@tonic-gate 
54231676Sjpk 	if (real_nzones > 0)
54240Sstevel@tonic-gate 		kmem_free(zoneids, real_nzones * sizeof (zoneid_t));
54250Sstevel@tonic-gate 
54261676Sjpk 	if (error != 0)
54270Sstevel@tonic-gate 		return (set_errno(error));
54280Sstevel@tonic-gate 	else
54290Sstevel@tonic-gate 		return (0);
54300Sstevel@tonic-gate }
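
/*
 * Illustrative only (not part of the original source): a userland sketch of
 * the two-call pattern the handler above supports, assuming the libc
 * wrapper zone_list() declared in <zone.h> mirrors the syscall.  The kernel
 * always reports the total number of visible zones, so a caller can size
 * its array from a first call and then fetch the IDs with a second one
 * (a zone could still appear or vanish between the two calls).
 *
 *	#include <zone.h>
 *	#include <stdlib.h>
 *
 *	zoneid_t *
 *	get_visible_zones(uint_t *countp)
 *	{
 *		uint_t nzones = 0, nalloc;
 *		zoneid_t *ids;
 *
 *		if (zone_list(NULL, &nzones) != 0)
 *			return (NULL);
 *		nalloc = nzones;
 *		if ((ids = calloc(nalloc, sizeof (zoneid_t))) == NULL)
 *			return (NULL);
 *		if (zone_list(ids, &nzones) != 0) {
 *			free(ids);
 *			return (NULL);
 *		}
 *		*countp = (nzones < nalloc) ? nzones : nalloc;
 *		return (ids);
 *	}
 */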
54310Sstevel@tonic-gate 
54320Sstevel@tonic-gate /*
54330Sstevel@tonic-gate  * Systemcall entry point for zone_lookup(2).
54340Sstevel@tonic-gate  *
54351676Sjpk  * Non-global zones are only able to see themselves and (on labeled systems)
54361676Sjpk  * the zones they dominate.
54370Sstevel@tonic-gate  */
54380Sstevel@tonic-gate static zoneid_t
54390Sstevel@tonic-gate zone_lookup(const char *zone_name)
54400Sstevel@tonic-gate {
54410Sstevel@tonic-gate 	char *kname;
54420Sstevel@tonic-gate 	zone_t *zone;
54430Sstevel@tonic-gate 	zoneid_t zoneid;
54440Sstevel@tonic-gate 	int err;
54450Sstevel@tonic-gate 
54460Sstevel@tonic-gate 	if (zone_name == NULL) {
54470Sstevel@tonic-gate 		/* return caller's zone id */
54480Sstevel@tonic-gate 		return (getzoneid());
54490Sstevel@tonic-gate 	}
54500Sstevel@tonic-gate 
54510Sstevel@tonic-gate 	kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
54520Sstevel@tonic-gate 	if ((err = copyinstr(zone_name, kname, ZONENAME_MAX, NULL)) != 0) {
54530Sstevel@tonic-gate 		kmem_free(kname, ZONENAME_MAX);
54540Sstevel@tonic-gate 		return (set_errno(err));
54550Sstevel@tonic-gate 	}
54560Sstevel@tonic-gate 
54570Sstevel@tonic-gate 	mutex_enter(&zonehash_lock);
54580Sstevel@tonic-gate 	zone = zone_find_all_by_name(kname);
54590Sstevel@tonic-gate 	kmem_free(kname, ZONENAME_MAX);
54601676Sjpk 	/*
54611676Sjpk 	 * A non-global zone can only look up the global zone and itself.
54621676Sjpk 	 * In Trusted Extensions, zone label dominance rules apply.
54631676Sjpk 	 */
54641676Sjpk 	if (zone == NULL ||
54651676Sjpk 	    zone_status_get(zone) < ZONE_IS_READY ||
54661676Sjpk 	    !zone_list_access(zone)) {
54670Sstevel@tonic-gate 		mutex_exit(&zonehash_lock);
54680Sstevel@tonic-gate 		return (set_errno(EINVAL));
54691676Sjpk 	} else {
54701676Sjpk 		zoneid = zone->zone_id;
54711676Sjpk 		mutex_exit(&zonehash_lock);
54721676Sjpk 		return (zoneid);
54730Sstevel@tonic-gate 	}
54740Sstevel@tonic-gate }
54750Sstevel@tonic-gate 
5476813Sdp static int
5477813Sdp zone_version(int *version_arg)
5478813Sdp {
5479813Sdp 	int version = ZONE_SYSCALL_API_VERSION;
5480813Sdp 
5481813Sdp 	if (copyout(&version, version_arg, sizeof (int)) != 0)
5482813Sdp 		return (set_errno(EFAULT));
5483813Sdp 	return (0);
5484813Sdp }
5485813Sdp 
54860Sstevel@tonic-gate /* ARGSUSED */
54870Sstevel@tonic-gate long
5488789Sahrens zone(int cmd, void *arg1, void *arg2, void *arg3, void *arg4)
54890Sstevel@tonic-gate {
54900Sstevel@tonic-gate 	zone_def zs;
54910Sstevel@tonic-gate 
54920Sstevel@tonic-gate 	switch (cmd) {
54930Sstevel@tonic-gate 	case ZONE_CREATE:
54940Sstevel@tonic-gate 		if (get_udatamodel() == DATAMODEL_NATIVE) {
54950Sstevel@tonic-gate 			if (copyin(arg1, &zs, sizeof (zone_def))) {
54960Sstevel@tonic-gate 				return (set_errno(EFAULT));
54970Sstevel@tonic-gate 			}
54980Sstevel@tonic-gate 		} else {
54990Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
55000Sstevel@tonic-gate 			zone_def32 zs32;
55010Sstevel@tonic-gate 
55020Sstevel@tonic-gate 			if (copyin(arg1, &zs32, sizeof (zone_def32))) {
55030Sstevel@tonic-gate 				return (set_errno(EFAULT));
55040Sstevel@tonic-gate 			}
55050Sstevel@tonic-gate 			zs.zone_name =
55060Sstevel@tonic-gate 			    (const char *)(unsigned long)zs32.zone_name;
55070Sstevel@tonic-gate 			zs.zone_root =
55080Sstevel@tonic-gate 			    (const char *)(unsigned long)zs32.zone_root;
55090Sstevel@tonic-gate 			zs.zone_privs =
55100Sstevel@tonic-gate 			    (const struct priv_set *)
55110Sstevel@tonic-gate 			    (unsigned long)zs32.zone_privs;
55121409Sdp 			zs.zone_privssz = zs32.zone_privssz;
55130Sstevel@tonic-gate 			zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
55140Sstevel@tonic-gate 			zs.rctlbufsz = zs32.rctlbufsz;
5515789Sahrens 			zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
5516789Sahrens 			zs.zfsbufsz = zs32.zfsbufsz;
55170Sstevel@tonic-gate 			zs.extended_error =
55180Sstevel@tonic-gate 			    (int *)(unsigned long)zs32.extended_error;
55191676Sjpk 			zs.match = zs32.match;
55201676Sjpk 			zs.doi = zs32.doi;
55211676Sjpk 			zs.label = (const bslabel_t *)(uintptr_t)zs32.label;
55223448Sdh155122 			zs.flags = zs32.flags;
55230Sstevel@tonic-gate #else
55240Sstevel@tonic-gate 			panic("get_udatamodel() returned bogus result\n");
55250Sstevel@tonic-gate #endif
55260Sstevel@tonic-gate 		}
55270Sstevel@tonic-gate 
55280Sstevel@tonic-gate 		return (zone_create(zs.zone_name, zs.zone_root,
5529813Sdp 		    zs.zone_privs, zs.zone_privssz,
5530813Sdp 		    (caddr_t)zs.rctlbuf, zs.rctlbufsz,
5531813Sdp 		    (caddr_t)zs.zfsbuf, zs.zfsbufsz,
55321676Sjpk 		    zs.extended_error, zs.match, zs.doi,
55333448Sdh155122 		    zs.label, zs.flags));
55340Sstevel@tonic-gate 	case ZONE_BOOT:
55352267Sdp 		return (zone_boot((zoneid_t)(uintptr_t)arg1));
55360Sstevel@tonic-gate 	case ZONE_DESTROY:
55370Sstevel@tonic-gate 		return (zone_destroy((zoneid_t)(uintptr_t)arg1));
55380Sstevel@tonic-gate 	case ZONE_GETATTR:
55390Sstevel@tonic-gate 		return (zone_getattr((zoneid_t)(uintptr_t)arg1,
55400Sstevel@tonic-gate 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
55412267Sdp 	case ZONE_SETATTR:
55422267Sdp 		return (zone_setattr((zoneid_t)(uintptr_t)arg1,
55432267Sdp 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
55440Sstevel@tonic-gate 	case ZONE_ENTER:
55450Sstevel@tonic-gate 		return (zone_enter((zoneid_t)(uintptr_t)arg1));
55460Sstevel@tonic-gate 	case ZONE_LIST:
55470Sstevel@tonic-gate 		return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
55480Sstevel@tonic-gate 	case ZONE_SHUTDOWN:
55490Sstevel@tonic-gate 		return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
55500Sstevel@tonic-gate 	case ZONE_LOOKUP:
55510Sstevel@tonic-gate 		return (zone_lookup((const char *)arg1));
5552813Sdp 	case ZONE_VERSION:
5553813Sdp 		return (zone_version((int *)arg1));
55543448Sdh155122 	case ZONE_ADD_DATALINK:
55553448Sdh155122 		return (zone_add_datalink((zoneid_t)(uintptr_t)arg1,
55563448Sdh155122 		    (char *)arg2));
55573448Sdh155122 	case ZONE_DEL_DATALINK:
55583448Sdh155122 		return (zone_remove_datalink((zoneid_t)(uintptr_t)arg1,
55593448Sdh155122 		    (char *)arg2));
55603448Sdh155122 	case ZONE_CHECK_DATALINK:
55613448Sdh155122 		return (zone_check_datalink((zoneid_t *)arg1, (char *)arg2));
55623448Sdh155122 	case ZONE_LIST_DATALINK:
55633448Sdh155122 		return (zone_list_datalink((zoneid_t)(uintptr_t)arg1,
55643448Sdh155122 		    (int *)arg2, (char *)arg3));
55650Sstevel@tonic-gate 	default:
55660Sstevel@tonic-gate 		return (set_errno(EINVAL));
55670Sstevel@tonic-gate 	}
55680Sstevel@tonic-gate }
55690Sstevel@tonic-gate 
55700Sstevel@tonic-gate struct zarg {
55710Sstevel@tonic-gate 	zone_t *zone;
55720Sstevel@tonic-gate 	zone_cmd_arg_t arg;
55730Sstevel@tonic-gate };
55740Sstevel@tonic-gate 
55750Sstevel@tonic-gate static int
55760Sstevel@tonic-gate zone_lookup_door(const char *zone_name, door_handle_t *doorp)
55770Sstevel@tonic-gate {
55780Sstevel@tonic-gate 	char *buf;
55790Sstevel@tonic-gate 	size_t buflen;
55800Sstevel@tonic-gate 	int error;
55810Sstevel@tonic-gate 
55820Sstevel@tonic-gate 	buflen = sizeof (ZONE_DOOR_PATH) + strlen(zone_name);
55830Sstevel@tonic-gate 	buf = kmem_alloc(buflen, KM_SLEEP);
55840Sstevel@tonic-gate 	(void) snprintf(buf, buflen, ZONE_DOOR_PATH, zone_name);
55850Sstevel@tonic-gate 	error = door_ki_open(buf, doorp);
55860Sstevel@tonic-gate 	kmem_free(buf, buflen);
55870Sstevel@tonic-gate 	return (error);
55880Sstevel@tonic-gate }
55890Sstevel@tonic-gate 
55900Sstevel@tonic-gate static void
55910Sstevel@tonic-gate zone_release_door(door_handle_t *doorp)
55920Sstevel@tonic-gate {
55930Sstevel@tonic-gate 	door_ki_rele(*doorp);
55940Sstevel@tonic-gate 	*doorp = NULL;
55950Sstevel@tonic-gate }
55960Sstevel@tonic-gate 
55970Sstevel@tonic-gate static void
55980Sstevel@tonic-gate zone_ki_call_zoneadmd(struct zarg *zargp)
55990Sstevel@tonic-gate {
56000Sstevel@tonic-gate 	door_handle_t door = NULL;
56010Sstevel@tonic-gate 	door_arg_t darg, save_arg;
56020Sstevel@tonic-gate 	char *zone_name;
56030Sstevel@tonic-gate 	size_t zone_namelen;
56040Sstevel@tonic-gate 	zoneid_t zoneid;
56050Sstevel@tonic-gate 	zone_t *zone;
56060Sstevel@tonic-gate 	zone_cmd_arg_t arg;
56070Sstevel@tonic-gate 	uint64_t uniqid;
56080Sstevel@tonic-gate 	size_t size;
56090Sstevel@tonic-gate 	int error;
56100Sstevel@tonic-gate 	int retry;
56110Sstevel@tonic-gate 
56120Sstevel@tonic-gate 	zone = zargp->zone;
56130Sstevel@tonic-gate 	arg = zargp->arg;
56140Sstevel@tonic-gate 	kmem_free(zargp, sizeof (*zargp));
56150Sstevel@tonic-gate 
56160Sstevel@tonic-gate 	zone_namelen = strlen(zone->zone_name) + 1;
56170Sstevel@tonic-gate 	zone_name = kmem_alloc(zone_namelen, KM_SLEEP);
56180Sstevel@tonic-gate 	bcopy(zone->zone_name, zone_name, zone_namelen);
56190Sstevel@tonic-gate 	zoneid = zone->zone_id;
56200Sstevel@tonic-gate 	uniqid = zone->zone_uniqid;
56210Sstevel@tonic-gate 	/*
56220Sstevel@tonic-gate 	 * zoneadmd may be down, but at least we can empty out the zone.
56230Sstevel@tonic-gate 	 * We can ignore the return value of zone_empty() since we're called
56240Sstevel@tonic-gate 	 * from a kernel thread and know we won't be delivered any signals.
56250Sstevel@tonic-gate 	 */
56260Sstevel@tonic-gate 	ASSERT(curproc == &p0);
56270Sstevel@tonic-gate 	(void) zone_empty(zone);
56280Sstevel@tonic-gate 	ASSERT(zone_status_get(zone) >= ZONE_IS_EMPTY);
56290Sstevel@tonic-gate 	zone_rele(zone);
56300Sstevel@tonic-gate 
56310Sstevel@tonic-gate 	size = sizeof (arg);
56320Sstevel@tonic-gate 	darg.rbuf = (char *)&arg;
56330Sstevel@tonic-gate 	darg.data_ptr = (char *)&arg;
56340Sstevel@tonic-gate 	darg.rsize = size;
56350Sstevel@tonic-gate 	darg.data_size = size;
56360Sstevel@tonic-gate 	darg.desc_ptr = NULL;
56370Sstevel@tonic-gate 	darg.desc_num = 0;
56380Sstevel@tonic-gate 
56390Sstevel@tonic-gate 	save_arg = darg;
56400Sstevel@tonic-gate 	/*
56410Sstevel@tonic-gate 	 * Since we're not holding a reference to the zone, any number of
56420Sstevel@tonic-gate 	 * things can go wrong, including the zone disappearing before we get a
56430Sstevel@tonic-gate 	 * chance to talk to zoneadmd.
56440Sstevel@tonic-gate 	 */
56450Sstevel@tonic-gate 	for (retry = 0; /* forever */; retry++) {
56460Sstevel@tonic-gate 		if (door == NULL &&
56470Sstevel@tonic-gate 		    (error = zone_lookup_door(zone_name, &door)) != 0) {
56480Sstevel@tonic-gate 			goto next;
56490Sstevel@tonic-gate 		}
56500Sstevel@tonic-gate 		ASSERT(door != NULL);
56510Sstevel@tonic-gate 
56526997Sjwadams 		if ((error = door_ki_upcall_limited(door, &darg, NULL,
56536997Sjwadams 		    SIZE_MAX, 0)) == 0) {
56540Sstevel@tonic-gate 			break;
56550Sstevel@tonic-gate 		}
56560Sstevel@tonic-gate 		switch (error) {
56570Sstevel@tonic-gate 		case EINTR:
56580Sstevel@tonic-gate 			/* FALLTHROUGH */
56590Sstevel@tonic-gate 		case EAGAIN:	/* process may be forking */
56600Sstevel@tonic-gate 			/*
56610Sstevel@tonic-gate 			 * Back off for a bit
56620Sstevel@tonic-gate 			 */
56630Sstevel@tonic-gate 			break;
56640Sstevel@tonic-gate 		case EBADF:
56650Sstevel@tonic-gate 			zone_release_door(&door);
56660Sstevel@tonic-gate 			if (zone_lookup_door(zone_name, &door) != 0) {
56670Sstevel@tonic-gate 				/*
56680Sstevel@tonic-gate 				 * zoneadmd may be dead, but it may come back to
56690Sstevel@tonic-gate 				 * life later.
56700Sstevel@tonic-gate 				 */
56710Sstevel@tonic-gate 				break;
56720Sstevel@tonic-gate 			}
56730Sstevel@tonic-gate 			break;
56740Sstevel@tonic-gate 		default:
56750Sstevel@tonic-gate 			cmn_err(CE_WARN,
56760Sstevel@tonic-gate 			    "zone_ki_call_zoneadmd: door_ki_upcall error %d\n",
56770Sstevel@tonic-gate 			    error);
56780Sstevel@tonic-gate 			goto out;
56790Sstevel@tonic-gate 		}
56800Sstevel@tonic-gate next:
56810Sstevel@tonic-gate 		/*
56820Sstevel@tonic-gate 		 * If this isn't the same zone_t that we originally had in mind,
56830Sstevel@tonic-gate 		 * then this is the same as if two kadmin requests come in at
56840Sstevel@tonic-gate 		 * the same time: the first one wins.  This means we lose, so we
56850Sstevel@tonic-gate 		 * bail.
56860Sstevel@tonic-gate 		 */
56870Sstevel@tonic-gate 		if ((zone = zone_find_by_id(zoneid)) == NULL) {
56880Sstevel@tonic-gate 			/*
56890Sstevel@tonic-gate 			 * Problem is solved.
56900Sstevel@tonic-gate 			 */
56910Sstevel@tonic-gate 			break;
56920Sstevel@tonic-gate 		}
56930Sstevel@tonic-gate 		if (zone->zone_uniqid != uniqid) {
56940Sstevel@tonic-gate 			/*
56950Sstevel@tonic-gate 			 * zoneid recycled
56960Sstevel@tonic-gate 			 */
56970Sstevel@tonic-gate 			zone_rele(zone);
56980Sstevel@tonic-gate 			break;
56990Sstevel@tonic-gate 		}
57000Sstevel@tonic-gate 		/*
57010Sstevel@tonic-gate 		 * We could zone_status_timedwait(), but there doesn't seem to
57020Sstevel@tonic-gate 		 * be much point in doing that (plus, it would mean that
57030Sstevel@tonic-gate 		 * zone_free() isn't called until this thread exits).
57040Sstevel@tonic-gate 		 */
57050Sstevel@tonic-gate 		zone_rele(zone);
57060Sstevel@tonic-gate 		delay(hz);
57070Sstevel@tonic-gate 		darg = save_arg;
57080Sstevel@tonic-gate 	}
57090Sstevel@tonic-gate out:
57100Sstevel@tonic-gate 	if (door != NULL) {
57110Sstevel@tonic-gate 		zone_release_door(&door);
57120Sstevel@tonic-gate 	}
57130Sstevel@tonic-gate 	kmem_free(zone_name, zone_namelen);
57140Sstevel@tonic-gate 	thread_exit();
57150Sstevel@tonic-gate }
57160Sstevel@tonic-gate 
57170Sstevel@tonic-gate /*
57182267Sdp  * Entry point for uadmin() to tell the zone to go away or reboot.  Analog to
57192267Sdp  * kadmin().  The caller is a process in the zone.
57200Sstevel@tonic-gate  *
57210Sstevel@tonic-gate  * In order to shutdown the zone, we will hand off control to zoneadmd
57220Sstevel@tonic-gate  * (running in the global zone) via a door.  We do a half-hearted job of
57230Sstevel@tonic-gate  * killing all processes in the zone, create a kernel thread to contact
57240Sstevel@tonic-gate  * zoneadmd, and make note of the "uniqid" of the zone.  The uniqid is
57250Sstevel@tonic-gate  * a form of generation number used to let zoneadmd (as well as
57260Sstevel@tonic-gate  * zone_destroy()) know exactly which zone they're talking about.
57270Sstevel@tonic-gate  */
57280Sstevel@tonic-gate int
57292267Sdp zone_kadmin(int cmd, int fcn, const char *mdep, cred_t *credp)
57300Sstevel@tonic-gate {
57310Sstevel@tonic-gate 	struct zarg *zargp;
57320Sstevel@tonic-gate 	zone_cmd_t zcmd;
57330Sstevel@tonic-gate 	zone_t *zone;
57340Sstevel@tonic-gate 
57350Sstevel@tonic-gate 	zone = curproc->p_zone;
57360Sstevel@tonic-gate 	ASSERT(getzoneid() != GLOBAL_ZONEID);
57370Sstevel@tonic-gate 
57380Sstevel@tonic-gate 	switch (cmd) {
57390Sstevel@tonic-gate 	case A_SHUTDOWN:
57400Sstevel@tonic-gate 		switch (fcn) {
57410Sstevel@tonic-gate 		case AD_HALT:
57420Sstevel@tonic-gate 		case AD_POWEROFF:
57430Sstevel@tonic-gate 			zcmd = Z_HALT;
57440Sstevel@tonic-gate 			break;
57450Sstevel@tonic-gate 		case AD_BOOT:
57460Sstevel@tonic-gate 			zcmd = Z_REBOOT;
57470Sstevel@tonic-gate 			break;
57480Sstevel@tonic-gate 		case AD_IBOOT:
57490Sstevel@tonic-gate 		case AD_SBOOT:
57500Sstevel@tonic-gate 		case AD_SIBOOT:
57510Sstevel@tonic-gate 		case AD_NOSYNC:
57520Sstevel@tonic-gate 			return (ENOTSUP);
57530Sstevel@tonic-gate 		default:
57540Sstevel@tonic-gate 			return (EINVAL);
57550Sstevel@tonic-gate 		}
57560Sstevel@tonic-gate 		break;
57570Sstevel@tonic-gate 	case A_REBOOT:
57580Sstevel@tonic-gate 		zcmd = Z_REBOOT;
57590Sstevel@tonic-gate 		break;
57600Sstevel@tonic-gate 	case A_FTRACE:
57610Sstevel@tonic-gate 	case A_REMOUNT:
57620Sstevel@tonic-gate 	case A_FREEZE:
57630Sstevel@tonic-gate 	case A_DUMP:
57640Sstevel@tonic-gate 		return (ENOTSUP);
57650Sstevel@tonic-gate 	default:
57660Sstevel@tonic-gate 		ASSERT(cmd != A_SWAPCTL);	/* handled by uadmin() */
57670Sstevel@tonic-gate 		return (EINVAL);
57680Sstevel@tonic-gate 	}
57690Sstevel@tonic-gate 
57700Sstevel@tonic-gate 	if (secpolicy_zone_admin(credp, B_FALSE))
57710Sstevel@tonic-gate 		return (EPERM);
57720Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
57732267Sdp 
57740Sstevel@tonic-gate 	/*
57750Sstevel@tonic-gate 	 * zone_status can't be ZONE_IS_EMPTY or higher since curproc
57760Sstevel@tonic-gate 	 * is in the zone.
57770Sstevel@tonic-gate 	 */
57780Sstevel@tonic-gate 	ASSERT(zone_status_get(zone) < ZONE_IS_EMPTY);
57790Sstevel@tonic-gate 	if (zone_status_get(zone) > ZONE_IS_RUNNING) {
57800Sstevel@tonic-gate 		/*
57810Sstevel@tonic-gate 		 * This zone is already on its way down.
57820Sstevel@tonic-gate 		 */
57830Sstevel@tonic-gate 		mutex_exit(&zone_status_lock);
57840Sstevel@tonic-gate 		return (0);
57850Sstevel@tonic-gate 	}
57860Sstevel@tonic-gate 	/*
57870Sstevel@tonic-gate 	 * Prevent future zone_enter()s
57880Sstevel@tonic-gate 	 */
57890Sstevel@tonic-gate 	zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
57900Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
57910Sstevel@tonic-gate 
57920Sstevel@tonic-gate 	/*
57930Sstevel@tonic-gate 	 * Kill everyone now and call zoneadmd later.
57940Sstevel@tonic-gate 	 * zone_ki_call_zoneadmd() will do a more thorough job of this
57950Sstevel@tonic-gate 	 * later.
57960Sstevel@tonic-gate 	 */
57970Sstevel@tonic-gate 	killall(zone->zone_id);
57980Sstevel@tonic-gate 	/*
57990Sstevel@tonic-gate 	 * Now, create the thread to contact zoneadmd and do the rest of the
58000Sstevel@tonic-gate 	 * work.  This thread can't be created in our zone; otherwise
58010Sstevel@tonic-gate 	 * zone_destroy() would deadlock.
58020Sstevel@tonic-gate 	 */
58032267Sdp 	zargp = kmem_zalloc(sizeof (*zargp), KM_SLEEP);
58040Sstevel@tonic-gate 	zargp->arg.cmd = zcmd;
58050Sstevel@tonic-gate 	zargp->arg.uniqid = zone->zone_uniqid;
58062267Sdp 	zargp->zone = zone;
58070Sstevel@tonic-gate 	(void) strcpy(zargp->arg.locale, "C");
58082267Sdp 	/* mdep was already copied in for us by uadmin */
58092267Sdp 	if (mdep != NULL)
58102267Sdp 		(void) strlcpy(zargp->arg.bootbuf, mdep,
58112267Sdp 		    sizeof (zargp->arg.bootbuf));
58122267Sdp 	zone_hold(zone);
58130Sstevel@tonic-gate 
58140Sstevel@tonic-gate 	(void) thread_create(NULL, 0, zone_ki_call_zoneadmd, zargp, 0, &p0,
58150Sstevel@tonic-gate 	    TS_RUN, minclsyspri);
58160Sstevel@tonic-gate 	exit(CLD_EXITED, 0);
58170Sstevel@tonic-gate 
58180Sstevel@tonic-gate 	return (EINVAL);
58190Sstevel@tonic-gate }
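
/*
 * Illustrative only (not part of the original source): from inside a
 * non-global zone, a sufficiently privileged process drives the function
 * above through the standard uadmin(2) interface; A_SHUTDOWN/AD_HALT maps
 * to Z_HALT and A_SHUTDOWN/AD_BOOT (or A_REBOOT) maps to Z_REBOOT, with
 * the actual work handed off to zoneadmd.
 *
 *	#include <sys/uadmin.h>
 *
 *	(void) uadmin(A_SHUTDOWN, AD_BOOT, 0);		reboot this zone
 *	(void) uadmin(A_SHUTDOWN, AD_HALT, 0);		halt this zone
 */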
58200Sstevel@tonic-gate 
58210Sstevel@tonic-gate /*
58220Sstevel@tonic-gate  * Entry point so kadmin(A_SHUTDOWN, ...) can set the global zone's
58230Sstevel@tonic-gate  * status to ZONE_IS_SHUTTING_DOWN.
58248364SJordan.Vaughan@Sun.com  *
58258364SJordan.Vaughan@Sun.com  * This function also shuts down all running zones to ensure that they won't
58268364SJordan.Vaughan@Sun.com  * fork new processes.
58270Sstevel@tonic-gate  */
58280Sstevel@tonic-gate void
58290Sstevel@tonic-gate zone_shutdown_global(void)
58300Sstevel@tonic-gate {
58318364SJordan.Vaughan@Sun.com 	zone_t *current_zonep;
58328364SJordan.Vaughan@Sun.com 
58338364SJordan.Vaughan@Sun.com 	ASSERT(INGLOBALZONE(curproc));
58348364SJordan.Vaughan@Sun.com 	mutex_enter(&zonehash_lock);
58350Sstevel@tonic-gate 	mutex_enter(&zone_status_lock);
58368364SJordan.Vaughan@Sun.com 
58378364SJordan.Vaughan@Sun.com 	/* Modify the global zone's status first. */
58380Sstevel@tonic-gate 	ASSERT(zone_status_get(global_zone) == ZONE_IS_RUNNING);
58390Sstevel@tonic-gate 	zone_status_set(global_zone, ZONE_IS_SHUTTING_DOWN);
58408364SJordan.Vaughan@Sun.com 
58418364SJordan.Vaughan@Sun.com 	/*
58428364SJordan.Vaughan@Sun.com 	 * Now change the states of all running zones to ZONE_IS_SHUTTING_DOWN.
58438364SJordan.Vaughan@Sun.com 	 * We don't mark all zones with ZONE_IS_SHUTTING_DOWN because doing so
58448364SJordan.Vaughan@Sun.com 	 * could cause assertions to fail (e.g., assertions about a zone's
58458364SJordan.Vaughan@Sun.com 	 * state during initialization, readying, or booting) or produce races.
58468364SJordan.Vaughan@Sun.com 	 * We'll let threads continue to initialize and ready new zones: they'll
58478364SJordan.Vaughan@Sun.com 	 * fail to boot the new zones when they see that the global zone is
58488364SJordan.Vaughan@Sun.com 	 * shutting down.
58498364SJordan.Vaughan@Sun.com 	 */
58508364SJordan.Vaughan@Sun.com 	for (current_zonep = list_head(&zone_active); current_zonep != NULL;
58518364SJordan.Vaughan@Sun.com 	    current_zonep = list_next(&zone_active, current_zonep)) {
58528364SJordan.Vaughan@Sun.com 		if (zone_status_get(current_zonep) == ZONE_IS_RUNNING)
58538364SJordan.Vaughan@Sun.com 			zone_status_set(current_zonep, ZONE_IS_SHUTTING_DOWN);
58548364SJordan.Vaughan@Sun.com 	}
58550Sstevel@tonic-gate 	mutex_exit(&zone_status_lock);
58568364SJordan.Vaughan@Sun.com 	mutex_exit(&zonehash_lock);
58570Sstevel@tonic-gate }
5858789Sahrens 
5859789Sahrens /*
5860789Sahrens  * Returns true if the named dataset is visible in the current zone.
5861789Sahrens  * The 'write' parameter is set to 1 if the dataset is also writable.
5862789Sahrens  */
5863789Sahrens int
5864789Sahrens zone_dataset_visible(const char *dataset, int *write)
5865789Sahrens {
5866789Sahrens 	zone_dataset_t *zd;
5867789Sahrens 	size_t len;
5868789Sahrens 	zone_t *zone = curproc->p_zone;
5869789Sahrens 
5870789Sahrens 	if (dataset[0] == '\0')
5871789Sahrens 		return (0);
5872789Sahrens 
5873789Sahrens 	/*
5874789Sahrens 	 * Walk the list once, looking for datasets which match exactly, or
5875789Sahrens 	 * specify a dataset underneath an exported dataset.  If found, return
5876789Sahrens 	 * true and note that it is writable.
5877789Sahrens 	 */
5878789Sahrens 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5879789Sahrens 	    zd = list_next(&zone->zone_datasets, zd)) {
5880789Sahrens 
5881789Sahrens 		len = strlen(zd->zd_dataset);
5882789Sahrens 		if (strlen(dataset) >= len &&
5883789Sahrens 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5884816Smaybee 		    (dataset[len] == '\0' || dataset[len] == '/' ||
5885816Smaybee 		    dataset[len] == '@')) {
5886789Sahrens 			if (write)
5887789Sahrens 				*write = 1;
5888789Sahrens 			return (1);
5889789Sahrens 		}
5890789Sahrens 	}
5891789Sahrens 
5892789Sahrens 	/*
5893789Sahrens 	 * Walk the list a second time, searching for datasets which are parents
5894789Sahrens 	 * of exported datasets.  These should be visible, but read-only.
5895789Sahrens 	 *
5896789Sahrens 	 * Note that we also have to support forms such as 'pool/dataset/', with
5897789Sahrens 	 * a trailing slash.
5898789Sahrens 	 */
5899789Sahrens 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5900789Sahrens 	    zd = list_next(&zone->zone_datasets, zd)) {
5901789Sahrens 
5902789Sahrens 		len = strlen(dataset);
5903789Sahrens 		if (dataset[len - 1] == '/')
5904789Sahrens 			len--;	/* Ignore trailing slash */
5905789Sahrens 		if (len < strlen(zd->zd_dataset) &&
5906789Sahrens 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5907789Sahrens 		    zd->zd_dataset[len] == '/') {
5908789Sahrens 			if (write)
5909789Sahrens 				*write = 0;
5910789Sahrens 			return (1);
5911789Sahrens 		}
5912789Sahrens 	}
5913789Sahrens 
5914789Sahrens 	return (0);
5915789Sahrens }
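
/*
 * Illustrative only (not part of the original source): given a hypothetical
 * zone whose only exported dataset is "tank/zones/z1", the two walks above
 * would classify lookups as follows:
 *
 *	"tank/zones/z1"		visible, writable  (exact match)
 *	"tank/zones/z1/home"	visible, writable  (child of an export)
 *	"tank/zones/z1@snap"	visible, writable  (snapshot of an export)
 *	"tank", "tank/zones/"	visible, read-only (parent of an export)
 *	"tank/other"		not visible
 */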
59161676Sjpk 
59171676Sjpk /*
59181676Sjpk  * zone_find_by_any_path() -
59191676Sjpk  *
59201676Sjpk  * kernel-private routine similar to zone_find_by_path(), but which
59211676Sjpk  * effectively compares against zone paths rather than zonerootpath
59221676Sjpk  * (i.e., the last component of zonerootpaths, which should be "root/",
59231676Sjpk  * is not compared.)  This is done in order to accurately identify all
59241676Sjpk  * paths, whether zone-visible or not, including those which are parallel
59251676Sjpk  * to /root/, such as /dev/, /home/, etc...
59261676Sjpk  *
59271676Sjpk  * If the specified path does not fall under any zone path then global
59281676Sjpk  * zone is returned.
59291676Sjpk  *
59301676Sjpk  * The treat_abs parameter indicates whether the path should be treated as
59311676Sjpk  * an absolute path although it does not begin with "/".  (This supports
59321676Sjpk  * nfs mount syntax such as host:any/path.)
59331676Sjpk  *
59341676Sjpk  * The caller is responsible for zone_rele of the returned zone.
59351676Sjpk  */
59361676Sjpk zone_t *
59371676Sjpk zone_find_by_any_path(const char *path, boolean_t treat_abs)
59381676Sjpk {
59391676Sjpk 	zone_t *zone;
59401676Sjpk 	int path_offset = 0;
59411676Sjpk 
59421676Sjpk 	if (path == NULL) {
59431676Sjpk 		zone_hold(global_zone);
59441676Sjpk 		return (global_zone);
59451676Sjpk 	}
59461676Sjpk 
59471676Sjpk 	if (*path != '/') {
59481676Sjpk 		ASSERT(treat_abs);
59491676Sjpk 		path_offset = 1;
59501676Sjpk 	}
59511676Sjpk 
59521676Sjpk 	mutex_enter(&zonehash_lock);
59531676Sjpk 	for (zone = list_head(&zone_active); zone != NULL;
59541676Sjpk 	    zone = list_next(&zone_active, zone)) {
59551676Sjpk 		char	*c;
59561676Sjpk 		size_t	pathlen;
59571876Smp46848 		char *rootpath_start;
59581676Sjpk 
59591676Sjpk 		if (zone == global_zone)	/* skip global zone */
59601676Sjpk 			continue;
59611676Sjpk 
59621676Sjpk 		/* scan backwards to find start of last component */
59631676Sjpk 		c = zone->zone_rootpath + zone->zone_rootpathlen - 2;
59641676Sjpk 		do {
59651676Sjpk 			c--;
59661676Sjpk 		} while (*c != '/');
59671676Sjpk 
59681876Smp46848 		pathlen = c - zone->zone_rootpath + 1 - path_offset;
59691876Smp46848 		rootpath_start = (zone->zone_rootpath + path_offset);
59701876Smp46848 		if (strncmp(path, rootpath_start, pathlen) == 0)
59711676Sjpk 			break;
59721676Sjpk 	}
59731676Sjpk 	if (zone == NULL)
59741676Sjpk 		zone = global_zone;
59751676Sjpk 	zone_hold(zone);
59761676Sjpk 	mutex_exit(&zonehash_lock);
59771676Sjpk 	return (zone);
59781676Sjpk }
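
/*
 * Illustrative sketch (not part of the original file): using
 * zone_find_by_any_path() above to map an arbitrary path, including NFS
 * "host:any/path" syntax, to the owning zone's ID.  example_zone_for_path()
 * is a hypothetical name.
 */
static zoneid_t
example_zone_for_path(const char *path, boolean_t is_nfs_special)
{
	zone_t *zone;
	zoneid_t zoneid;

	/* treat_abs lets "host:any/path" be matched as if it were absolute */
	zone = zone_find_by_any_path(path, is_nfs_special);
	zoneid = zone->zone_id;
	zone_rele(zone);	/* drop the hold the routine returned */
	return (zoneid);
}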
59793448Sdh155122 
59803448Sdh155122 /* List of datalink names which are accessible from the zone */
59813448Sdh155122 struct dlnamelist {
59823448Sdh155122 	char			dlnl_name[LIFNAMSIZ];
59833448Sdh155122 	struct dlnamelist	*dlnl_next;
59843448Sdh155122 };
59853448Sdh155122 
59863448Sdh155122 
59873448Sdh155122 /*
59883448Sdh155122  * Check whether the datalink name (dlname) is present in the zone's
59893448Sdh155122  * datalink list.  Return B_TRUE if found.
59903448Sdh155122  */
59913448Sdh155122 static boolean_t
59923448Sdh155122 zone_dlname(zone_t *zone, char *dlname)
59933448Sdh155122 {
59943448Sdh155122 	struct dlnamelist *dlnl;
59953448Sdh155122 	boolean_t found = B_FALSE;
59963448Sdh155122 
59973448Sdh155122 	mutex_enter(&zone->zone_lock);
59983448Sdh155122 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
59993448Sdh155122 		if (strncmp(dlnl->dlnl_name, dlname, LIFNAMSIZ) == 0) {
60003448Sdh155122 			found = B_TRUE;
60013448Sdh155122 			break;
60023448Sdh155122 		}
60033448Sdh155122 	}
60043448Sdh155122 	mutex_exit(&zone->zone_lock);
60053448Sdh155122 	return (found);
60063448Sdh155122 }
60073448Sdh155122 
60083448Sdh155122 /*
60093448Sdh155122  * Add a datalink name for the zone. Does not check for duplicates.
60103448Sdh155122  */
60113448Sdh155122 static int
60123448Sdh155122 zone_add_datalink(zoneid_t zoneid, char *dlname)
60133448Sdh155122 {
60143448Sdh155122 	struct dlnamelist *dlnl;
60153448Sdh155122 	zone_t *zone;
60163448Sdh155122 	zone_t *thiszone;
60173448Sdh155122 	int err;
60183448Sdh155122 
60193448Sdh155122 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
60203448Sdh155122 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
60213448Sdh155122 		kmem_free(dlnl, sizeof (struct dlnamelist));
60223448Sdh155122 		return (set_errno(err));
60233448Sdh155122 	}
60243448Sdh155122 
60253448Sdh155122 	thiszone = zone_find_by_id(zoneid);
60263448Sdh155122 	if (thiszone == NULL) {
60273448Sdh155122 		kmem_free(dlnl, sizeof (struct dlnamelist));
60283448Sdh155122 		return (set_errno(ENXIO));
60293448Sdh155122 	}
60303448Sdh155122 
60313448Sdh155122 	/*
60323448Sdh155122 	 * Verify that the datalink name isn't already used by a different
60333448Sdh155122 	 * zone while allowing duplicate entries for the same zone (e.g. due
60343448Sdh155122 	 * to both IPv4 and IPv6 being used on the same interface).
60353448Sdh155122 	 */
60363448Sdh155122 	mutex_enter(&zonehash_lock);
60373448Sdh155122 	for (zone = list_head(&zone_active); zone != NULL;
60383448Sdh155122 	    zone = list_next(&zone_active, zone)) {
60393448Sdh155122 		if (zone->zone_id == zoneid)
60403448Sdh155122 			continue;
60413448Sdh155122 
60423448Sdh155122 		if (zone_dlname(zone, dlnl->dlnl_name)) {
60433448Sdh155122 			mutex_exit(&zonehash_lock);
60443448Sdh155122 			zone_rele(thiszone);
60453448Sdh155122 			kmem_free(dlnl, sizeof (struct dlnamelist));
60463448Sdh155122 			return (set_errno(EPERM));
60473448Sdh155122 		}
60483448Sdh155122 	}
60493448Sdh155122 	mutex_enter(&thiszone->zone_lock);
60503448Sdh155122 	dlnl->dlnl_next = thiszone->zone_dl_list;
60513448Sdh155122 	thiszone->zone_dl_list = dlnl;
60523448Sdh155122 	mutex_exit(&thiszone->zone_lock);
60533448Sdh155122 	mutex_exit(&zonehash_lock);
60543448Sdh155122 	zone_rele(thiszone);
60553448Sdh155122 	return (0);
60563448Sdh155122 }
60573448Sdh155122 
60583448Sdh155122 static int
60593448Sdh155122 zone_remove_datalink(zoneid_t zoneid, char *dlname)
60603448Sdh155122 {
60613448Sdh155122 	struct dlnamelist *dlnl, *odlnl, **dlnlp;
60623448Sdh155122 	zone_t *zone;
60633448Sdh155122 	int err;
60643448Sdh155122 
60653448Sdh155122 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
60663448Sdh155122 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
60673448Sdh155122 		kmem_free(dlnl, sizeof (struct dlnamelist));
60683448Sdh155122 		return (set_errno(err));
60693448Sdh155122 	}
60703448Sdh155122 	zone = zone_find_by_id(zoneid);
60713448Sdh155122 	if (zone == NULL) {
60723448Sdh155122 		kmem_free(dlnl, sizeof (struct dlnamelist));
60733448Sdh155122 		return (set_errno(EINVAL));
60743448Sdh155122 	}
60753448Sdh155122 
60763448Sdh155122 	mutex_enter(&zone->zone_lock);
60773448Sdh155122 	/* Look for match */
60783448Sdh155122 	dlnlp = &zone->zone_dl_list;
60793448Sdh155122 	while (*dlnlp != NULL) {
60803448Sdh155122 		if (strncmp(dlnl->dlnl_name, (*dlnlp)->dlnl_name,
60813448Sdh155122 		    LIFNAMSIZ) == 0)
60823448Sdh155122 			goto found;
60833448Sdh155122 		dlnlp = &((*dlnlp)->dlnl_next);
60843448Sdh155122 	}
60853448Sdh155122 	mutex_exit(&zone->zone_lock);
60863448Sdh155122 	zone_rele(zone);
60873448Sdh155122 	kmem_free(dlnl, sizeof (struct dlnamelist));
60883448Sdh155122 	return (set_errno(ENXIO));
60893448Sdh155122 
60903448Sdh155122 found:
60913448Sdh155122 	odlnl = *dlnlp;
60923448Sdh155122 	*dlnlp = (*dlnlp)->dlnl_next;
60933448Sdh155122 	kmem_free(odlnl, sizeof (struct dlnamelist));
60943448Sdh155122 
60953448Sdh155122 	mutex_exit(&zone->zone_lock);
60963448Sdh155122 	zone_rele(zone);
60973448Sdh155122 	kmem_free(dlnl, sizeof (struct dlnamelist));
60983448Sdh155122 	return (0);
60993448Sdh155122 }
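
/*
 * Illustrative sketch (not part of the original file): the pointer-to-pointer
 * unlink idiom zone_remove_datalink() uses above, shown in isolation.
 * Walking with a struct dlnamelist ** lets head and interior entries be
 * spliced out by the same assignment, with no special case for the head.
 * example_unlink_dlname() is a hypothetical helper; a real caller would hold
 * the owning zone's zone_lock, as the code above does.
 */
static boolean_t
example_unlink_dlname(struct dlnamelist **headp, const char *name)
{
	struct dlnamelist **dlnlp, *found;

	for (dlnlp = headp; *dlnlp != NULL; dlnlp = &(*dlnlp)->dlnl_next) {
		if (strncmp((*dlnlp)->dlnl_name, name, LIFNAMSIZ) == 0) {
			found = *dlnlp;
			*dlnlp = found->dlnl_next;	/* splice out */
			kmem_free(found, sizeof (struct dlnamelist));
			return (B_TRUE);
		}
	}
	return (B_FALSE);
}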
61003448Sdh155122 
61013448Sdh155122 /*
61023448Sdh155122  * If *zoneidp is ALL_ZONES, look up which zone is using the datalink name
61033448Sdh155122  * (dlname) and return that zone's ID through zoneidp; otherwise just check
61043448Sdh155122  * whether the zone specified by *zoneidp has access to the datalink name.
61053448Sdh155122  */
61063448Sdh155122 static int
61073448Sdh155122 zone_check_datalink(zoneid_t *zoneidp, char *dlname)
61083448Sdh155122 {
61093448Sdh155122 	zoneid_t id;
61103448Sdh155122 	char *dln;
61113448Sdh155122 	zone_t *zone;
61123448Sdh155122 	int err = 0;
61133448Sdh155122 	boolean_t allzones = B_FALSE;
61143448Sdh155122 
61153448Sdh155122 	if (copyin(zoneidp, &id, sizeof (id)) != 0) {
61163448Sdh155122 		return (set_errno(EFAULT));
61173448Sdh155122 	}
61183448Sdh155122 	dln = kmem_zalloc(LIFNAMSIZ, KM_SLEEP);
61193448Sdh155122 	if ((err = copyinstr(dlname, dln, LIFNAMSIZ, NULL)) != 0) {
61203448Sdh155122 		kmem_free(dln, LIFNAMSIZ);
61213448Sdh155122 		return (set_errno(err));
61223448Sdh155122 	}
61233448Sdh155122 
61243448Sdh155122 	if (id == ALL_ZONES)
61253448Sdh155122 		allzones = B_TRUE;
61263448Sdh155122 
61273448Sdh155122 	/*
61283448Sdh155122 	 * Check whether datalink name is already used.
61293448Sdh155122 	 */
61303448Sdh155122 	mutex_enter(&zonehash_lock);
61313448Sdh155122 	for (zone = list_head(&zone_active); zone != NULL;
61323448Sdh155122 	    zone = list_next(&zone_active, zone)) {
61333448Sdh155122 		if (allzones || (id == zone->zone_id)) {
61343448Sdh155122 			if (!zone_dlname(zone, dln))
61353448Sdh155122 				continue;
61363448Sdh155122 			if (allzones)
61373448Sdh155122 				err = copyout(&zone->zone_id, zoneidp,
61383448Sdh155122 				    sizeof (*zoneidp));
61393448Sdh155122 
61403448Sdh155122 			mutex_exit(&zonehash_lock);
61413448Sdh155122 			kmem_free(dln, LIFNAMSIZ);
61423448Sdh155122 			return (err ? set_errno(EFAULT) : 0);
61433448Sdh155122 		}
61443448Sdh155122 	}
61453448Sdh155122 
61463448Sdh155122 	/* datalink name is not found in any active zone. */
61473448Sdh155122 	mutex_exit(&zonehash_lock);
61483448Sdh155122 	kmem_free(dln, LIFNAMSIZ);
61493448Sdh155122 	return (set_errno(ENXIO));
61503448Sdh155122 }
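
/*
 * Illustrative sketch (not part of the original file): the in/out ALL_ZONES
 * convention described above.  example_check_datalink() is a hypothetical
 * stand-in for whatever path reaches zone_check_datalink(); the real handler
 * expects user-space zoneidp/dlname pointers (it uses copyin/copyout), so it
 * is not called directly with kernel addresses like this.
 */

/* Hypothetical wrapper prototype; assumed for this sketch only. */
extern int example_check_datalink(zoneid_t *, const char *);

static zoneid_t
example_datalink_owner(const char *dlname)
{
	zoneid_t id = ALL_ZONES;

	/*
	 * With ALL_ZONES on input, success means "id" now holds the owning
	 * zone's ID; a failure (ENXIO) means no active zone has the link.
	 */
	if (example_check_datalink(&id, dlname) != 0)
		return (ALL_ZONES);
	return (id);
}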
61513448Sdh155122 
61523448Sdh155122 /*
61533448Sdh155122  * Get the names of the datalinks assigned to a zone.
61543448Sdh155122  * Here *nump is the number of datalinks the caller expects, and the
61555331Samw  * assumption is that the caller will guarantee that the supplied buffer
61563448Sdh155122  * is big enough to hold at least *nump datalink names, that is,
61573448Sdh155122  * LIFNAMSIZ * *nump bytes.
61583448Sdh155122  * On return, *nump will be the current number of datalinks, if it
61593448Sdh155122  * has changed.
61603448Sdh155122  */
61613448Sdh155122 static int
61623448Sdh155122 zone_list_datalink(zoneid_t zoneid, int *nump, char *buf)
61633448Sdh155122 {
61643448Sdh155122 	int num, dlcount;
61653448Sdh155122 	zone_t *zone;
61663448Sdh155122 	struct dlnamelist *dlnl;
61673448Sdh155122 	char *ptr;
61683448Sdh155122 
61693448Sdh155122 	if (copyin(nump, &dlcount, sizeof (dlcount)) != 0)
61703448Sdh155122 		return (set_errno(EFAULT));
61713448Sdh155122 
61723448Sdh155122 	zone = zone_find_by_id(zoneid);
61733448Sdh155122 	if (zone == NULL) {
61743448Sdh155122 		return (set_errno(ENXIO));
61753448Sdh155122 	}
61763448Sdh155122 
61773448Sdh155122 	num = 0;
61783448Sdh155122 	mutex_enter(&zone->zone_lock);
61793448Sdh155122 	ptr = buf;
61803448Sdh155122 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
61813448Sdh155122 		/*
61823448Sdh155122 		 * If the list changed and the new number is bigger
61833448Sdh155122 		 * than what the caller supplied, just count, don't
61843448Sdh155122 		 * do copyout
61853448Sdh155122 		 */
61863448Sdh155122 		if (++num > dlcount)
61873448Sdh155122 			continue;
61883448Sdh155122 		if (copyout(dlnl->dlnl_name, ptr, LIFNAMSIZ) != 0) {
61893448Sdh155122 			mutex_exit(&zone->zone_lock);
61903448Sdh155122 			zone_rele(zone);
61913448Sdh155122 			return (set_errno(EFAULT));
61923448Sdh155122 		}
61933448Sdh155122 		ptr += LIFNAMSIZ;
61943448Sdh155122 	}
61953448Sdh155122 	mutex_exit(&zone->zone_lock);
61963448Sdh155122 	zone_rele(zone);
61973448Sdh155122 
61983448Sdh155122 	/* Whether the count increased or decreased, notify the caller. */
61993448Sdh155122 	if (num != dlcount) {
62003448Sdh155122 		if (copyout(&num, nump, sizeof (num)) != 0) {
62013448Sdh155122 			return (set_errno(EFAULT));
62023448Sdh155122 		}
62033448Sdh155122 	}
62043448Sdh155122 	return (0);
62053448Sdh155122 }
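
/*
 * Illustrative sketch (not part of the original file): the grow-and-retry
 * pattern implied by the contract above.  example_list_datalinks() is a
 * hypothetical wrapper with kernel-friendly pointers standing in for
 * zone_list_datalink(), which itself works on user-space buffers via
 * copyin/copyout; example_fetch_datalinks() is likewise a hypothetical name.
 */

/* Hypothetical wrapper prototype; assumed for this sketch only. */
extern int example_list_datalinks(zoneid_t, int *, char *);

static int
example_fetch_datalinks(zoneid_t zoneid, char **bufp, int *countp)
{
	int want = 0, got;
	char *buf = NULL;

	for (;;) {
		got = want;
		if (example_list_datalinks(zoneid, &got, buf) != 0) {
			if (buf != NULL)
				kmem_free(buf, (size_t)want * LIFNAMSIZ);
			return (-1);
		}
		if (got <= want)
			break;	/* everything fit (or the list shrank) */
		/* The list outgrew our buffer; allocate and try again. */
		if (buf != NULL)
			kmem_free(buf, (size_t)want * LIFNAMSIZ);
		want = got;
		buf = kmem_alloc((size_t)want * LIFNAMSIZ, KM_SLEEP);
	}
	*bufp = buf;		/* NULL when there are no datalinks */
	*countp = got;
	return (0);
}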
62063448Sdh155122 
62073448Sdh155122 /*
62083448Sdh155122  * Public interface for looking up a zone by zoneid.  It's a customized
62095880Snordmark  * version for netstack_zone_create().  It can only be called from the zsd
62105880Snordmark  * create callbacks, since it doesn't take a reference on the zone structure;
62115880Snordmark  * hence, if it is called elsewhere, the zone could disappear after the
62125880Snordmark  * zonehash_lock is dropped.
62135880Snordmark  *
62145880Snordmark  * Furthermore, it:
62155880Snordmark  * 1. Doesn't check the status of the zone.
62165880Snordmark  * 2. May be called even before zone_init() is called; in that case the
62173448Sdh155122  *    address of zone0 is returned directly, and netstack_zone_create()
62183448Sdh155122  *    will only assign zone0.zone_netstack, which won't break anything.
62195880Snordmark  * 3. Returns without the zone being held.
62203448Sdh155122  */
62213448Sdh155122 zone_t *
62223448Sdh155122 zone_find_by_id_nolock(zoneid_t zoneid)
62233448Sdh155122 {
62245880Snordmark 	zone_t *zone;
62255880Snordmark 
62265880Snordmark 	mutex_enter(&zonehash_lock);
62273448Sdh155122 	if (zonehashbyid == NULL)
62285880Snordmark 		zone = &zone0;
62293448Sdh155122 	else
62305880Snordmark 		zone = zone_find_all_by_id(zoneid);
62315880Snordmark 	mutex_exit(&zonehash_lock);
62325880Snordmark 	return (zone);
62333448Sdh155122 }
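
/*
 * Illustrative sketch (not part of the original file): the only intended
 * calling context for zone_find_by_id_nolock() is a ZSD create callback,
 * per the comment above.  example_zsd_create() is a hypothetical callback;
 * note that no hold is taken, so no zone_rele() follows, and the zone
 * pointer must not be cached beyond the callback.
 */
static void *
example_zsd_create(zoneid_t zoneid)
{
	zone_t *zone = zone_find_by_id_nolock(zoneid);

	/* Use the pointer only within this callback; do not store it. */
	cmn_err(CE_CONT, "example zsd create for zone %s\n",
	    zone == NULL ? "<unknown>" : zone->zone_name);
	return (NULL);
}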
62345895Syz147064 
62355895Syz147064 /*
62365895Syz147064  * Walk the datalinks for a given zone
62375895Syz147064  */
62385895Syz147064 int
62395895Syz147064 zone_datalink_walk(zoneid_t zoneid, int (*cb)(const char *, void *), void *data)
62405895Syz147064 {
62415895Syz147064 	zone_t *zone;
62425895Syz147064 	struct dlnamelist *dlnl;
62435895Syz147064 	int ret = 0;
62445895Syz147064 
62455895Syz147064 	if ((zone = zone_find_by_id(zoneid)) == NULL)
62465895Syz147064 		return (ENOENT);
62475895Syz147064 
62485895Syz147064 	mutex_enter(&zone->zone_lock);
62495895Syz147064 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
62505895Syz147064 		if ((ret = (*cb)(dlnl->dlnl_name, data)) != 0)
62515895Syz147064 			break;
62525895Syz147064 	}
62535895Syz147064 	mutex_exit(&zone->zone_lock);
62545895Syz147064 	zone_rele(zone);
62555895Syz147064 	return (ret);
62565895Syz147064 }
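
/*
 * Illustrative sketch (not part of the original file): counting a zone's
 * datalinks with zone_datalink_walk() above.  example_count_cb() and
 * example_count_datalinks() are hypothetical names.
 */
/*ARGSUSED*/
static int
example_count_cb(const char *dlname, void *arg)
{
	(*(int *)arg)++;	/* dlname itself is not needed for counting */
	return (0);		/* a non-zero return would stop the walk */
}

static int
example_count_datalinks(zoneid_t zoneid, int *countp)
{
	*countp = 0;
	return (zone_datalink_walk(zoneid, example_count_cb, countp));
}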
6257