/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Contracts
 * ---------
 *
 * Contracts are a primitive which enrich the relationships between
 * processes and system resources. The primary purpose of contracts is
 * to provide a means for the system to negotiate the departure from a
 * binding relationship (e.g. pages locked in memory or a thread bound
 * to a processor), but they can also be used as a purely asynchronous
 * error reporting mechanism as they are with process contracts.
 *
 * More information on how one interfaces with contracts and what
 * contracts can do for you can be found in:
 *   PSARC 2003/193 Solaris Contracts
 *   PSARC 2004/460 Contracts addendum
 *
 * This file contains the core contracts framework. By itself it is
 * useless: it depends on the contracts filesystem (ctfs) to provide an
 * interface to user processes and on individual contract types to
 * implement the process/resource relationships.
 *
 * Data structure overview
 * -----------------------
 *
 * A contract is represented by a contract_t, which itself points to an
 * encapsulating contract-type specific contract object. A contract_t
 * contains the contract's static identity (including its terms), its
 * linkage to various bookkeeping structures, the contract-specific
 * event queue, and a reference count.
 *
 * A contract template is represented by a ct_template_t, which, like a
 * contract, points to an encapsulating contract-type specific template
 * object. A ct_template_t contains the template's terms.
 *
 * An event queue is represented by a ct_equeue_t, and consists of a
 * list of events, a list of listeners, and a list of listeners who are
 * waiting for new events (affectionately referred to as "tail
 * listeners"). There are three queue types, defined by ct_listnum_t
 * (an enum). An event may be on one of each type of queue
 * simultaneously; the list linkage used by a queue is determined by
 * its type.
 *
 * An event is represented by a ct_kevent_t, which contains mostly
 * static event data (e.g. id, payload). It also has an array of
 * ct_member_t structures, each of which contains a list_node_t and
 * represents the event's linkage in a specific event queue.
 *
 * Each open of an event endpoint results in the creation of a new
 * listener, represented by a ct_listener_t. In addition to linkage
 * into the aforementioned lists in the event_queue, a ct_listener_t
 * contains a pointer to the ct_kevent_t it is currently positioned at
 * as well as a set of status flags and other administrative data.
 *
 * Each process has a list of contracts it owns, p_ct_held; a pointer
 * to the process contract it is a member of, p_ct_process; the linkage
 * for that membership, p_ct_member; and an array of event queue
 * structures representing the process bundle queues.
 *
 * Each LWP has an array of its active templates, lwp_ct_active; and
 * the most recently created contracts, lwp_ct_latest.
 *
 * A process contract has a list of member processes and a list of
 * inherited contracts.
 *
 * There is a system-wide list of all contracts, as well as per-type
 * lists of contracts.
 *
 * Lock ordering overview
 * ----------------------
 *
 * Locks at the top are taken first:
 *
 *	ct_evtlock
 *	regent ct_lock
 *	member ct_lock
 *	pidlock
 *	p_lock
 *	contract ctq_lock	contract_lock
 *	pbundle ctq_lock
 *	cte_lock
 *	ct_reflock
 *
 * contract_lock and ctq_lock/cte_lock are not currently taken at the
 * same time.
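 *
 * For example (an illustrative sketch only; the real code paths are
 * contract_abandon() and contract_adopt() below), a thread holding a
 * member's ct_lock that discovers it also needs the regent's ct_lock
 * must back off and reacquire in order:
 *
 *	mutex_exit(&ct->ct_lock);
 *	mutex_enter(&parent->ct_lock);	(regent first)
 *	mutex_enter(&ct->ct_lock);	(then the member)
 *
 * after which any state examined under the dropped lock must be
 * revalidated.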
 *
 * Reference counting and locking
 * ------------------------------
 *
 * A contract has a reference count, protected by ct_reflock.
 * (ct_reflock is also used in a couple of other places where atomic
 * access to a variable is needed in an innermost context). A process
 * maintains a hold on each contract it owns. A process contract has a
 * hold on each contract it has inherited. Each event has a hold on
 * the contract which generated it. Process contract templates have
 * holds on the contracts referred to by their transfer terms. CTFS
 * contract directory nodes have holds on contracts. Lastly, various
 * code paths may temporarily take holds on contracts to prevent them
 * from disappearing while other processing is going on. It is
 * important to note that the global contract lists do not hold
 * references on contracts; a contract is removed from these structures
 * atomically with the release of its last reference.
 *
 * At a given point in time, a contract can either be owned by a
 * process, inherited by a regent process contract, or orphaned. A
 * contract_t's owner and regent pointers, ct_owner and ct_regent, are
 * protected by its ct_lock. The linkage in the holder's (holder =
 * owner or regent) list of contracts, ct_ctlist, is protected by
 * whatever lock protects the holder's data structure. In order for
 * these two directions to remain consistent, changing the holder of a
 * contract requires that both locks be held.
 *
 * Events also have reference counts. There is one hold on an event
 * per queue it is present on, in addition to those needed for the
 * usual sundry reasons. Individual listeners are associated with
 * specific queues, and increase a queue-specific reference count
 * stored in the ct_member_t structure.
 *
 * The dynamic contents of an event (reference count and flags) are
 * protected by its cte_lock, while the contents of the embedded
 * ct_member_t structures are protected by the locks of the queues they
 * are linked into. A ct_listener_t's contents are also protected by
 * its event queue's ctq_lock.
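 *
 * As a minimal sketch (illustrative only; contract_hold() and
 * contract_rele() are defined later in this file), a code path that
 * must drop ct_lock but still needs the contract to survive would do:
 *
 *	contract_hold(ct);		(take a temporary reference)
 *	mutex_exit(&ct->ct_lock);
 *	... blocking or lock-reordering work ...
 *	contract_rele(ct);		(may free the contract)
 *
 * contract_abandon() below holds the contract across its call to
 * cte_trim() for exactly this reason.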
 *
 * Resource controls
 * -----------------
 *
 * Control:	project.max-contracts (rc_project_contract)
 * Description:	Maximum number of contracts allowed to a project.
 *
 * When a contract is created, the project's allocation is tested and
 * (assuming success) increased. When the last reference to a
 * contract is released, the creating project's allocation is
 * decreased.
 */

#include <sys/mutex.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/id_space.h>
#include <sys/avl.h>
#include <sys/list.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/ctfs.h>
#include <sys/contract_impl.h>
#include <sys/contract/process_impl.h>
#include <sys/dditypes.h>
#include <sys/contract/device_impl.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/model.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/task.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

extern rctl_hndl_t rc_project_contract;

static id_space_t *contract_ids;
static avl_tree_t contract_avl;
static kmutex_t contract_lock;

int ct_ntypes = CTT_MAXTYPE;
static ct_type_t *ct_types_static[CTT_MAXTYPE];
ct_type_t **ct_types = ct_types_static;
int ct_debug;

static void cte_queue_create(ct_equeue_t *, ct_listnum_t, int, int);
static void cte_queue_destroy(ct_equeue_t *);
static void cte_queue_drain(ct_equeue_t *, int);
static void cte_trim(ct_equeue_t *, contract_t *);
static void cte_copy(ct_equeue_t *, ct_equeue_t *);

/*
 * contract_compar
 *
 * A contract comparator which sorts on contract ID.
 */
int
contract_compar(const void *x, const void *y)
{
	const contract_t *ct1 = x;
	const contract_t *ct2 = y;

	if (ct1->ct_id < ct2->ct_id)
		return (-1);
	if (ct1->ct_id > ct2->ct_id)
		return (1);
	return (0);
}

/*
 * contract_init
 *
 * Initializes the contract subsystem, the specific contract types, and
 * process 0.
 */
void
contract_init(void)
{
	/*
	 * Initialize contract subsystem.
	 */
	contract_ids = id_space_create("contracts", 1, INT_MAX);
	avl_create(&contract_avl, contract_compar, sizeof (contract_t),
	    offsetof(contract_t, ct_ctavl));
	mutex_init(&contract_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Initialize contract types.
	 */
	contract_process_init();
	contract_device_init();

	/*
	 * Initialize p0/lwp0 contract state.
	 */
	avl_create(&p0.p_ct_held, contract_compar, sizeof (contract_t),
	    offsetof(contract_t, ct_ctlist));
}

/*
 * contract_dtor
 *
 * Performs basic destruction of the common portions of a contract.
 * Called from the failure path of contract_ctor and from
 * contract_rele.
 */
static void
contract_dtor(contract_t *ct)
{
	cte_queue_destroy(&ct->ct_events);
	list_destroy(&ct->ct_vnodes);
	mutex_destroy(&ct->ct_reflock);
	mutex_destroy(&ct->ct_lock);
	mutex_destroy(&ct->ct_evtlock);
}

/*
 * contract_ctor
 *
 * Called by a contract type to initialize a contract. Fails if the
 * max-contract resource control would have been exceeded. After a
 * successful call to contract_ctor, the contract is unlocked and
 * visible in all namespaces; any type-specific initialization should
 * be completed before calling contract_ctor. Returns 0 on success.
 *
 * Because not all callers can tolerate failure, a 0 value for canfail
 * instructs contract_ctor to ignore the project.max-contracts resource
 * control. Obviously, this "out" should only be employed by callers
 * who are sufficiently constrained in other ways (e.g. newproc).
 */
int
contract_ctor(contract_t *ct, ct_type_t *type, ct_template_t *tmpl, void *data,
    ctflags_t flags, proc_t *author, int canfail)
{
	avl_index_t where;
	klwp_t *curlwp = ttolwp(curthread);

	ASSERT(author == curproc);

	mutex_init(&ct->ct_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ct->ct_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ct->ct_evtlock, NULL, MUTEX_DEFAULT, NULL);
	ct->ct_id = id_alloc(contract_ids);

	cte_queue_create(&ct->ct_events, CTEL_CONTRACT, 20, 0);
	list_create(&ct->ct_vnodes, sizeof (contract_vnode_t),
	    offsetof(contract_vnode_t, ctv_node));

	/*
	 * Instance data
	 */
	ct->ct_ref = 2;		/* one for the holder, one for "latest" */
	ct->ct_cuid = crgetuid(CRED());
	ct->ct_type = type;
	ct->ct_data = data;
	gethrestime(&ct->ct_ctime);
	ct->ct_state = CTS_OWNED;
	ct->ct_flags = flags;
	ct->ct_regent = author->p_ct_process ?
	    &author->p_ct_process->conp_contract : NULL;
	ct->ct_ev_info = tmpl->ctmpl_ev_info;
	ct->ct_ev_crit = tmpl->ctmpl_ev_crit;
	ct->ct_cookie = tmpl->ctmpl_cookie;
	ct->ct_owner = author;
	ct->ct_ntime.ctm_total = -1;
	ct->ct_qtime.ctm_total = -1;
	ct->ct_nevent = NULL;

	/*
	 * Test project.max-contracts.
	 */
	mutex_enter(&author->p_lock);
	mutex_enter(&contract_lock);
	if (canfail && rctl_test(rc_project_contract,
	    author->p_task->tk_proj->kpj_rctls, author, 1,
	    RCA_SAFE) & RCT_DENY) {
		id_free(contract_ids, ct->ct_id);
		mutex_exit(&contract_lock);
		mutex_exit(&author->p_lock);
		ct->ct_events.ctq_flags |= CTQ_DEAD;
		contract_dtor(ct);
		return (1);
	}
	ct->ct_proj = author->p_task->tk_proj;
	ct->ct_proj->kpj_data.kpd_contract++;
	(void) project_hold(ct->ct_proj);
	mutex_exit(&contract_lock);

	/*
	 * Insert into holder's avl of contracts.
	 * We use an avl not because order is important, but because
	 * readdir of /proc/<pid>/contracts requires we be able to use a
	 * scalar as an index into the process's list of contracts.
	 */
	ct->ct_zoneid = author->p_zone->zone_id;
	ct->ct_czuniqid = ct->ct_mzuniqid = author->p_zone->zone_uniqid;
	VERIFY(avl_find(&author->p_ct_held, ct, &where) == NULL);
	avl_insert(&author->p_ct_held, ct, where);
	mutex_exit(&author->p_lock);

	/*
	 * Insert into global contract AVL
	 */
	mutex_enter(&contract_lock);
	VERIFY(avl_find(&contract_avl, ct, &where) == NULL);
	avl_insert(&contract_avl, ct, where);
	mutex_exit(&contract_lock);

	/*
	 * Insert into type AVL
	 */
	mutex_enter(&type->ct_type_lock);
	VERIFY(avl_find(&type->ct_type_avl, ct, &where) == NULL);
	avl_insert(&type->ct_type_avl, ct, where);
	type->ct_type_timestruc = ct->ct_ctime;
	mutex_exit(&type->ct_type_lock);

	if (curlwp->lwp_ct_latest[type->ct_type_index])
		contract_rele(curlwp->lwp_ct_latest[type->ct_type_index]);
	curlwp->lwp_ct_latest[type->ct_type_index] = ct;

	return (0);
}
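
/*
 * Illustrative sketch only: a contract type's create routine would
 * typically allocate and initialize its type-specific object first and
 * call contract_ctor() last, since the contract becomes globally
 * visible as soon as contract_ctor() succeeds. The names cont_foo_t,
 * foo_type and conf_contract below are hypothetical:
 *
 *	foo = kmem_zalloc(sizeof (cont_foo_t), KM_SLEEP);
 *	...copy terms from the type-specific template into foo...
 *	if (contract_ctor(&foo->conf_contract, foo_type, tmpl, foo,
 *	    flags, curproc, canfail)) {
 *		kmem_free(foo, sizeof (cont_foo_t));
 *		return (NULL);
 *	}
 */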

/*
 * contract_rele
 *
 * Releases a reference to a contract. If the caller had the last
 * reference, the contract is removed from all namespaces, its
 * allocation against the max-contracts resource control is released,
 * and the contract type's free entry point is invoked for any
 * type-specific deconstruction and to (presumably) free the object.
 */
void
contract_rele(contract_t *ct)
{
	uint64_t nref;

	mutex_enter(&ct->ct_reflock);
	ASSERT(ct->ct_ref > 0);
	nref = --ct->ct_ref;
	mutex_exit(&ct->ct_reflock);
	if (nref == 0) {
		/*
		 * ct_owner is cleared when it drops its reference.
		 */
		ASSERT(ct->ct_owner == NULL);
		ASSERT(ct->ct_evcnt == 0);

		/*
		 * Remove from global contract AVL
		 */
		mutex_enter(&contract_lock);
		avl_remove(&contract_avl, ct);
		mutex_exit(&contract_lock);

		/*
		 * Remove from type AVL
		 */
		mutex_enter(&ct->ct_type->ct_type_lock);
		avl_remove(&ct->ct_type->ct_type_avl, ct);
		mutex_exit(&ct->ct_type->ct_type_lock);

		/*
		 * Release the contract's ID
		 */
		id_free(contract_ids, ct->ct_id);

		/*
		 * Release project hold
		 */
		mutex_enter(&contract_lock);
		ct->ct_proj->kpj_data.kpd_contract--;
		project_rele(ct->ct_proj);
		mutex_exit(&contract_lock);

		/*
		 * Free the contract
		 */
		contract_dtor(ct);
		ct->ct_type->ct_type_ops->contop_free(ct);
	}
}

/*
 * contract_hold
 *
 * Adds a reference to a contract
 */
void
contract_hold(contract_t *ct)
{
	mutex_enter(&ct->ct_reflock);
	ASSERT(ct->ct_ref < UINT64_MAX);
	ct->ct_ref++;
	mutex_exit(&ct->ct_reflock);
}

/*
 * contract_getzuniqid
 *
 * Get a contract's zone unique ID. Needed because 64-bit reads and
 * writes aren't atomic on x86. Since there are contexts where we are
 * unable to take ct_lock, we instead use ct_reflock; in actuality any
 * lock would do.
 */
uint64_t
contract_getzuniqid(contract_t *ct)
{
	uint64_t zuniqid;

	mutex_enter(&ct->ct_reflock);
	zuniqid = ct->ct_mzuniqid;
	mutex_exit(&ct->ct_reflock);

	return (zuniqid);
}

/*
 * contract_setzuniqid
 *
 * Sets a contract's zone unique ID. See contract_getzuniqid.
 */
void
contract_setzuniqid(contract_t *ct, uint64_t zuniqid)
{
	mutex_enter(&ct->ct_reflock);
	ct->ct_mzuniqid = zuniqid;
	mutex_exit(&ct->ct_reflock);
}

/*
 * contract_abandon
 *
 * Abandons the specified contract. If "explicit" is clear, the
 * contract was implicitly abandoned (by process exit) and should be
 * inherited if its terms allow it and its owner was a member of a
 * regent contract. Otherwise, the contract type's abandon entry point
 * is invoked to either destroy or orphan the contract.
 */
int
contract_abandon(contract_t *ct, proc_t *p, int explicit)
{
	ct_equeue_t *q = NULL;
	contract_t *parent = &p->p_ct_process->conp_contract;
	int inherit = 0;

	ASSERT(p == curproc);

	mutex_enter(&ct->ct_lock);

	/*
	 * Multiple contract locks are taken contract -> subcontract.
	 * Check if the contract will be inherited so we can acquire
	 * all the necessary locks before making sensitive changes.
	 */
	if (!explicit && (ct->ct_flags & CTF_INHERIT) &&
	    contract_process_accept(parent)) {
		mutex_exit(&ct->ct_lock);
		mutex_enter(&parent->ct_lock);
		mutex_enter(&ct->ct_lock);
		inherit = 1;
	}

	if (ct->ct_owner != p) {
		mutex_exit(&ct->ct_lock);
		if (inherit)
			mutex_exit(&parent->ct_lock);
		return (EINVAL);
	}

	mutex_enter(&p->p_lock);
	if (explicit)
		avl_remove(&p->p_ct_held, ct);
	ct->ct_owner = NULL;
	mutex_exit(&p->p_lock);

	/*
	 * Since we can't call cte_trim with the contract lock held,
	 * we grab the queue pointer here.
	 */
	if (p->p_ct_equeue)
		q = p->p_ct_equeue[ct->ct_type->ct_type_index];

	/*
	 * contop_abandon may destroy the contract so we rely on it to
	 * drop ct_lock. We retain a reference on the contract so that
	 * the cte_trim which follows functions properly. Even though
	 * cte_trim doesn't dereference the contract pointer, it is
	 * still necessary to retain a reference to the contract so
	 * that we don't trim events which are sent by a subsequently
	 * allocated contract fortuitously located at the same address.
	 */
	contract_hold(ct);

	if (inherit) {
		ct->ct_state = CTS_INHERITED;
		ASSERT(ct->ct_regent == parent);
		contract_process_take(parent, ct);

		/*
		 * We are handing off the process's reference to the
		 * parent contract. For this reason, the order in
		 * which we drop the contract locks is also important.
		 */
		mutex_exit(&ct->ct_lock);
		mutex_exit(&parent->ct_lock);
	} else {
		ct->ct_regent = NULL;
		ct->ct_type->ct_type_ops->contop_abandon(ct);
	}

	/*
	 * ct_lock has been dropped; we can safely trim the event
	 * queue now.
	 */
	if (q) {
		mutex_enter(&q->ctq_lock);
		cte_trim(q, ct);
		mutex_exit(&q->ctq_lock);
	}

	contract_rele(ct);

	return (0);
}

int
contract_newct(contract_t *ct)
{
	return (ct->ct_type->ct_type_ops->contop_newct(ct));
}

/*
 * contract_adopt
 *
 * Adopts a contract. After a successful call to this routine, the
 * previously inherited contract will belong to the calling process,
 * and its events will have been appended to its new owner's process
 * bundle queue.
 */
int
contract_adopt(contract_t *ct, proc_t *p)
{
	avl_index_t where;
	ct_equeue_t *q;
	contract_t *parent;

	ASSERT(p == curproc);

	/*
	 * Ensure the process has an event queue. Checked by ASSERTs
	 * below.
	 */
	(void) contract_type_pbundle(ct->ct_type, p);

	mutex_enter(&ct->ct_lock);
	parent = ct->ct_regent;
	if (ct->ct_state != CTS_INHERITED ||
	    &p->p_ct_process->conp_contract != parent ||
	    p->p_zone->zone_uniqid != ct->ct_czuniqid) {
		mutex_exit(&ct->ct_lock);
		return (EINVAL);
	}

	/*
	 * Multiple contract locks are taken contract -> subcontract.
	 */
	mutex_exit(&ct->ct_lock);
	mutex_enter(&parent->ct_lock);
	mutex_enter(&ct->ct_lock);

	/*
	 * It is possible that the contract was adopted by someone else
	 * while its lock was dropped. It isn't possible for the
	 * contract to have been inherited by a different regent
	 * contract.
	 */
	if (ct->ct_state != CTS_INHERITED) {
		mutex_exit(&parent->ct_lock);
		mutex_exit(&ct->ct_lock);
		return (EBUSY);
	}
	ASSERT(ct->ct_regent == parent);

	ct->ct_state = CTS_OWNED;

	contract_process_adopt(ct, p);

	mutex_enter(&p->p_lock);
	ct->ct_owner = p;
	VERIFY(avl_find(&p->p_ct_held, ct, &where) == NULL);
	avl_insert(&p->p_ct_held, ct, where);
	mutex_exit(&p->p_lock);

	ASSERT(ct->ct_owner->p_ct_equeue);
	ASSERT(ct->ct_owner->p_ct_equeue[ct->ct_type->ct_type_index]);
	q = ct->ct_owner->p_ct_equeue[ct->ct_type->ct_type_index];
	cte_copy(&ct->ct_events, q);
	mutex_exit(&ct->ct_lock);

	return (0);
}

/*
 * contract_ack
 *
 * Acknowledges receipt of a critical event.
 */
int
contract_ack(contract_t *ct, uint64_t evid, int ack)
{
	ct_kevent_t *ev;
	list_t *queue = &ct->ct_events.ctq_events;
	int error = ESRCH;
	int nego = 0;
	uint_t evtype;

	ASSERT(ack == CT_ACK || ack == CT_NACK);

	mutex_enter(&ct->ct_lock);
	mutex_enter(&ct->ct_events.ctq_lock);
	/*
	 * We are probably ACKing something near the head of the queue.
	 */
	for (ev = list_head(queue); ev; ev = list_next(queue, ev)) {
		if (ev->cte_id == evid) {
			if (ev->cte_flags & CTE_NEG)
				nego = 1;
			else if (ack == CT_NACK)
				break;
			if ((ev->cte_flags & (CTE_INFO | CTE_ACK)) == 0) {
				ev->cte_flags |= CTE_ACK;
				ct->ct_evcnt--;
				evtype = ev->cte_type;
				error = 0;
			}
			break;
		}
	}
	mutex_exit(&ct->ct_events.ctq_lock);
	mutex_exit(&ct->ct_lock);

	/*
	 * Not all critical events are negotiation events; however,
	 * every negotiation event is a critical event. NEGEND events
	 * are critical events but are not negotiation events.
	 */
	if (error || !nego)
		return (error);

	if (ack == CT_ACK)
		error = ct->ct_type->ct_type_ops->contop_ack(ct, evtype, evid);
	else
		error = ct->ct_type->ct_type_ops->contop_nack(ct, evtype, evid);

	return (error);
}

/*ARGSUSED*/
int
contract_ack_inval(contract_t *ct, uint_t evtype, uint64_t evid)
{
	cmn_err(CE_PANIC, "contract_ack_inval: unsupported call: ctid: %u",
	    ct->ct_id);
	return (ENOSYS);
}

/*ARGSUSED*/
int
contract_qack_inval(contract_t *ct, uint_t evtype, uint64_t evid)
{
	cmn_err(CE_PANIC, "contract_qack_inval: unsupported call: ctid: %u",
	    ct->ct_id);
	return (ENOSYS);
}

/*ARGSUSED*/
int
contract_qack_notsup(contract_t *ct, uint_t evtype, uint64_t evid)
{
	return (ERANGE);
}
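
/*
 * Illustrative sketch only: contract types that do not support event
 * negotiation can point the corresponding entry points in their
 * contops_t at the stubs above, for example (foo_contops is a
 * hypothetical ops vector):
 *
 *	foo_contops.contop_ack = contract_ack_inval;
 *	foo_contops.contop_nack = contract_ack_inval;
 *	foo_contops.contop_qack = contract_qack_notsup;
 */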

/*
 * contract_qack
 *
 * Asks that negotiations be extended by another time quantum
 */
int
contract_qack(contract_t *ct, uint64_t evid)
{
	ct_kevent_t *ev;
	list_t *queue = &ct->ct_events.ctq_events;
	int nego = 0;
	uint_t evtype;

	mutex_enter(&ct->ct_lock);
	mutex_enter(&ct->ct_events.ctq_lock);

	for (ev = list_head(queue); ev; ev = list_next(queue, ev)) {
		if (ev->cte_id == evid) {
			if ((ev->cte_flags & (CTE_NEG | CTE_ACK)) == CTE_NEG) {
				evtype = ev->cte_type;
				nego = 1;
			}
			break;
		}
	}
	mutex_exit(&ct->ct_events.ctq_lock);
	mutex_exit(&ct->ct_lock);

	/*
	 * Only a negotiated event (which is by definition also a critical
	 * event) which has not yet been acknowledged can provide
	 * time quanta to a negotiating owner process.
	 */
	if (!nego)
		return (ESRCH);

	return (ct->ct_type->ct_type_ops->contop_qack(ct, evtype, evid));
}

/*
 * contract_orphan
 *
 * Icky-poo. This is a process-contract special, used to ACK all
 * critical messages when a contract is orphaned.
 */
void
contract_orphan(contract_t *ct)
{
	ct_kevent_t *ev;
	list_t *queue = &ct->ct_events.ctq_events;

	ASSERT(MUTEX_HELD(&ct->ct_lock));
	ASSERT(ct->ct_state != CTS_ORPHAN);

	mutex_enter(&ct->ct_events.ctq_lock);
	ct->ct_state = CTS_ORPHAN;
	for (ev = list_head(queue); ev; ev = list_next(queue, ev)) {
		if ((ev->cte_flags & (CTE_INFO | CTE_ACK)) == 0) {
			ev->cte_flags |= CTE_ACK;
			ct->ct_evcnt--;
		}
	}
	mutex_exit(&ct->ct_events.ctq_lock);

	ASSERT(ct->ct_evcnt == 0);
}

/*
 * contract_destroy
 *
 * Explicit contract destruction. Called when contract is empty.
 * The contract will actually stick around until all of its events are
 * removed from the bundle and process bundle queues, and all fds
 * which refer to it are closed. See contract_dtor if you are looking
 * for what destroys the contract structure.
 */
void
contract_destroy(contract_t *ct)
{
	ASSERT(MUTEX_HELD(&ct->ct_lock));
	ASSERT(ct->ct_state != CTS_DEAD);
	ASSERT(ct->ct_owner == NULL);

	ct->ct_state = CTS_DEAD;
	cte_queue_drain(&ct->ct_events, 1);
	mutex_exit(&ct->ct_lock);
	mutex_enter(&ct->ct_type->ct_type_events.ctq_lock);
	cte_trim(&ct->ct_type->ct_type_events, ct);
	mutex_exit(&ct->ct_type->ct_type_events.ctq_lock);
	mutex_enter(&ct->ct_lock);
	ct->ct_type->ct_type_ops->contop_destroy(ct);
	mutex_exit(&ct->ct_lock);
	contract_rele(ct);
}

/*
 * contract_vnode_get
 *
 * Obtains the contract directory vnode for this contract, if there is
 * one. The caller must VN_RELE the vnode when they are through using
 * it.
 */
vnode_t *
contract_vnode_get(contract_t *ct, vfs_t *vfsp)
{
	contract_vnode_t *ctv;
	vnode_t *vp = NULL;

	mutex_enter(&ct->ct_lock);
	for (ctv = list_head(&ct->ct_vnodes); ctv != NULL;
	    ctv = list_next(&ct->ct_vnodes, ctv))
		if (ctv->ctv_vnode->v_vfsp == vfsp) {
			vp = ctv->ctv_vnode;
			VN_HOLD(vp);
			break;
		}
	mutex_exit(&ct->ct_lock);
	return (vp);
}

/*
 * contract_vnode_set
 *
 * Sets the contract directory vnode for this contract. We don't hold
 * a reference on the vnode because we don't want to prevent it from
 * being freed. The vnode's inactive entry point will take care of
 * notifying us when it should be removed.
 */
void
contract_vnode_set(contract_t *ct, contract_vnode_t *ctv, vnode_t *vnode)
{
	mutex_enter(&ct->ct_lock);
	ctv->ctv_vnode = vnode;
	list_insert_head(&ct->ct_vnodes, ctv);
	mutex_exit(&ct->ct_lock);
}

/*
 * contract_vnode_clear
 *
 * Removes this vnode as the contract directory vnode for this
 * contract. Called from a contract directory's inactive entry point,
 * this may return 0 indicating that the vnode gained another reference
 * because of a simultaneous call to contract_vnode_get.
 */
int
contract_vnode_clear(contract_t *ct, contract_vnode_t *ctv)
{
	vnode_t *vp = ctv->ctv_vnode;
	int result;

	mutex_enter(&ct->ct_lock);
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		list_remove(&ct->ct_vnodes, ctv);
		result = 1;
	} else {
		vp->v_count--;
		result = 0;
	}
	mutex_exit(&vp->v_lock);
	mutex_exit(&ct->ct_lock);

	return (result);
}

/*
 * contract_exit
 *
 * Abandons all contracts held by process p, and drains process p's
 * bundle queues. Called on process exit.
 */
void
contract_exit(proc_t *p)
{
	contract_t *ct;
	void *cookie = NULL;
	int i;

	ASSERT(p == curproc);

	/*
	 * Abandon held contracts. contract_abandon knows enough not
	 * to remove the contract from the list a second time. We are
	 * exiting, so no locks are needed here. But because
	 * contract_abandon will take p_lock, we need to make sure we
	 * aren't holding it.
	 */
	ASSERT(MUTEX_NOT_HELD(&p->p_lock));
	while ((ct = avl_destroy_nodes(&p->p_ct_held, &cookie)) != NULL)
		VERIFY(contract_abandon(ct, p, 0) == 0);

	/*
	 * Drain pbundles. Because a process bundle queue could have
	 * been passed to another process, they may not be freed right
	 * away.
	 */
	if (p->p_ct_equeue) {
		for (i = 0; i < CTT_MAXTYPE; i++)
			if (p->p_ct_equeue[i])
				cte_queue_drain(p->p_ct_equeue[i], 0);
		kmem_free(p->p_ct_equeue, CTT_MAXTYPE * sizeof (ct_equeue_t *));
	}
}

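/*
 * get_time_left
 *
 * Returns the number of seconds remaining in the time quantum
 * described by t, or -1 if no quantum is in effect. As an
 * illustrative example, with ctm_total set to 30 and roughly ten
 * seconds' worth of lbolt ticks elapsed since ctm_start, this
 * returns 20; once the quantum is exhausted it returns 0.
 */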
static int
get_time_left(struct ct_time *t)
{
	clock_t ticks_elapsed;
	int secs_left;

	if (t->ctm_total == -1)
		return (-1);

	ticks_elapsed = ddi_get_lbolt() - t->ctm_start;
	secs_left = t->ctm_total - (drv_hztousec(ticks_elapsed) / MICROSEC);
	return (secs_left > 0 ? secs_left : 0);
}
9554845Svikram
9560Sstevel@tonic-gate /*
9570Sstevel@tonic-gate * contract_status_common
9580Sstevel@tonic-gate *
9590Sstevel@tonic-gate * Populates a ct_status structure. Used by contract types in their
9600Sstevel@tonic-gate * status entry points and ctfs when only common information is
9610Sstevel@tonic-gate * requested.
9620Sstevel@tonic-gate */
9630Sstevel@tonic-gate void
contract_status_common(contract_t * ct,zone_t * zone,void * status,model_t model)9640Sstevel@tonic-gate contract_status_common(contract_t *ct, zone_t *zone, void *status,
9650Sstevel@tonic-gate model_t model)
9660Sstevel@tonic-gate {
9670Sstevel@tonic-gate STRUCT_HANDLE(ct_status, lstatus);
9680Sstevel@tonic-gate
9690Sstevel@tonic-gate STRUCT_SET_HANDLE(lstatus, model, status);
9700Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ct->ct_lock));
9710Sstevel@tonic-gate if (zone->zone_uniqid == GLOBAL_ZONEUNIQID ||
9720Sstevel@tonic-gate zone->zone_uniqid == ct->ct_czuniqid) {
9730Sstevel@tonic-gate zone_t *czone;
9740Sstevel@tonic-gate zoneid_t zoneid = -1;
9750Sstevel@tonic-gate
9760Sstevel@tonic-gate /*
9770Sstevel@tonic-gate * Contracts don't have holds on the zones they were
9780Sstevel@tonic-gate * created by. If the contract's zone no longer
9790Sstevel@tonic-gate * exists, we say its zoneid is -1.
9800Sstevel@tonic-gate */
9810Sstevel@tonic-gate if (zone->zone_uniqid == ct->ct_czuniqid ||
9820Sstevel@tonic-gate ct->ct_czuniqid == GLOBAL_ZONEUNIQID) {
9830Sstevel@tonic-gate zoneid = ct->ct_zoneid;
9840Sstevel@tonic-gate } else if ((czone = zone_find_by_id(ct->ct_zoneid)) != NULL) {
9850Sstevel@tonic-gate if (czone->zone_uniqid == ct->ct_mzuniqid)
9860Sstevel@tonic-gate zoneid = ct->ct_zoneid;
9870Sstevel@tonic-gate zone_rele(czone);
9880Sstevel@tonic-gate }
9890Sstevel@tonic-gate
9900Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_zoneid, zoneid);
9910Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_holder,
9920Sstevel@tonic-gate (ct->ct_state == CTS_OWNED) ? ct->ct_owner->p_pid :
9930Sstevel@tonic-gate (ct->ct_state == CTS_INHERITED) ? ct->ct_regent->ct_id : 0);
9940Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_state, ct->ct_state);
9950Sstevel@tonic-gate } else {
9960Sstevel@tonic-gate /*
9970Sstevel@tonic-gate * We are looking at a contract which was created by a
9980Sstevel@tonic-gate * process outside of our zone. We provide fake zone,
9990Sstevel@tonic-gate * holder, and state information.
10000Sstevel@tonic-gate */
10010Sstevel@tonic-gate
10020Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_zoneid, zone->zone_id);
10030Sstevel@tonic-gate /*
10040Sstevel@tonic-gate * Since "zone" can't disappear until the calling ctfs
10050Sstevel@tonic-gate * is unmounted, zone_zsched must be valid.
10060Sstevel@tonic-gate */
10070Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_holder, (ct->ct_state < CTS_ORPHAN) ?
10080Sstevel@tonic-gate zone->zone_zsched->p_pid : 0);
10090Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_state, (ct->ct_state < CTS_ORPHAN) ?
10100Sstevel@tonic-gate CTS_OWNED : ct->ct_state);
10110Sstevel@tonic-gate }
10120Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_nevents, ct->ct_evcnt);
10134845Svikram STRUCT_FSET(lstatus, ctst_ntime, get_time_left(&ct->ct_ntime));
10144845Svikram STRUCT_FSET(lstatus, ctst_qtime, get_time_left(&ct->ct_qtime));
10150Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_nevid,
10160Sstevel@tonic-gate ct->ct_nevent ? ct->ct_nevent->cte_id : 0);
10170Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_critical, ct->ct_ev_crit);
10180Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_informative, ct->ct_ev_info);
10190Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_cookie, ct->ct_cookie);
10200Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_type, ct->ct_type->ct_type_index);
10210Sstevel@tonic-gate STRUCT_FSET(lstatus, ctst_id, ct->ct_id);
10220Sstevel@tonic-gate }

/*
 * contract_checkcred
 *
 * Determines if the specified contract is owned by a process with the
 * same effective uid as the specified credential. The caller must
 * ensure that the uid spaces are the same. Returns 1 on success.
 */
static int
contract_checkcred(contract_t *ct, const cred_t *cr)
{
	proc_t *p;
	int fail = 1;

	mutex_enter(&ct->ct_lock);
	if ((p = ct->ct_owner) != NULL) {
		mutex_enter(&p->p_crlock);
		fail = crgetuid(cr) != crgetuid(p->p_cred);
		mutex_exit(&p->p_crlock);
	}
	mutex_exit(&ct->ct_lock);

	return (!fail);
}

/*
 * contract_owned
 *
 * Determines if the specified credential can view an event generated
 * by the specified contract. If locked is set, the contract's ct_lock
 * is held and the caller will need to do additional work to determine
 * if they truly can see the event. Returns 1 on success.
 */
int
contract_owned(contract_t *ct, const cred_t *cr, int locked)
{
	int owner, cmatch, zmatch;
	uint64_t zuniqid, mzuniqid;
	uid_t euid;

	ASSERT(locked || MUTEX_NOT_HELD(&ct->ct_lock));

	zuniqid = curproc->p_zone->zone_uniqid;
	mzuniqid = contract_getzuniqid(ct);
	euid = crgetuid(cr);

	/*
	 * owner: we own the contract
	 * cmatch: we are in the creator's (and holder's) zone and our
	 *   uid matches the creator's or holder's
	 * zmatch: we are in the effective zone of a contract created
	 *   in the global zone, and our uid matches that of the
	 *   virtualized holder's (zsched/kcred)
	 */
	owner = (ct->ct_owner == curproc);
	cmatch = (zuniqid == ct->ct_czuniqid) &&
	    ((ct->ct_cuid == euid) || (!locked && contract_checkcred(ct, cr)));
	zmatch = (ct->ct_czuniqid != mzuniqid) && (zuniqid == mzuniqid) &&
	    (crgetuid(kcred) == euid);

	return (owner || cmatch || zmatch);
}


/*
 * contract_type_init
 *
 * Called by contract types to register themselves with the contracts
 * framework.
 */
ct_type_t *
contract_type_init(ct_typeid_t type, const char *name, contops_t *ops,
    ct_f_default_t *dfault)
{
	ct_type_t *result;

	ASSERT(type < CTT_MAXTYPE);

	result = kmem_alloc(sizeof (ct_type_t), KM_SLEEP);

	mutex_init(&result->ct_type_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&result->ct_type_avl, contract_compar, sizeof (contract_t),
	    offsetof(contract_t, ct_cttavl));
	cte_queue_create(&result->ct_type_events, CTEL_BUNDLE, 20, 0);
	result->ct_type_name = name;
	result->ct_type_ops = ops;
	result->ct_type_default = dfault;
	result->ct_type_evid = 0;
	gethrestime(&result->ct_type_timestruc);
	result->ct_type_index = type;

	ct_types[type] = result;

	return (result);
}
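
/*
 * Illustrative sketch only: a contract type's init routine (called
 * from contract_init() above) registers itself roughly as follows.
 * CTT_FOO, foo_contops and foo_default are hypothetical names; the
 * real examples are contract_process_init() and contract_device_init().
 *
 *	static ct_type_t *foo_type;
 *
 *	void
 *	contract_foo_init(void)
 *	{
 *		foo_type = contract_type_init(CTT_FOO, "foo",
 *		    &foo_contops, foo_default);
 *	}
 */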

/*
 * contract_type_count
 *
 * Obtains the number of contracts of a particular type.
 */
int
contract_type_count(ct_type_t *type)
{
	ulong_t count;

	mutex_enter(&type->ct_type_lock);
	count = avl_numnodes(&type->ct_type_avl);
	mutex_exit(&type->ct_type_lock);

	return (count);
}

11360Sstevel@tonic-gate /*
11370Sstevel@tonic-gate * contract_type_max
11380Sstevel@tonic-gate *
11390Sstevel@tonic-gate * Obtains the maximum contract id of of a particular type.
11400Sstevel@tonic-gate */
11410Sstevel@tonic-gate ctid_t
11420Sstevel@tonic-gate contract_type_max(ct_type_t *type)
11430Sstevel@tonic-gate {
11440Sstevel@tonic-gate contract_t *ct;
11450Sstevel@tonic-gate ctid_t res;
11460Sstevel@tonic-gate
11470Sstevel@tonic-gate mutex_enter(&type->ct_type_lock);
11480Sstevel@tonic-gate ct = avl_last(&type->ct_type_avl);
11490Sstevel@tonic-gate res = ct ? ct->ct_id : -1;
11500Sstevel@tonic-gate mutex_exit(&type->ct_type_lock);
11510Sstevel@tonic-gate
11520Sstevel@tonic-gate return (res);
11530Sstevel@tonic-gate }
11540Sstevel@tonic-gate
11550Sstevel@tonic-gate /*
11560Sstevel@tonic-gate * contract_max
11570Sstevel@tonic-gate *
11580Sstevel@tonic-gate * Obtains the maximum contract id.
11590Sstevel@tonic-gate */
11600Sstevel@tonic-gate ctid_t
11610Sstevel@tonic-gate contract_max(void)
11620Sstevel@tonic-gate {
11630Sstevel@tonic-gate contract_t *ct;
11640Sstevel@tonic-gate ctid_t res;
11650Sstevel@tonic-gate
11660Sstevel@tonic-gate mutex_enter(&contract_lock);
11670Sstevel@tonic-gate ct = avl_last(&contract_avl);
11680Sstevel@tonic-gate res = ct ? ct->ct_id : -1;
11690Sstevel@tonic-gate mutex_exit(&contract_lock);
11700Sstevel@tonic-gate
11710Sstevel@tonic-gate return (res);
11720Sstevel@tonic-gate }
11730Sstevel@tonic-gate
11740Sstevel@tonic-gate /*
11750Sstevel@tonic-gate * contract_lookup_common
11760Sstevel@tonic-gate *
11770Sstevel@tonic-gate * Common code for contract_lookup and contract_type_lookup. Takes a
11780Sstevel@tonic-gate * pointer to an AVL tree to search in. Should be called with the
11790Sstevel@tonic-gate * appropriate tree-protecting lock held (unfortunately unassertable).
11800Sstevel@tonic-gate */
11810Sstevel@tonic-gate static ctid_t
11820Sstevel@tonic-gate contract_lookup_common(avl_tree_t *tree, uint64_t zuniqid, ctid_t current)
11830Sstevel@tonic-gate {
11840Sstevel@tonic-gate contract_t template, *ct;
11850Sstevel@tonic-gate avl_index_t where;
11860Sstevel@tonic-gate ctid_t res;
11870Sstevel@tonic-gate
11880Sstevel@tonic-gate template.ct_id = current;
11890Sstevel@tonic-gate ct = avl_find(tree, &template, &where);
11900Sstevel@tonic-gate if (ct == NULL)
11910Sstevel@tonic-gate ct = avl_nearest(tree, where, AVL_AFTER);
11920Sstevel@tonic-gate if (zuniqid != GLOBAL_ZONEUNIQID)
11930Sstevel@tonic-gate while (ct && (contract_getzuniqid(ct) != zuniqid))
11940Sstevel@tonic-gate ct = AVL_NEXT(tree, ct);
11950Sstevel@tonic-gate res = ct ? ct->ct_id : -1;
11960Sstevel@tonic-gate
11970Sstevel@tonic-gate return (res);
11980Sstevel@tonic-gate }
11990Sstevel@tonic-gate
12000Sstevel@tonic-gate /*
12010Sstevel@tonic-gate * contract_type_lookup
12020Sstevel@tonic-gate *
12030Sstevel@tonic-gate  * Returns the next contract of the specified type after the
12040Sstevel@tonic-gate  * specified id, visible from the specified zone.
12050Sstevel@tonic-gate */
12060Sstevel@tonic-gate ctid_t
12070Sstevel@tonic-gate contract_type_lookup(ct_type_t *type, uint64_t zuniqid, ctid_t current)
12080Sstevel@tonic-gate {
12090Sstevel@tonic-gate ctid_t res;
12100Sstevel@tonic-gate
12110Sstevel@tonic-gate mutex_enter(&type->ct_type_lock);
12120Sstevel@tonic-gate res = contract_lookup_common(&type->ct_type_avl, zuniqid, current);
12130Sstevel@tonic-gate mutex_exit(&type->ct_type_lock);
12140Sstevel@tonic-gate
12150Sstevel@tonic-gate return (res);
12160Sstevel@tonic-gate }
12170Sstevel@tonic-gate
12180Sstevel@tonic-gate /*
12190Sstevel@tonic-gate * contract_lookup
12200Sstevel@tonic-gate *
12210Sstevel@tonic-gate * Returns the next contract after the specified id, visible from the
12220Sstevel@tonic-gate * specified zone.
12230Sstevel@tonic-gate */
12240Sstevel@tonic-gate ctid_t
12250Sstevel@tonic-gate contract_lookup(uint64_t zuniqid, ctid_t current)
12260Sstevel@tonic-gate {
12270Sstevel@tonic-gate ctid_t res;
12280Sstevel@tonic-gate
12290Sstevel@tonic-gate mutex_enter(&contract_lock);
12300Sstevel@tonic-gate res = contract_lookup_common(&contract_avl, zuniqid, current);
12310Sstevel@tonic-gate mutex_exit(&contract_lock);
12320Sstevel@tonic-gate
12330Sstevel@tonic-gate return (res);
12340Sstevel@tonic-gate }
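/*
 * Illustrative sketch (added commentary): since the lookup returns the
 * smallest visible contract id greater than or equal to "current", or -1
 * when there is none, a consumer can enumerate every contract visible
 * from a zone with a loop like the following:
 *
 *	ctid_t id;
 *
 *	for (id = contract_lookup(zuniqid, 0); id != -1;
 *	    id = contract_lookup(zuniqid, id + 1)) {
 *		(visit contract "id")
 *	}
 */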
12350Sstevel@tonic-gate
12360Sstevel@tonic-gate /*
12370Sstevel@tonic-gate * contract_plookup
12380Sstevel@tonic-gate *
12390Sstevel@tonic-gate * Returns the next contract held by process p after the specified id,
12400Sstevel@tonic-gate * visible from the specified zone. Made complicated by the fact that
12410Sstevel@tonic-gate * contracts visible in a zone but held by processes outside of the
12420Sstevel@tonic-gate * zone need to appear as being held by zsched to zone members.
12430Sstevel@tonic-gate */
12440Sstevel@tonic-gate ctid_t
12450Sstevel@tonic-gate contract_plookup(proc_t *p, ctid_t current, uint64_t zuniqid)
12460Sstevel@tonic-gate {
12470Sstevel@tonic-gate contract_t template, *ct;
12480Sstevel@tonic-gate avl_index_t where;
12490Sstevel@tonic-gate ctid_t res;
12500Sstevel@tonic-gate
12510Sstevel@tonic-gate template.ct_id = current;
12520Sstevel@tonic-gate if (zuniqid != GLOBAL_ZONEUNIQID &&
12530Sstevel@tonic-gate (p->p_flag & (SSYS|SZONETOP)) == (SSYS|SZONETOP)) {
12540Sstevel@tonic-gate /* This is inelegant. */
12550Sstevel@tonic-gate mutex_enter(&contract_lock);
12560Sstevel@tonic-gate ct = avl_find(&contract_avl, &template, &where);
12570Sstevel@tonic-gate if (ct == NULL)
12580Sstevel@tonic-gate ct = avl_nearest(&contract_avl, where, AVL_AFTER);
12590Sstevel@tonic-gate while (ct && !(ct->ct_state < CTS_ORPHAN &&
12600Sstevel@tonic-gate contract_getzuniqid(ct) == zuniqid &&
12610Sstevel@tonic-gate ct->ct_czuniqid == GLOBAL_ZONEUNIQID))
12620Sstevel@tonic-gate ct = AVL_NEXT(&contract_avl, ct);
12630Sstevel@tonic-gate res = ct ? ct->ct_id : -1;
12640Sstevel@tonic-gate mutex_exit(&contract_lock);
12650Sstevel@tonic-gate } else {
12660Sstevel@tonic-gate mutex_enter(&p->p_lock);
12670Sstevel@tonic-gate ct = avl_find(&p->p_ct_held, &template, &where);
12680Sstevel@tonic-gate if (ct == NULL)
12690Sstevel@tonic-gate ct = avl_nearest(&p->p_ct_held, where, AVL_AFTER);
12700Sstevel@tonic-gate res = ct ? ct->ct_id : -1;
12710Sstevel@tonic-gate mutex_exit(&p->p_lock);
12720Sstevel@tonic-gate }
12730Sstevel@tonic-gate
12740Sstevel@tonic-gate return (res);
12750Sstevel@tonic-gate }
12760Sstevel@tonic-gate
12770Sstevel@tonic-gate /*
12780Sstevel@tonic-gate * contract_ptr_common
12790Sstevel@tonic-gate *
12800Sstevel@tonic-gate * Common code for contract_ptr and contract_type_ptr. Takes a pointer
12810Sstevel@tonic-gate * to an AVL tree to search in. Should be called with the appropriate
12820Sstevel@tonic-gate * tree-protecting lock held (unfortunately unassertable).
12830Sstevel@tonic-gate */
12840Sstevel@tonic-gate static contract_t *
12850Sstevel@tonic-gate contract_ptr_common(avl_tree_t *tree, ctid_t id, uint64_t zuniqid)
12860Sstevel@tonic-gate {
12870Sstevel@tonic-gate contract_t template, *ct;
12880Sstevel@tonic-gate
12890Sstevel@tonic-gate template.ct_id = id;
12900Sstevel@tonic-gate ct = avl_find(tree, &template, NULL);
12910Sstevel@tonic-gate if (ct == NULL || (zuniqid != GLOBAL_ZONEUNIQID &&
12920Sstevel@tonic-gate contract_getzuniqid(ct) != zuniqid)) {
12930Sstevel@tonic-gate return (NULL);
12940Sstevel@tonic-gate }
12950Sstevel@tonic-gate
12960Sstevel@tonic-gate /*
12970Sstevel@tonic-gate * Check to see if a thread is in the window in contract_rele
12980Sstevel@tonic-gate * between dropping the reference count and removing the
12990Sstevel@tonic-gate * contract from the type AVL.
13000Sstevel@tonic-gate */
13010Sstevel@tonic-gate mutex_enter(&ct->ct_reflock);
13020Sstevel@tonic-gate if (ct->ct_ref) {
13030Sstevel@tonic-gate ct->ct_ref++;
13040Sstevel@tonic-gate mutex_exit(&ct->ct_reflock);
13050Sstevel@tonic-gate } else {
13060Sstevel@tonic-gate mutex_exit(&ct->ct_reflock);
13070Sstevel@tonic-gate ct = NULL;
13080Sstevel@tonic-gate }
13090Sstevel@tonic-gate
13100Sstevel@tonic-gate return (ct);
13110Sstevel@tonic-gate }
13120Sstevel@tonic-gate
13130Sstevel@tonic-gate /*
13140Sstevel@tonic-gate * contract_type_ptr
13150Sstevel@tonic-gate *
13160Sstevel@tonic-gate * Returns a pointer to the contract with the specified id. The
13170Sstevel@tonic-gate * contract is held, so the caller needs to release the reference when
13180Sstevel@tonic-gate * it is through with the contract.
13190Sstevel@tonic-gate */
13200Sstevel@tonic-gate contract_t *
13210Sstevel@tonic-gate contract_type_ptr(ct_type_t *type, ctid_t id, uint64_t zuniqid)
13220Sstevel@tonic-gate {
13230Sstevel@tonic-gate contract_t *ct;
13240Sstevel@tonic-gate
13250Sstevel@tonic-gate mutex_enter(&type->ct_type_lock);
13260Sstevel@tonic-gate ct = contract_ptr_common(&type->ct_type_avl, id, zuniqid);
13270Sstevel@tonic-gate mutex_exit(&type->ct_type_lock);
13280Sstevel@tonic-gate
13290Sstevel@tonic-gate return (ct);
13300Sstevel@tonic-gate }
13310Sstevel@tonic-gate
13320Sstevel@tonic-gate /*
13330Sstevel@tonic-gate * contract_ptr
13340Sstevel@tonic-gate *
13350Sstevel@tonic-gate * Returns a pointer to the contract with the specified id. The
13360Sstevel@tonic-gate * contract is held, so the caller needs to release the reference when
13370Sstevel@tonic-gate * it is through with the contract.
13380Sstevel@tonic-gate */
13390Sstevel@tonic-gate contract_t *
13400Sstevel@tonic-gate contract_ptr(ctid_t id, uint64_t zuniqid)
13410Sstevel@tonic-gate {
13420Sstevel@tonic-gate contract_t *ct;
13430Sstevel@tonic-gate
13440Sstevel@tonic-gate mutex_enter(&contract_lock);
13450Sstevel@tonic-gate ct = contract_ptr_common(&contract_avl, id, zuniqid);
13460Sstevel@tonic-gate mutex_exit(&contract_lock);
13470Sstevel@tonic-gate
13480Sstevel@tonic-gate return (ct);
13490Sstevel@tonic-gate }
13500Sstevel@tonic-gate
13510Sstevel@tonic-gate /*
13520Sstevel@tonic-gate * contract_type_time
13530Sstevel@tonic-gate *
13540Sstevel@tonic-gate * Obtains the last time a contract of a particular type was created.
13550Sstevel@tonic-gate */
13560Sstevel@tonic-gate void
13570Sstevel@tonic-gate contract_type_time(ct_type_t *type, timestruc_t *time)
13580Sstevel@tonic-gate {
13590Sstevel@tonic-gate mutex_enter(&type->ct_type_lock);
13600Sstevel@tonic-gate *time = type->ct_type_timestruc;
13610Sstevel@tonic-gate mutex_exit(&type->ct_type_lock);
13620Sstevel@tonic-gate }
13630Sstevel@tonic-gate
13640Sstevel@tonic-gate /*
13650Sstevel@tonic-gate * contract_type_bundle
13660Sstevel@tonic-gate *
13670Sstevel@tonic-gate * Obtains a type's bundle queue.
13680Sstevel@tonic-gate */
13690Sstevel@tonic-gate ct_equeue_t *
13700Sstevel@tonic-gate contract_type_bundle(ct_type_t *type)
13710Sstevel@tonic-gate {
13720Sstevel@tonic-gate return (&type->ct_type_events);
13730Sstevel@tonic-gate }
13740Sstevel@tonic-gate
13750Sstevel@tonic-gate /*
13760Sstevel@tonic-gate * contract_type_pbundle
13770Sstevel@tonic-gate *
13780Sstevel@tonic-gate  * Obtains a process's bundle queue.  If one doesn't exist, one is
13790Sstevel@tonic-gate * created. Often used simply to ensure that a bundle queue is
13800Sstevel@tonic-gate * allocated.
13810Sstevel@tonic-gate */
13820Sstevel@tonic-gate ct_equeue_t *
13830Sstevel@tonic-gate contract_type_pbundle(ct_type_t *type, proc_t *pp)
13840Sstevel@tonic-gate {
13850Sstevel@tonic-gate /*
13860Sstevel@tonic-gate * If there isn't an array of bundle queues, allocate one.
13870Sstevel@tonic-gate */
13880Sstevel@tonic-gate if (pp->p_ct_equeue == NULL) {
13890Sstevel@tonic-gate size_t size = CTT_MAXTYPE * sizeof (ct_equeue_t *);
13900Sstevel@tonic-gate ct_equeue_t **qa = kmem_zalloc(size, KM_SLEEP);
13910Sstevel@tonic-gate
13920Sstevel@tonic-gate mutex_enter(&pp->p_lock);
13930Sstevel@tonic-gate if (pp->p_ct_equeue)
13940Sstevel@tonic-gate kmem_free(qa, size);
13950Sstevel@tonic-gate else
13960Sstevel@tonic-gate pp->p_ct_equeue = qa;
13970Sstevel@tonic-gate mutex_exit(&pp->p_lock);
13980Sstevel@tonic-gate }
13990Sstevel@tonic-gate
14000Sstevel@tonic-gate /*
14010Sstevel@tonic-gate * If there isn't a bundle queue of the required type, allocate
14020Sstevel@tonic-gate * one.
14030Sstevel@tonic-gate */
14040Sstevel@tonic-gate if (pp->p_ct_equeue[type->ct_type_index] == NULL) {
14050Sstevel@tonic-gate ct_equeue_t *q = kmem_zalloc(sizeof (ct_equeue_t), KM_SLEEP);
14060Sstevel@tonic-gate cte_queue_create(q, CTEL_PBUNDLE, 20, 1);
14070Sstevel@tonic-gate
14080Sstevel@tonic-gate mutex_enter(&pp->p_lock);
14090Sstevel@tonic-gate if (pp->p_ct_equeue[type->ct_type_index])
14100Sstevel@tonic-gate cte_queue_drain(q, 0);
14110Sstevel@tonic-gate else
14120Sstevel@tonic-gate pp->p_ct_equeue[type->ct_type_index] = q;
14130Sstevel@tonic-gate mutex_exit(&pp->p_lock);
14140Sstevel@tonic-gate }
14150Sstevel@tonic-gate
14160Sstevel@tonic-gate return (pp->p_ct_equeue[type->ct_type_index]);
14170Sstevel@tonic-gate }
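/*
 * Illustrative usage (added commentary): a caller that is about to make a
 * process the owner of a contract of this type might call
 *
 *	(void) contract_type_pbundle(ct->ct_type, curproc);
 *
 * up front, since the allocations above can sleep.  Note the
 * allocate-then-recheck idiom used in this function: the queue (and the
 * queue array) are allocated without p_lock held, and the allocation is
 * discarded if another thread won the race.
 */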
14180Sstevel@tonic-gate
14190Sstevel@tonic-gate /*
1420*7937SAntonello.Cruz@Sun.COM * ctparam_copyin
1421*7937SAntonello.Cruz@Sun.COM *
1422*7937SAntonello.Cruz@Sun.COM  * Copies in a ct_param_t for the CT_TSET or CT_TGET commands.
1423*7937SAntonello.Cruz@Sun.COM  * If ctparam_copyout() is not called after ctparam_copyin(), then
1424*7937SAntonello.Cruz@Sun.COM  * the caller must kmem_free() the buffer pointed to by kparam->ctpm_kbuf.
1425*7937SAntonello.Cruz@Sun.COM *
1426*7937SAntonello.Cruz@Sun.COM * The copyin/out of ct_param_t is not done in ctmpl_set() and ctmpl_get()
1427*7937SAntonello.Cruz@Sun.COM * because prctioctl() calls ctmpl_set() and ctmpl_get() while holding a
1428*7937SAntonello.Cruz@Sun.COM * process lock.
1429*7937SAntonello.Cruz@Sun.COM */
1430*7937SAntonello.Cruz@Sun.COM int
1431*7937SAntonello.Cruz@Sun.COM ctparam_copyin(const void *uaddr, ct_kparam_t *kparam, int flag, int cmd)
1432*7937SAntonello.Cruz@Sun.COM {
1433*7937SAntonello.Cruz@Sun.COM uint32_t size;
1434*7937SAntonello.Cruz@Sun.COM void *ubuf;
1435*7937SAntonello.Cruz@Sun.COM ct_param_t *param = &kparam->param;
1436*7937SAntonello.Cruz@Sun.COM STRUCT_DECL(ct_param, uarg);
1437*7937SAntonello.Cruz@Sun.COM
1438*7937SAntonello.Cruz@Sun.COM STRUCT_INIT(uarg, flag);
1439*7937SAntonello.Cruz@Sun.COM if (copyin(uaddr, STRUCT_BUF(uarg), STRUCT_SIZE(uarg)))
1440*7937SAntonello.Cruz@Sun.COM return (EFAULT);
1441*7937SAntonello.Cruz@Sun.COM size = STRUCT_FGET(uarg, ctpm_size);
1442*7937SAntonello.Cruz@Sun.COM ubuf = STRUCT_FGETP(uarg, ctpm_value);
1443*7937SAntonello.Cruz@Sun.COM
1444*7937SAntonello.Cruz@Sun.COM if (size > CT_PARAM_MAX_SIZE || size == 0)
1445*7937SAntonello.Cruz@Sun.COM return (EINVAL);
1446*7937SAntonello.Cruz@Sun.COM
1447*7937SAntonello.Cruz@Sun.COM kparam->ctpm_kbuf = kmem_alloc(size, KM_SLEEP);
1448*7937SAntonello.Cruz@Sun.COM if (cmd == CT_TSET) {
1449*7937SAntonello.Cruz@Sun.COM if (copyin(ubuf, kparam->ctpm_kbuf, size)) {
1450*7937SAntonello.Cruz@Sun.COM kmem_free(kparam->ctpm_kbuf, size);
1451*7937SAntonello.Cruz@Sun.COM return (EFAULT);
1452*7937SAntonello.Cruz@Sun.COM }
1453*7937SAntonello.Cruz@Sun.COM }
1454*7937SAntonello.Cruz@Sun.COM param->ctpm_id = STRUCT_FGET(uarg, ctpm_id);
1455*7937SAntonello.Cruz@Sun.COM param->ctpm_size = size;
1456*7937SAntonello.Cruz@Sun.COM param->ctpm_value = ubuf;
1457*7937SAntonello.Cruz@Sun.COM kparam->ret_size = 0;
1458*7937SAntonello.Cruz@Sun.COM
1459*7937SAntonello.Cruz@Sun.COM return (0);
1460*7937SAntonello.Cruz@Sun.COM }
1461*7937SAntonello.Cruz@Sun.COM
1462*7937SAntonello.Cruz@Sun.COM /*
1463*7937SAntonello.Cruz@Sun.COM * ctparam_copyout
1464*7937SAntonello.Cruz@Sun.COM *
1465*7937SAntonello.Cruz@Sun.COM  * Copies out a ct_kparam_t and frees the buffer pointed to by its
1466*7937SAntonello.Cruz@Sun.COM  * ctpm_kbuf member.
1467*7937SAntonello.Cruz@Sun.COM */
1468*7937SAntonello.Cruz@Sun.COM int
1469*7937SAntonello.Cruz@Sun.COM ctparam_copyout(ct_kparam_t *kparam, void *uaddr, int flag)
1470*7937SAntonello.Cruz@Sun.COM {
1471*7937SAntonello.Cruz@Sun.COM int r = 0;
1472*7937SAntonello.Cruz@Sun.COM ct_param_t *param = &kparam->param;
1473*7937SAntonello.Cruz@Sun.COM STRUCT_DECL(ct_param, uarg);
1474*7937SAntonello.Cruz@Sun.COM
1475*7937SAntonello.Cruz@Sun.COM STRUCT_INIT(uarg, flag);
1476*7937SAntonello.Cruz@Sun.COM
1477*7937SAntonello.Cruz@Sun.COM STRUCT_FSET(uarg, ctpm_id, param->ctpm_id);
1478*7937SAntonello.Cruz@Sun.COM STRUCT_FSET(uarg, ctpm_size, kparam->ret_size);
1479*7937SAntonello.Cruz@Sun.COM STRUCT_FSETP(uarg, ctpm_value, param->ctpm_value);
1480*7937SAntonello.Cruz@Sun.COM if (copyout(STRUCT_BUF(uarg), uaddr, STRUCT_SIZE(uarg))) {
1481*7937SAntonello.Cruz@Sun.COM r = EFAULT;
1482*7937SAntonello.Cruz@Sun.COM goto error;
1483*7937SAntonello.Cruz@Sun.COM }
1484*7937SAntonello.Cruz@Sun.COM if (copyout(kparam->ctpm_kbuf, param->ctpm_value,
1485*7937SAntonello.Cruz@Sun.COM MIN(kparam->ret_size, param->ctpm_size))) {
1486*7937SAntonello.Cruz@Sun.COM r = EFAULT;
1487*7937SAntonello.Cruz@Sun.COM }
1488*7937SAntonello.Cruz@Sun.COM
1489*7937SAntonello.Cruz@Sun.COM error:
1490*7937SAntonello.Cruz@Sun.COM kmem_free(kparam->ctpm_kbuf, param->ctpm_size);
1491*7937SAntonello.Cruz@Sun.COM
1492*7937SAntonello.Cruz@Sun.COM return (r);
1493*7937SAntonello.Cruz@Sun.COM }
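/*
 * Illustrative sketch (assumed caller, not part of this file): the two
 * routines above are expected to bracket ctmpl_set()/ctmpl_get() in an
 * ioctl handler.  For a hypothetical CT_TGET path:
 *
 *	ct_kparam_t kparam;
 *	int error;
 *
 *	if ((error = ctparam_copyin(arg, &kparam, flag, CT_TGET)) != 0)
 *		return (error);
 *	if ((error = ctmpl_get(template, &kparam)) != 0)
 *		kmem_free(kparam.ctpm_kbuf, kparam.param.ctpm_size);
 *	else
 *		error = ctparam_copyout(&kparam, arg, flag);
 *	return (error);
 *
 * If ctparam_copyout() is never reached, the caller frees ctpm_kbuf
 * itself, per the ctparam_copyin() comment above.
 */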
1494*7937SAntonello.Cruz@Sun.COM
1495*7937SAntonello.Cruz@Sun.COM /*
14960Sstevel@tonic-gate * ctmpl_free
14970Sstevel@tonic-gate *
14980Sstevel@tonic-gate * Frees a template.
14990Sstevel@tonic-gate */
15000Sstevel@tonic-gate void
15010Sstevel@tonic-gate ctmpl_free(ct_template_t *template)
15020Sstevel@tonic-gate {
15030Sstevel@tonic-gate mutex_destroy(&template->ctmpl_lock);
15040Sstevel@tonic-gate template->ctmpl_ops->ctop_free(template);
15050Sstevel@tonic-gate }
15060Sstevel@tonic-gate
15070Sstevel@tonic-gate /*
15080Sstevel@tonic-gate * ctmpl_dup
15090Sstevel@tonic-gate *
15100Sstevel@tonic-gate * Creates a copy of a template.
15110Sstevel@tonic-gate */
15120Sstevel@tonic-gate ct_template_t *
15130Sstevel@tonic-gate ctmpl_dup(ct_template_t *template)
15140Sstevel@tonic-gate {
15150Sstevel@tonic-gate ct_template_t *new;
15160Sstevel@tonic-gate
15170Sstevel@tonic-gate if (template == NULL)
15180Sstevel@tonic-gate return (NULL);
15190Sstevel@tonic-gate
15200Sstevel@tonic-gate new = template->ctmpl_ops->ctop_dup(template);
15210Sstevel@tonic-gate /*
15220Sstevel@tonic-gate * ctmpl_lock was taken by ctop_dup's call to ctmpl_copy and
15230Sstevel@tonic-gate 	 * should have remained held until now.
15240Sstevel@tonic-gate */
15250Sstevel@tonic-gate mutex_exit(&template->ctmpl_lock);
15260Sstevel@tonic-gate
15270Sstevel@tonic-gate return (new);
15280Sstevel@tonic-gate }
15290Sstevel@tonic-gate
15300Sstevel@tonic-gate /*
15310Sstevel@tonic-gate * ctmpl_set
15320Sstevel@tonic-gate *
15330Sstevel@tonic-gate * Sets the requested terms of a template.
15340Sstevel@tonic-gate */
15350Sstevel@tonic-gate int
1536*7937SAntonello.Cruz@Sun.COM ctmpl_set(ct_template_t *template, ct_kparam_t *kparam, const cred_t *cr)
15370Sstevel@tonic-gate {
15380Sstevel@tonic-gate int result = 0;
1539*7937SAntonello.Cruz@Sun.COM ct_param_t *param = &kparam->param;
15406196Sacruz uint64_t param_value;
15416196Sacruz
15426196Sacruz if (param->ctpm_id == CTP_COOKIE ||
15436196Sacruz param->ctpm_id == CTP_EV_INFO ||
15446196Sacruz param->ctpm_id == CTP_EV_CRITICAL) {
15456196Sacruz if (param->ctpm_size < sizeof (uint64_t)) {
15466196Sacruz return (EINVAL);
15476196Sacruz } else {
1548*7937SAntonello.Cruz@Sun.COM param_value = *(uint64_t *)kparam->ctpm_kbuf;
15496196Sacruz }
15506196Sacruz }
15510Sstevel@tonic-gate
15520Sstevel@tonic-gate mutex_enter(&template->ctmpl_lock);
15530Sstevel@tonic-gate switch (param->ctpm_id) {
15540Sstevel@tonic-gate case CTP_COOKIE:
15556073Sacruz template->ctmpl_cookie = param_value;
15560Sstevel@tonic-gate break;
15570Sstevel@tonic-gate case CTP_EV_INFO:
15586073Sacruz if (param_value & ~(uint64_t)template->ctmpl_ops->allevents)
15590Sstevel@tonic-gate result = EINVAL;
15600Sstevel@tonic-gate else
15616073Sacruz template->ctmpl_ev_info = param_value;
15620Sstevel@tonic-gate break;
15630Sstevel@tonic-gate case CTP_EV_CRITICAL:
15646073Sacruz if (param_value & ~(uint64_t)template->ctmpl_ops->allevents) {
15650Sstevel@tonic-gate result = EINVAL;
15660Sstevel@tonic-gate break;
15676073Sacruz } else if ((~template->ctmpl_ev_crit & param_value) == 0) {
15680Sstevel@tonic-gate /*
15690Sstevel@tonic-gate * Assume that a pure reduction of the critical
15700Sstevel@tonic-gate * set is allowed by the contract type.
15710Sstevel@tonic-gate */
15726073Sacruz template->ctmpl_ev_crit = param_value;
15730Sstevel@tonic-gate break;
15740Sstevel@tonic-gate }
15750Sstevel@tonic-gate /*
15760Sstevel@tonic-gate * There may be restrictions on what we can make
15770Sstevel@tonic-gate * critical, so we defer to the judgement of the
15780Sstevel@tonic-gate * contract type.
15790Sstevel@tonic-gate */
15800Sstevel@tonic-gate /* FALLTHROUGH */
15810Sstevel@tonic-gate default:
1582*7937SAntonello.Cruz@Sun.COM result = template->ctmpl_ops->ctop_set(template, kparam, cr);
15830Sstevel@tonic-gate }
15840Sstevel@tonic-gate mutex_exit(&template->ctmpl_lock);
15850Sstevel@tonic-gate
15860Sstevel@tonic-gate return (result);
15870Sstevel@tonic-gate }
15880Sstevel@tonic-gate
15890Sstevel@tonic-gate /*
15900Sstevel@tonic-gate * ctmpl_get
15910Sstevel@tonic-gate *
15920Sstevel@tonic-gate * Obtains the requested terms from a template.
15936196Sacruz *
15946196Sacruz * If the term requested is a variable-sized term and the buffer
15956196Sacruz * provided is too small for the data, we truncate the data and return
1596*7937SAntonello.Cruz@Sun.COM * the buffer size necessary to fit the term in kparam->ret_size. If the
15976196Sacruz  * term requested is fixed-size (uint64_t) and the buffer provided is too
15986196Sacruz * small, we return EINVAL. This should never happen if you're using
15996196Sacruz  * libcontract(3LIB), only if you call ioctl with a hand-constructed
16006196Sacruz * ct_param_t argument.
16016196Sacruz *
16026196Sacruz  * Currently, only contract-type-specific parameters are
16036196Sacruz  * variable-sized.
16040Sstevel@tonic-gate */
16050Sstevel@tonic-gate int
1606*7937SAntonello.Cruz@Sun.COM ctmpl_get(ct_template_t *template, ct_kparam_t *kparam)
16070Sstevel@tonic-gate {
16080Sstevel@tonic-gate int result = 0;
1609*7937SAntonello.Cruz@Sun.COM ct_param_t *param = &kparam->param;
16106196Sacruz uint64_t *param_value;
16116196Sacruz
16126196Sacruz if (param->ctpm_id == CTP_COOKIE ||
16136196Sacruz param->ctpm_id == CTP_EV_INFO ||
16146196Sacruz param->ctpm_id == CTP_EV_CRITICAL) {
16156196Sacruz if (param->ctpm_size < sizeof (uint64_t)) {
16166196Sacruz return (EINVAL);
16176196Sacruz } else {
1618*7937SAntonello.Cruz@Sun.COM param_value = kparam->ctpm_kbuf;
1619*7937SAntonello.Cruz@Sun.COM kparam->ret_size = sizeof (uint64_t);
16206196Sacruz }
16216196Sacruz }
16220Sstevel@tonic-gate
16230Sstevel@tonic-gate mutex_enter(&template->ctmpl_lock);
16240Sstevel@tonic-gate switch (param->ctpm_id) {
16250Sstevel@tonic-gate case CTP_COOKIE:
16266073Sacruz *param_value = template->ctmpl_cookie;
16270Sstevel@tonic-gate break;
16280Sstevel@tonic-gate case CTP_EV_INFO:
16296073Sacruz *param_value = template->ctmpl_ev_info;
16300Sstevel@tonic-gate break;
16310Sstevel@tonic-gate case CTP_EV_CRITICAL:
16326073Sacruz *param_value = template->ctmpl_ev_crit;
16330Sstevel@tonic-gate break;
16340Sstevel@tonic-gate default:
1635*7937SAntonello.Cruz@Sun.COM result = template->ctmpl_ops->ctop_get(template, kparam);
16360Sstevel@tonic-gate }
16370Sstevel@tonic-gate mutex_exit(&template->ctmpl_lock);
16380Sstevel@tonic-gate
16390Sstevel@tonic-gate return (result);
16400Sstevel@tonic-gate }
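/*
 * Illustrative sketch (hypothetical type-specific op, added commentary):
 * a ctop_get implementation for a variable-sized (string) term would copy
 * at most ctpm_size bytes into ctpm_kbuf while always reporting the full
 * size via ret_size, matching the truncation behavior described above.
 * CTWP_NAME, widget_tmpl_t, and wt_name are hypothetical names.
 *
 *	static int
 *	widget_tmpl_get(ct_template_t *tmpl, ct_kparam_t *kparam)
 *	{
 *		ct_param_t *param = &kparam->param;
 *		widget_tmpl_t *wt = tmpl->ctmpl_data;
 *
 *		if (param->ctpm_id != CTWP_NAME)
 *			return (EINVAL);
 *		kparam->ret_size = strlen(wt->wt_name) + 1;
 *		bcopy(wt->wt_name, kparam->ctpm_kbuf,
 *		    MIN(kparam->ret_size, param->ctpm_size));
 *		return (0);
 *	}
 */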
16410Sstevel@tonic-gate
16420Sstevel@tonic-gate /*
16430Sstevel@tonic-gate * ctmpl_makecurrent
16440Sstevel@tonic-gate *
16450Sstevel@tonic-gate * Used by ctmpl_activate and ctmpl_clear to set the current thread's
16460Sstevel@tonic-gate * active template. Frees the old active template, if there was one.
16470Sstevel@tonic-gate */
16480Sstevel@tonic-gate static void
16490Sstevel@tonic-gate ctmpl_makecurrent(ct_template_t *template, ct_template_t *new)
16500Sstevel@tonic-gate {
16510Sstevel@tonic-gate klwp_t *curlwp = ttolwp(curthread);
16520Sstevel@tonic-gate proc_t *p = curproc;
16530Sstevel@tonic-gate ct_template_t *old;
16540Sstevel@tonic-gate
16550Sstevel@tonic-gate mutex_enter(&p->p_lock);
16560Sstevel@tonic-gate old = curlwp->lwp_ct_active[template->ctmpl_type->ct_type_index];
16570Sstevel@tonic-gate curlwp->lwp_ct_active[template->ctmpl_type->ct_type_index] = new;
16580Sstevel@tonic-gate mutex_exit(&p->p_lock);
16590Sstevel@tonic-gate
16600Sstevel@tonic-gate if (old)
16610Sstevel@tonic-gate ctmpl_free(old);
16620Sstevel@tonic-gate }
16630Sstevel@tonic-gate
16640Sstevel@tonic-gate /*
16650Sstevel@tonic-gate * ctmpl_activate
16660Sstevel@tonic-gate *
16670Sstevel@tonic-gate  * Copies the specified template and makes the copy the current
16680Sstevel@tonic-gate  * thread's active template of that type.
16690Sstevel@tonic-gate */
16700Sstevel@tonic-gate void
16710Sstevel@tonic-gate ctmpl_activate(ct_template_t *template)
16720Sstevel@tonic-gate {
16730Sstevel@tonic-gate ctmpl_makecurrent(template, ctmpl_dup(template));
16740Sstevel@tonic-gate }
16750Sstevel@tonic-gate
16760Sstevel@tonic-gate /*
16770Sstevel@tonic-gate * ctmpl_clear
16780Sstevel@tonic-gate *
16790Sstevel@tonic-gate  * Clears the current thread's active template of the same type as
16800Sstevel@tonic-gate * the specified template.
16810Sstevel@tonic-gate */
16820Sstevel@tonic-gate void
16830Sstevel@tonic-gate ctmpl_clear(ct_template_t *template)
16840Sstevel@tonic-gate {
16850Sstevel@tonic-gate ctmpl_makecurrent(template, NULL);
16860Sstevel@tonic-gate }
16870Sstevel@tonic-gate
16880Sstevel@tonic-gate /*
16890Sstevel@tonic-gate * ctmpl_create
16900Sstevel@tonic-gate *
16910Sstevel@tonic-gate * Creates a new contract using the specified template.
16920Sstevel@tonic-gate */
16930Sstevel@tonic-gate int
16944845Svikram ctmpl_create(ct_template_t *template, ctid_t *ctidp)
16950Sstevel@tonic-gate {
16964845Svikram return (template->ctmpl_ops->ctop_create(template, ctidp));
16970Sstevel@tonic-gate }
16980Sstevel@tonic-gate
16990Sstevel@tonic-gate /*
17000Sstevel@tonic-gate * ctmpl_init
17010Sstevel@tonic-gate *
17020Sstevel@tonic-gate * Initializes the common portion of a new contract template.
17030Sstevel@tonic-gate */
17040Sstevel@tonic-gate void
17050Sstevel@tonic-gate ctmpl_init(ct_template_t *new, ctmplops_t *ops, ct_type_t *type, void *data)
17060Sstevel@tonic-gate {
17070Sstevel@tonic-gate mutex_init(&new->ctmpl_lock, NULL, MUTEX_DEFAULT, NULL);
17080Sstevel@tonic-gate new->ctmpl_ops = ops;
17090Sstevel@tonic-gate new->ctmpl_type = type;
17100Sstevel@tonic-gate new->ctmpl_data = data;
17110Sstevel@tonic-gate new->ctmpl_ev_info = new->ctmpl_ev_crit = 0;
17120Sstevel@tonic-gate new->ctmpl_cookie = 0;
17130Sstevel@tonic-gate }
17140Sstevel@tonic-gate
17150Sstevel@tonic-gate /*
17160Sstevel@tonic-gate * ctmpl_copy
17170Sstevel@tonic-gate *
17180Sstevel@tonic-gate * Copies the common portions of a contract template. Intended for use
17190Sstevel@tonic-gate * by a contract type's ctop_dup template op. Returns with the old
17200Sstevel@tonic-gate  * template's lock held, which should remain held until the
17210Sstevel@tonic-gate * template op returns (it is dropped by ctmpl_dup).
17220Sstevel@tonic-gate */
17230Sstevel@tonic-gate void
17240Sstevel@tonic-gate ctmpl_copy(ct_template_t *new, ct_template_t *old)
17250Sstevel@tonic-gate {
17260Sstevel@tonic-gate mutex_init(&new->ctmpl_lock, NULL, MUTEX_DEFAULT, NULL);
17270Sstevel@tonic-gate mutex_enter(&old->ctmpl_lock);
17280Sstevel@tonic-gate new->ctmpl_ops = old->ctmpl_ops;
17290Sstevel@tonic-gate new->ctmpl_type = old->ctmpl_type;
17300Sstevel@tonic-gate new->ctmpl_ev_crit = old->ctmpl_ev_crit;
17310Sstevel@tonic-gate new->ctmpl_ev_info = old->ctmpl_ev_info;
17320Sstevel@tonic-gate new->ctmpl_cookie = old->ctmpl_cookie;
17330Sstevel@tonic-gate }
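/*
 * Illustrative sketch (hypothetical contract type, added commentary): a
 * type's ctop_dup op is expected to allocate the new template, call
 * ctmpl_copy() to duplicate the common fields (leaving the old template's
 * lock held), copy its own type-specific fields, and return; ctmpl_dup()
 * then drops the old template's lock.  widget_tmpl_t and its members are
 * hypothetical.
 *
 *	static ct_template_t *
 *	widget_tmpl_dup(ct_template_t *old)
 *	{
 *		widget_tmpl_t *new, *oldw = old->ctmpl_data;
 *
 *		new = kmem_alloc(sizeof (widget_tmpl_t), KM_SLEEP);
 *		ctmpl_copy(&new->wt_ctmpl, old);
 *		new->wt_ctmpl.ctmpl_data = new;
 *		new->wt_param = oldw->wt_param;
 *		return (&new->wt_ctmpl);
 *	}
 */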
17340Sstevel@tonic-gate
17350Sstevel@tonic-gate /*
17360Sstevel@tonic-gate * ctmpl_create_inval
17370Sstevel@tonic-gate *
17380Sstevel@tonic-gate * Returns EINVAL. Provided for the convenience of those contract
17390Sstevel@tonic-gate * types which don't support ct_tmpl_create(3contract) and would
17400Sstevel@tonic-gate * otherwise need to create their own stub for the ctop_create template
17410Sstevel@tonic-gate * op.
17420Sstevel@tonic-gate */
17430Sstevel@tonic-gate /*ARGSUSED*/
17440Sstevel@tonic-gate int
17454845Svikram ctmpl_create_inval(ct_template_t *template, ctid_t *ctidp)
17460Sstevel@tonic-gate {
17470Sstevel@tonic-gate return (EINVAL);
17480Sstevel@tonic-gate }
17490Sstevel@tonic-gate
17500Sstevel@tonic-gate
17510Sstevel@tonic-gate /*
17520Sstevel@tonic-gate * cte_queue_create
17530Sstevel@tonic-gate *
17540Sstevel@tonic-gate * Initializes a queue of a particular type. If dynamic is set, the
17550Sstevel@tonic-gate * queue is to be freed when its last listener is removed after being
17560Sstevel@tonic-gate * drained.
17570Sstevel@tonic-gate */
17580Sstevel@tonic-gate static void
17590Sstevel@tonic-gate cte_queue_create(ct_equeue_t *q, ct_listnum_t list, int maxinf, int dynamic)
17600Sstevel@tonic-gate {
17610Sstevel@tonic-gate mutex_init(&q->ctq_lock, NULL, MUTEX_DEFAULT, NULL);
17620Sstevel@tonic-gate q->ctq_listno = list;
17630Sstevel@tonic-gate list_create(&q->ctq_events, sizeof (ct_kevent_t),
17640Sstevel@tonic-gate offsetof(ct_kevent_t, cte_nodes[list].ctm_node));
17650Sstevel@tonic-gate list_create(&q->ctq_listeners, sizeof (ct_listener_t),
17660Sstevel@tonic-gate offsetof(ct_listener_t, ctl_allnode));
17670Sstevel@tonic-gate list_create(&q->ctq_tail, sizeof (ct_listener_t),
17680Sstevel@tonic-gate offsetof(ct_listener_t, ctl_tailnode));
17690Sstevel@tonic-gate gethrestime(&q->ctq_atime);
17700Sstevel@tonic-gate q->ctq_nlisteners = 0;
17710Sstevel@tonic-gate q->ctq_nreliable = 0;
17720Sstevel@tonic-gate q->ctq_ninf = 0;
17730Sstevel@tonic-gate q->ctq_max = maxinf;
17740Sstevel@tonic-gate
17750Sstevel@tonic-gate /*
17760Sstevel@tonic-gate * Bundle queues and contract queues are embedded in other
17770Sstevel@tonic-gate 	 * structures and are implicitly reference counted by virtue
17780Sstevel@tonic-gate * of their vnodes' indirect hold on their contracts. Process
17790Sstevel@tonic-gate * bundle queues are dynamically allocated and may persist
17800Sstevel@tonic-gate * after the death of the process, so they must be explicitly
17810Sstevel@tonic-gate * reference counted.
17820Sstevel@tonic-gate */
17830Sstevel@tonic-gate q->ctq_flags = dynamic ? CTQ_REFFED : 0;
17840Sstevel@tonic-gate }
17850Sstevel@tonic-gate
17860Sstevel@tonic-gate /*
17870Sstevel@tonic-gate * cte_queue_destroy
17880Sstevel@tonic-gate *
17890Sstevel@tonic-gate  * Destroys the specified queue.  The queue is freed if it is
17900Sstevel@tonic-gate  * reference counted.
17910Sstevel@tonic-gate */
17920Sstevel@tonic-gate static void
17930Sstevel@tonic-gate cte_queue_destroy(ct_equeue_t *q)
17940Sstevel@tonic-gate {
17950Sstevel@tonic-gate ASSERT(q->ctq_flags & CTQ_DEAD);
17960Sstevel@tonic-gate ASSERT(q->ctq_nlisteners == 0);
17970Sstevel@tonic-gate ASSERT(q->ctq_nreliable == 0);
17980Sstevel@tonic-gate list_destroy(&q->ctq_events);
17990Sstevel@tonic-gate list_destroy(&q->ctq_listeners);
18000Sstevel@tonic-gate list_destroy(&q->ctq_tail);
18010Sstevel@tonic-gate mutex_destroy(&q->ctq_lock);
18020Sstevel@tonic-gate if (q->ctq_flags & CTQ_REFFED)
18030Sstevel@tonic-gate kmem_free(q, sizeof (ct_equeue_t));
18040Sstevel@tonic-gate }
18050Sstevel@tonic-gate
18060Sstevel@tonic-gate /*
18070Sstevel@tonic-gate * cte_hold
18080Sstevel@tonic-gate *
18090Sstevel@tonic-gate * Takes a hold on the specified event.
18100Sstevel@tonic-gate */
18110Sstevel@tonic-gate static void
18120Sstevel@tonic-gate cte_hold(ct_kevent_t *e)
18130Sstevel@tonic-gate {
18140Sstevel@tonic-gate mutex_enter(&e->cte_lock);
18150Sstevel@tonic-gate ASSERT(e->cte_refs > 0);
18160Sstevel@tonic-gate e->cte_refs++;
18170Sstevel@tonic-gate mutex_exit(&e->cte_lock);
18180Sstevel@tonic-gate }
18190Sstevel@tonic-gate
18200Sstevel@tonic-gate /*
18210Sstevel@tonic-gate * cte_rele
18220Sstevel@tonic-gate *
18230Sstevel@tonic-gate * Releases a hold on the specified event. If the caller had the last
18240Sstevel@tonic-gate * reference, frees the event and releases its hold on the contract
18250Sstevel@tonic-gate * that generated it.
18260Sstevel@tonic-gate */
18270Sstevel@tonic-gate static void
18280Sstevel@tonic-gate cte_rele(ct_kevent_t *e)
18290Sstevel@tonic-gate {
18300Sstevel@tonic-gate mutex_enter(&e->cte_lock);
18310Sstevel@tonic-gate ASSERT(e->cte_refs > 0);
18320Sstevel@tonic-gate if (--e->cte_refs) {
18330Sstevel@tonic-gate mutex_exit(&e->cte_lock);
18340Sstevel@tonic-gate return;
18350Sstevel@tonic-gate }
18360Sstevel@tonic-gate
18370Sstevel@tonic-gate contract_rele(e->cte_contract);
18380Sstevel@tonic-gate
18390Sstevel@tonic-gate mutex_destroy(&e->cte_lock);
18400Sstevel@tonic-gate if (e->cte_data)
18410Sstevel@tonic-gate nvlist_free(e->cte_data);
18420Sstevel@tonic-gate if (e->cte_gdata)
18430Sstevel@tonic-gate nvlist_free(e->cte_gdata);
18440Sstevel@tonic-gate kmem_free(e, sizeof (ct_kevent_t));
18450Sstevel@tonic-gate }
18460Sstevel@tonic-gate
18470Sstevel@tonic-gate /*
18480Sstevel@tonic-gate * cte_qrele
18490Sstevel@tonic-gate *
18500Sstevel@tonic-gate * Remove this listener's hold on the specified event, removing and
18510Sstevel@tonic-gate * releasing the queue's hold on the event if appropriate.
18520Sstevel@tonic-gate */
18530Sstevel@tonic-gate static void
18540Sstevel@tonic-gate cte_qrele(ct_equeue_t *q, ct_listener_t *l, ct_kevent_t *e)
18550Sstevel@tonic-gate {
18560Sstevel@tonic-gate ct_member_t *member = &e->cte_nodes[q->ctq_listno];
18570Sstevel@tonic-gate
18580Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
18590Sstevel@tonic-gate
18600Sstevel@tonic-gate if (l->ctl_flags & CTLF_RELIABLE)
18610Sstevel@tonic-gate member->ctm_nreliable--;
18620Sstevel@tonic-gate if ((--member->ctm_refs == 0) && member->ctm_trimmed) {
18630Sstevel@tonic-gate member->ctm_trimmed = 0;
18640Sstevel@tonic-gate list_remove(&q->ctq_events, e);
18650Sstevel@tonic-gate cte_rele(e);
18660Sstevel@tonic-gate }
18670Sstevel@tonic-gate }
18680Sstevel@tonic-gate
18690Sstevel@tonic-gate /*
18700Sstevel@tonic-gate * cte_qmove
18710Sstevel@tonic-gate *
18720Sstevel@tonic-gate * Move this listener to the specified event in the queue.
18730Sstevel@tonic-gate */
18740Sstevel@tonic-gate static ct_kevent_t *
18750Sstevel@tonic-gate cte_qmove(ct_equeue_t *q, ct_listener_t *l, ct_kevent_t *e)
18760Sstevel@tonic-gate {
18770Sstevel@tonic-gate ct_kevent_t *olde;
18780Sstevel@tonic-gate
18790Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
18800Sstevel@tonic-gate ASSERT(l->ctl_equeue == q);
18810Sstevel@tonic-gate
18820Sstevel@tonic-gate if ((olde = l->ctl_position) == NULL)
18830Sstevel@tonic-gate list_remove(&q->ctq_tail, l);
18840Sstevel@tonic-gate
18850Sstevel@tonic-gate while (e != NULL && e->cte_nodes[q->ctq_listno].ctm_trimmed)
18860Sstevel@tonic-gate e = list_next(&q->ctq_events, e);
18870Sstevel@tonic-gate
18880Sstevel@tonic-gate if (e != NULL) {
18890Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_refs++;
18900Sstevel@tonic-gate if (l->ctl_flags & CTLF_RELIABLE)
18910Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_nreliable++;
18920Sstevel@tonic-gate } else {
18930Sstevel@tonic-gate list_insert_tail(&q->ctq_tail, l);
18940Sstevel@tonic-gate }
18950Sstevel@tonic-gate
18960Sstevel@tonic-gate l->ctl_position = e;
18970Sstevel@tonic-gate if (olde)
18980Sstevel@tonic-gate cte_qrele(q, l, olde);
18990Sstevel@tonic-gate
19000Sstevel@tonic-gate return (e);
19010Sstevel@tonic-gate }
19020Sstevel@tonic-gate
19030Sstevel@tonic-gate /*
19040Sstevel@tonic-gate * cte_checkcred
19050Sstevel@tonic-gate *
19060Sstevel@tonic-gate * Determines if the specified event's contract is owned by a process
19070Sstevel@tonic-gate * with the same effective uid as the specified credential. Called
19080Sstevel@tonic-gate * after a failed call to contract_owned with locked set. Because it
19090Sstevel@tonic-gate * drops the queue lock, its caller (cte_qreadable) needs to make sure
19100Sstevel@tonic-gate * we're still in the same place after we return. Returns 1 on
19110Sstevel@tonic-gate * success.
19120Sstevel@tonic-gate */
19130Sstevel@tonic-gate static int
19140Sstevel@tonic-gate cte_checkcred(ct_equeue_t *q, ct_kevent_t *e, const cred_t *cr)
19150Sstevel@tonic-gate {
19160Sstevel@tonic-gate int result;
19170Sstevel@tonic-gate contract_t *ct = e->cte_contract;
19180Sstevel@tonic-gate
19190Sstevel@tonic-gate cte_hold(e);
19200Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
19210Sstevel@tonic-gate result = curproc->p_zone->zone_uniqid == ct->ct_czuniqid &&
19220Sstevel@tonic-gate contract_checkcred(ct, cr);
19230Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
19240Sstevel@tonic-gate cte_rele(e);
19250Sstevel@tonic-gate
19260Sstevel@tonic-gate return (result);
19270Sstevel@tonic-gate }
19280Sstevel@tonic-gate
19290Sstevel@tonic-gate /*
19300Sstevel@tonic-gate * cte_qreadable
19310Sstevel@tonic-gate *
19320Sstevel@tonic-gate * Ensures that the listener is pointing to a valid event that the
19330Sstevel@tonic-gate * caller has the credentials to read. Returns 0 if we can read the
19340Sstevel@tonic-gate * event we're pointing to.
19350Sstevel@tonic-gate */
19360Sstevel@tonic-gate static int
19370Sstevel@tonic-gate cte_qreadable(ct_equeue_t *q, ct_listener_t *l, const cred_t *cr,
19380Sstevel@tonic-gate uint64_t zuniqid, int crit)
19390Sstevel@tonic-gate {
19400Sstevel@tonic-gate ct_kevent_t *e, *next;
19410Sstevel@tonic-gate contract_t *ct;
19420Sstevel@tonic-gate
19430Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
19440Sstevel@tonic-gate ASSERT(l->ctl_equeue == q);
19450Sstevel@tonic-gate
19460Sstevel@tonic-gate if (l->ctl_flags & CTLF_COPYOUT)
19470Sstevel@tonic-gate return (1);
19480Sstevel@tonic-gate
19490Sstevel@tonic-gate next = l->ctl_position;
19500Sstevel@tonic-gate while (e = cte_qmove(q, l, next)) {
19510Sstevel@tonic-gate ct = e->cte_contract;
19520Sstevel@tonic-gate /*
19530Sstevel@tonic-gate * Check obvious things first. If we are looking for a
19540Sstevel@tonic-gate * critical message, is this one? If we aren't in the
19550Sstevel@tonic-gate * global zone, is this message meant for us?
19560Sstevel@tonic-gate */
19570Sstevel@tonic-gate if ((crit && (e->cte_flags & (CTE_INFO | CTE_ACK))) ||
19580Sstevel@tonic-gate (cr != NULL && zuniqid != GLOBAL_ZONEUNIQID &&
19590Sstevel@tonic-gate zuniqid != contract_getzuniqid(ct))) {
19600Sstevel@tonic-gate
19610Sstevel@tonic-gate next = list_next(&q->ctq_events, e);
19620Sstevel@tonic-gate
19630Sstevel@tonic-gate /*
19640Sstevel@tonic-gate 			 * Next, see if our effective uid equals that of the owner
19650Sstevel@tonic-gate 			 * or author of the contract.  Since we are holding the
19660Sstevel@tonic-gate * queue lock, contract_owned can't always check if we
19670Sstevel@tonic-gate * have the same effective uid as the contract's
19680Sstevel@tonic-gate * owner. If it comes to that, it fails and we take
19690Sstevel@tonic-gate * the slow(er) path.
19700Sstevel@tonic-gate */
19710Sstevel@tonic-gate } else if (cr != NULL && !contract_owned(ct, cr, B_TRUE)) {
19720Sstevel@tonic-gate
19730Sstevel@tonic-gate /*
19740Sstevel@tonic-gate * At this point we either don't have any claim
19750Sstevel@tonic-gate * to this contract or we match the effective
19760Sstevel@tonic-gate * uid of the owner but couldn't tell. We
19770Sstevel@tonic-gate * first test for a NULL holder so that events
19780Sstevel@tonic-gate * from orphans and inherited contracts avoid
19790Sstevel@tonic-gate * the penalty phase.
19800Sstevel@tonic-gate */
19810Sstevel@tonic-gate if (e->cte_contract->ct_owner == NULL &&
19820Sstevel@tonic-gate !secpolicy_contract_observer_choice(cr))
19830Sstevel@tonic-gate next = list_next(&q->ctq_events, e);
19840Sstevel@tonic-gate
19850Sstevel@tonic-gate /*
19860Sstevel@tonic-gate * cte_checkcred will juggle locks to see if we
19870Sstevel@tonic-gate * have the same uid as the event's contract's
19880Sstevel@tonic-gate * current owner. If it succeeds, we have to
19890Sstevel@tonic-gate * make sure we are in the same point in the
19900Sstevel@tonic-gate * queue.
19910Sstevel@tonic-gate */
19920Sstevel@tonic-gate else if (cte_checkcred(q, e, cr) &&
19930Sstevel@tonic-gate l->ctl_position == e)
19940Sstevel@tonic-gate break;
19950Sstevel@tonic-gate
19960Sstevel@tonic-gate /*
19970Sstevel@tonic-gate * cte_checkcred failed; see if we're in the
19980Sstevel@tonic-gate * same place.
19990Sstevel@tonic-gate */
20000Sstevel@tonic-gate else if (l->ctl_position == e)
20010Sstevel@tonic-gate if (secpolicy_contract_observer_choice(cr))
20020Sstevel@tonic-gate break;
20030Sstevel@tonic-gate else
20040Sstevel@tonic-gate next = list_next(&q->ctq_events, e);
20050Sstevel@tonic-gate
20060Sstevel@tonic-gate /*
20070Sstevel@tonic-gate * cte_checkcred failed, and our position was
20080Sstevel@tonic-gate * changed. Start from there.
20090Sstevel@tonic-gate */
20100Sstevel@tonic-gate else
20110Sstevel@tonic-gate next = l->ctl_position;
20120Sstevel@tonic-gate } else {
20130Sstevel@tonic-gate break;
20140Sstevel@tonic-gate }
20150Sstevel@tonic-gate }
20160Sstevel@tonic-gate
20170Sstevel@tonic-gate /*
20180Sstevel@tonic-gate * We check for CTLF_COPYOUT again in case we dropped the queue
20190Sstevel@tonic-gate * lock in cte_checkcred.
20200Sstevel@tonic-gate */
20210Sstevel@tonic-gate return ((l->ctl_flags & CTLF_COPYOUT) || (l->ctl_position == NULL));
20220Sstevel@tonic-gate }
20230Sstevel@tonic-gate
20240Sstevel@tonic-gate /*
20250Sstevel@tonic-gate * cte_qwakeup
20260Sstevel@tonic-gate *
20270Sstevel@tonic-gate * Wakes up any waiting listeners and points them at the specified event.
20280Sstevel@tonic-gate */
20290Sstevel@tonic-gate static void
20300Sstevel@tonic-gate cte_qwakeup(ct_equeue_t *q, ct_kevent_t *e)
20310Sstevel@tonic-gate {
20320Sstevel@tonic-gate ct_listener_t *l;
20330Sstevel@tonic-gate
20340Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
20350Sstevel@tonic-gate
20360Sstevel@tonic-gate while (l = list_head(&q->ctq_tail)) {
20370Sstevel@tonic-gate list_remove(&q->ctq_tail, l);
20380Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_refs++;
20390Sstevel@tonic-gate if (l->ctl_flags & CTLF_RELIABLE)
20400Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_nreliable++;
20410Sstevel@tonic-gate l->ctl_position = e;
20420Sstevel@tonic-gate cv_signal(&l->ctl_cv);
20430Sstevel@tonic-gate pollwakeup(&l->ctl_pollhead, POLLIN);
20440Sstevel@tonic-gate }
20450Sstevel@tonic-gate }
20460Sstevel@tonic-gate
20470Sstevel@tonic-gate /*
20480Sstevel@tonic-gate * cte_copy
20490Sstevel@tonic-gate *
20500Sstevel@tonic-gate * Copies events from the specified contract event queue to the
20510Sstevel@tonic-gate * end of the specified process bundle queue. Only called from
20520Sstevel@tonic-gate * contract_adopt.
20530Sstevel@tonic-gate *
20540Sstevel@tonic-gate * We copy to the end of the target queue instead of mixing the events
20550Sstevel@tonic-gate * in their proper order because otherwise the act of adopting a
20560Sstevel@tonic-gate * contract would require a process to reset all process bundle
20570Sstevel@tonic-gate * listeners it needed to see the new events. This would, in turn,
20580Sstevel@tonic-gate * require the process to keep track of which preexisting events had
20590Sstevel@tonic-gate * already been processed.
20600Sstevel@tonic-gate */
20610Sstevel@tonic-gate static void
20620Sstevel@tonic-gate cte_copy(ct_equeue_t *q, ct_equeue_t *newq)
20630Sstevel@tonic-gate {
20640Sstevel@tonic-gate ct_kevent_t *e, *first = NULL;
20650Sstevel@tonic-gate
20660Sstevel@tonic-gate ASSERT(q->ctq_listno == CTEL_CONTRACT);
20670Sstevel@tonic-gate ASSERT(newq->ctq_listno == CTEL_PBUNDLE);
20680Sstevel@tonic-gate
20690Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
20700Sstevel@tonic-gate mutex_enter(&newq->ctq_lock);
20710Sstevel@tonic-gate
20720Sstevel@tonic-gate /*
20730Sstevel@tonic-gate * For now, only copy critical events.
20740Sstevel@tonic-gate */
20750Sstevel@tonic-gate for (e = list_head(&q->ctq_events); e != NULL;
20760Sstevel@tonic-gate e = list_next(&q->ctq_events, e)) {
20770Sstevel@tonic-gate if ((e->cte_flags & (CTE_INFO | CTE_ACK)) == 0) {
20780Sstevel@tonic-gate if (first == NULL)
20790Sstevel@tonic-gate first = e;
20800Sstevel@tonic-gate list_insert_tail(&newq->ctq_events, e);
20810Sstevel@tonic-gate cte_hold(e);
20820Sstevel@tonic-gate }
20830Sstevel@tonic-gate }
20840Sstevel@tonic-gate
20850Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
20860Sstevel@tonic-gate
20870Sstevel@tonic-gate if (first)
20880Sstevel@tonic-gate cte_qwakeup(newq, first);
20890Sstevel@tonic-gate
20900Sstevel@tonic-gate mutex_exit(&newq->ctq_lock);
20910Sstevel@tonic-gate }
20920Sstevel@tonic-gate
20930Sstevel@tonic-gate /*
20940Sstevel@tonic-gate * cte_trim
20950Sstevel@tonic-gate *
20960Sstevel@tonic-gate  * Trims unneeded events from an event queue.  The algorithm works as
20970Sstevel@tonic-gate * follows:
20980Sstevel@tonic-gate *
20990Sstevel@tonic-gate * Removes all informative and acknowledged critical events until the
21000Sstevel@tonic-gate * first referenced event is found.
21010Sstevel@tonic-gate *
21020Sstevel@tonic-gate * If a contract is specified, removes all events (regardless of
21030Sstevel@tonic-gate * acknowledgement) generated by that contract until the first event
21040Sstevel@tonic-gate  * referenced by a reliable listener is found.  Referenced events are
21050Sstevel@tonic-gate * removed by marking them "trimmed". Such events will be removed
21060Sstevel@tonic-gate * when the last reference is dropped and will be skipped by future
21070Sstevel@tonic-gate * listeners.
21080Sstevel@tonic-gate *
21090Sstevel@tonic-gate * This is pretty basic. Ideally this should remove from the middle of
21100Sstevel@tonic-gate * the list (i.e. beyond the first referenced event), and even
21110Sstevel@tonic-gate * referenced events.
21120Sstevel@tonic-gate */
21130Sstevel@tonic-gate static void
21140Sstevel@tonic-gate cte_trim(ct_equeue_t *q, contract_t *ct)
21150Sstevel@tonic-gate {
21160Sstevel@tonic-gate ct_kevent_t *e, *next;
21170Sstevel@tonic-gate int flags, stopper;
21180Sstevel@tonic-gate int start = 1;
21190Sstevel@tonic-gate
21200Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
21210Sstevel@tonic-gate
21220Sstevel@tonic-gate for (e = list_head(&q->ctq_events); e != NULL; e = next) {
21230Sstevel@tonic-gate next = list_next(&q->ctq_events, e);
21240Sstevel@tonic-gate flags = e->cte_flags;
21250Sstevel@tonic-gate stopper = (q->ctq_listno != CTEL_PBUNDLE) &&
21260Sstevel@tonic-gate (e->cte_nodes[q->ctq_listno].ctm_nreliable > 0);
21270Sstevel@tonic-gate if (e->cte_nodes[q->ctq_listno].ctm_refs == 0) {
21280Sstevel@tonic-gate if ((start && (flags & (CTE_INFO | CTE_ACK))) ||
21290Sstevel@tonic-gate (e->cte_contract == ct)) {
21300Sstevel@tonic-gate /*
21310Sstevel@tonic-gate * Toss informative and ACKed critical messages.
21320Sstevel@tonic-gate */
21330Sstevel@tonic-gate list_remove(&q->ctq_events, e);
21340Sstevel@tonic-gate cte_rele(e);
21350Sstevel@tonic-gate }
21360Sstevel@tonic-gate } else if ((e->cte_contract == ct) && !stopper) {
21370Sstevel@tonic-gate ASSERT(q->ctq_nlisteners != 0);
21380Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_trimmed = 1;
21390Sstevel@tonic-gate } else if (ct && !stopper) {
21400Sstevel@tonic-gate start = 0;
21410Sstevel@tonic-gate } else {
21420Sstevel@tonic-gate /*
21430Sstevel@tonic-gate * Don't free messages past the first reader.
21440Sstevel@tonic-gate */
21450Sstevel@tonic-gate break;
21460Sstevel@tonic-gate }
21470Sstevel@tonic-gate }
21480Sstevel@tonic-gate }
21490Sstevel@tonic-gate
21500Sstevel@tonic-gate /*
21510Sstevel@tonic-gate * cte_queue_drain
21520Sstevel@tonic-gate *
21530Sstevel@tonic-gate * Drain all events from the specified queue, and mark it dead. If
21540Sstevel@tonic-gate * "ack" is set, acknowledge any critical events we find along the
21550Sstevel@tonic-gate * way.
21560Sstevel@tonic-gate */
21570Sstevel@tonic-gate static void
21580Sstevel@tonic-gate cte_queue_drain(ct_equeue_t *q, int ack)
21590Sstevel@tonic-gate {
21600Sstevel@tonic-gate ct_kevent_t *e, *next;
21610Sstevel@tonic-gate ct_listener_t *l;
21620Sstevel@tonic-gate
21630Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
21640Sstevel@tonic-gate
21650Sstevel@tonic-gate for (e = list_head(&q->ctq_events); e != NULL; e = next) {
21660Sstevel@tonic-gate next = list_next(&q->ctq_events, e);
21670Sstevel@tonic-gate if (ack && ((e->cte_flags & (CTE_INFO | CTE_ACK)) == 0)) {
21680Sstevel@tonic-gate /*
21690Sstevel@tonic-gate * Make sure critical messages are eventually
21700Sstevel@tonic-gate * removed from the bundle queues.
21710Sstevel@tonic-gate */
21720Sstevel@tonic-gate mutex_enter(&e->cte_lock);
21730Sstevel@tonic-gate e->cte_flags |= CTE_ACK;
21740Sstevel@tonic-gate mutex_exit(&e->cte_lock);
21750Sstevel@tonic-gate ASSERT(MUTEX_HELD(&e->cte_contract->ct_lock));
21760Sstevel@tonic-gate e->cte_contract->ct_evcnt--;
21770Sstevel@tonic-gate }
21780Sstevel@tonic-gate list_remove(&q->ctq_events, e);
21790Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_refs = 0;
21800Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_nreliable = 0;
21810Sstevel@tonic-gate e->cte_nodes[q->ctq_listno].ctm_trimmed = 0;
21820Sstevel@tonic-gate cte_rele(e);
21830Sstevel@tonic-gate }
21840Sstevel@tonic-gate
21850Sstevel@tonic-gate /*
21860Sstevel@tonic-gate * This is necessary only because of CTEL_PBUNDLE listeners;
21870Sstevel@tonic-gate * the events they point to can move from one pbundle to
21880Sstevel@tonic-gate * another. Fortunately, this only happens if the contract is
21890Sstevel@tonic-gate * inherited, which (in turn) only happens if the process
21900Sstevel@tonic-gate * exits, which means it's an all-or-nothing deal. If this
21910Sstevel@tonic-gate * wasn't the case, we would instead need to keep track of
21920Sstevel@tonic-gate * listeners on a per-event basis, not just a per-queue basis.
21930Sstevel@tonic-gate * This would have the side benefit of letting us clean up
21940Sstevel@tonic-gate * trimmed events sooner (i.e. immediately), but would
21950Sstevel@tonic-gate * unfortunately make events even bigger than they already
21960Sstevel@tonic-gate * are.
21970Sstevel@tonic-gate */
21980Sstevel@tonic-gate for (l = list_head(&q->ctq_listeners); l;
21990Sstevel@tonic-gate l = list_next(&q->ctq_listeners, l)) {
22000Sstevel@tonic-gate l->ctl_flags |= CTLF_DEAD;
22010Sstevel@tonic-gate if (l->ctl_position) {
22020Sstevel@tonic-gate l->ctl_position = NULL;
22030Sstevel@tonic-gate list_insert_tail(&q->ctq_tail, l);
22040Sstevel@tonic-gate }
22050Sstevel@tonic-gate cv_broadcast(&l->ctl_cv);
22060Sstevel@tonic-gate }
22070Sstevel@tonic-gate
22080Sstevel@tonic-gate /*
22090Sstevel@tonic-gate * Disallow events.
22100Sstevel@tonic-gate */
22110Sstevel@tonic-gate q->ctq_flags |= CTQ_DEAD;
22120Sstevel@tonic-gate
22130Sstevel@tonic-gate /*
22140Sstevel@tonic-gate * If we represent the last reference to a reference counted
22150Sstevel@tonic-gate * process bundle queue, free it.
22160Sstevel@tonic-gate */
22170Sstevel@tonic-gate if ((q->ctq_flags & CTQ_REFFED) && (q->ctq_nlisteners == 0))
22180Sstevel@tonic-gate cte_queue_destroy(q);
22190Sstevel@tonic-gate else
22200Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
22210Sstevel@tonic-gate }
22220Sstevel@tonic-gate
22230Sstevel@tonic-gate /*
22240Sstevel@tonic-gate * cte_publish
22250Sstevel@tonic-gate *
22260Sstevel@tonic-gate * Publishes an event to a specific queue. Only called by
22270Sstevel@tonic-gate * cte_publish_all.
22280Sstevel@tonic-gate */
22290Sstevel@tonic-gate static void
22300Sstevel@tonic-gate cte_publish(ct_equeue_t *q, ct_kevent_t *e, timespec_t *tsp)
22310Sstevel@tonic-gate {
22320Sstevel@tonic-gate ASSERT(MUTEX_HELD(&q->ctq_lock));
22330Sstevel@tonic-gate
22340Sstevel@tonic-gate q->ctq_atime = *tsp;
22350Sstevel@tonic-gate
22360Sstevel@tonic-gate /*
22370Sstevel@tonic-gate * Don't publish if the event is informative and there aren't
22380Sstevel@tonic-gate * any listeners, or if the queue has been shut down.
22390Sstevel@tonic-gate */
22400Sstevel@tonic-gate if (((q->ctq_nlisteners == 0) && (e->cte_flags & (CTE_INFO|CTE_ACK))) ||
22410Sstevel@tonic-gate (q->ctq_flags & CTQ_DEAD)) {
22420Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
22430Sstevel@tonic-gate cte_rele(e);
22440Sstevel@tonic-gate return;
22450Sstevel@tonic-gate }
22460Sstevel@tonic-gate
22470Sstevel@tonic-gate /*
22480Sstevel@tonic-gate * Enqueue event
22490Sstevel@tonic-gate */
22500Sstevel@tonic-gate list_insert_tail(&q->ctq_events, e);
22510Sstevel@tonic-gate
22520Sstevel@tonic-gate /*
22530Sstevel@tonic-gate * Check for waiting listeners
22540Sstevel@tonic-gate */
22550Sstevel@tonic-gate cte_qwakeup(q, e);
22560Sstevel@tonic-gate
22570Sstevel@tonic-gate /*
22580Sstevel@tonic-gate * Trim unnecessary events from the queue.
22590Sstevel@tonic-gate */
22600Sstevel@tonic-gate cte_trim(q, NULL);
22610Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
22620Sstevel@tonic-gate }
22630Sstevel@tonic-gate
22640Sstevel@tonic-gate /*
22650Sstevel@tonic-gate * cte_publish_all
22660Sstevel@tonic-gate *
22670Sstevel@tonic-gate * Publish an event to all necessary event queues. The event, e, must
22680Sstevel@tonic-gate * be zallocated by the caller, and the event's flags and type must be
22690Sstevel@tonic-gate * set. The rest of the event's fields are initialized here.
22700Sstevel@tonic-gate */
22714845Svikram uint64_t
22720Sstevel@tonic-gate cte_publish_all(contract_t *ct, ct_kevent_t *e, nvlist_t *data, nvlist_t *gdata)
22730Sstevel@tonic-gate {
22740Sstevel@tonic-gate ct_equeue_t *q;
22750Sstevel@tonic-gate timespec_t ts;
22764845Svikram uint64_t evid;
22774845Svikram ct_kevent_t *negev;
22784845Svikram int negend;
22790Sstevel@tonic-gate
22800Sstevel@tonic-gate e->cte_contract = ct;
22810Sstevel@tonic-gate e->cte_data = data;
22820Sstevel@tonic-gate e->cte_gdata = gdata;
22830Sstevel@tonic-gate e->cte_refs = 3;
22844845Svikram evid = e->cte_id = atomic_add_64_nv(&ct->ct_type->ct_type_evid, 1);
22850Sstevel@tonic-gate contract_hold(ct);
22860Sstevel@tonic-gate
22874845Svikram /*
22884845Svikram * For a negotiation event we set the ct->ct_nevent field of the
22894845Svikram * contract for the duration of the negotiation
22904845Svikram */
22914845Svikram negend = 0;
22924845Svikram if (e->cte_flags & CTE_NEG) {
22934845Svikram cte_hold(e);
22944845Svikram ct->ct_nevent = e;
22954845Svikram } else if (e->cte_type == CT_EV_NEGEND) {
22964845Svikram negend = 1;
22974845Svikram }
22984845Svikram
22990Sstevel@tonic-gate gethrestime(&ts);
23000Sstevel@tonic-gate
23010Sstevel@tonic-gate /*
23020Sstevel@tonic-gate * ct_evtlock simply (and only) ensures that two events sent
23030Sstevel@tonic-gate * from the same contract are delivered to all queues in the
23040Sstevel@tonic-gate * same order.
23050Sstevel@tonic-gate */
23060Sstevel@tonic-gate mutex_enter(&ct->ct_evtlock);
23070Sstevel@tonic-gate
23080Sstevel@tonic-gate /*
23090Sstevel@tonic-gate * CTEL_CONTRACT - First deliver to the contract queue, acking
23100Sstevel@tonic-gate * the event if the contract has been orphaned.
23110Sstevel@tonic-gate */
23120Sstevel@tonic-gate mutex_enter(&ct->ct_lock);
23130Sstevel@tonic-gate mutex_enter(&ct->ct_events.ctq_lock);
23140Sstevel@tonic-gate if ((e->cte_flags & CTE_INFO) == 0) {
23150Sstevel@tonic-gate if (ct->ct_state >= CTS_ORPHAN)
23160Sstevel@tonic-gate e->cte_flags |= CTE_ACK;
23170Sstevel@tonic-gate else
23180Sstevel@tonic-gate ct->ct_evcnt++;
23190Sstevel@tonic-gate }
23200Sstevel@tonic-gate mutex_exit(&ct->ct_lock);
23210Sstevel@tonic-gate cte_publish(&ct->ct_events, e, &ts);
23220Sstevel@tonic-gate
23230Sstevel@tonic-gate /*
23240Sstevel@tonic-gate * CTEL_BUNDLE - Next deliver to the contract type's bundle
23250Sstevel@tonic-gate * queue.
23260Sstevel@tonic-gate */
23270Sstevel@tonic-gate mutex_enter(&ct->ct_type->ct_type_events.ctq_lock);
23280Sstevel@tonic-gate cte_publish(&ct->ct_type->ct_type_events, e, &ts);
23290Sstevel@tonic-gate
23300Sstevel@tonic-gate /*
23310Sstevel@tonic-gate * CTEL_PBUNDLE - Finally, if the contract has an owner,
23320Sstevel@tonic-gate * deliver to the owner's process bundle queue.
23330Sstevel@tonic-gate */
23340Sstevel@tonic-gate mutex_enter(&ct->ct_lock);
23350Sstevel@tonic-gate if (ct->ct_owner) {
23360Sstevel@tonic-gate /*
23370Sstevel@tonic-gate * proc_exit doesn't free event queues until it has
23380Sstevel@tonic-gate * abandoned all contracts.
23390Sstevel@tonic-gate */
23400Sstevel@tonic-gate ASSERT(ct->ct_owner->p_ct_equeue);
23410Sstevel@tonic-gate ASSERT(ct->ct_owner->p_ct_equeue[ct->ct_type->ct_type_index]);
23420Sstevel@tonic-gate q = ct->ct_owner->p_ct_equeue[ct->ct_type->ct_type_index];
23430Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
23440Sstevel@tonic-gate mutex_exit(&ct->ct_lock);
23450Sstevel@tonic-gate cte_publish(q, e, &ts);
23460Sstevel@tonic-gate } else {
23470Sstevel@tonic-gate mutex_exit(&ct->ct_lock);
23480Sstevel@tonic-gate cte_rele(e);
23490Sstevel@tonic-gate }
23500Sstevel@tonic-gate
23514845Svikram if (negend) {
23524845Svikram mutex_enter(&ct->ct_lock);
23534845Svikram negev = ct->ct_nevent;
23544845Svikram ct->ct_nevent = NULL;
23554845Svikram cte_rele(negev);
23564845Svikram mutex_exit(&ct->ct_lock);
23574845Svikram }
23584845Svikram
23590Sstevel@tonic-gate mutex_exit(&ct->ct_evtlock);
23604845Svikram
23614845Svikram return (evid);
23620Sstevel@tonic-gate }
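
/*
 * Example (illustrative sketch, not part of the framework itself): a
 * contract type module publishing an informative event follows the
 * calling convention documented above -- zallocate the event, set its
 * type and flags, and let cte_publish_all() initialize the rest.  The
 * event type constant below is only a placeholder for whatever the
 * contract type defines; "ct" and "data" are assumed to be the
 * contract and its packed-to-be event nvlist.
 *
 *	ct_kevent_t *e;
 *	uint64_t evid;
 *
 *	e = kmem_zalloc(sizeof (ct_kevent_t), KM_SLEEP);
 *	e->cte_flags = CTE_INFO;
 *	e->cte_type = CT_EV_EXAMPLE;
 *	evid = cte_publish_all(ct, e, data, NULL);
 */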
23630Sstevel@tonic-gate
23640Sstevel@tonic-gate /*
23650Sstevel@tonic-gate * cte_add_listener
23660Sstevel@tonic-gate *
23670Sstevel@tonic-gate * Add a new listener to an event queue.
23680Sstevel@tonic-gate */
23690Sstevel@tonic-gate void
23700Sstevel@tonic-gate cte_add_listener(ct_equeue_t *q, ct_listener_t *l)
23710Sstevel@tonic-gate {
23720Sstevel@tonic-gate cv_init(&l->ctl_cv, NULL, CV_DEFAULT, NULL);
23730Sstevel@tonic-gate l->ctl_equeue = q;
23740Sstevel@tonic-gate l->ctl_position = NULL;
23750Sstevel@tonic-gate l->ctl_flags = 0;
23760Sstevel@tonic-gate
23770Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
23780Sstevel@tonic-gate list_insert_head(&q->ctq_tail, l);
23790Sstevel@tonic-gate list_insert_head(&q->ctq_listeners, l);
23800Sstevel@tonic-gate q->ctq_nlisteners++;
23810Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
23820Sstevel@tonic-gate }
23830Sstevel@tonic-gate
23840Sstevel@tonic-gate /*
23850Sstevel@tonic-gate * cte_remove_listener
23860Sstevel@tonic-gate *
23870Sstevel@tonic-gate * Remove a listener from an event queue. No other queue activities
23880Sstevel@tonic-gate * (e.g. cte_get_event) may be in progress at this endpoint when this
23890Sstevel@tonic-gate * is called.
23900Sstevel@tonic-gate */
23910Sstevel@tonic-gate void
23920Sstevel@tonic-gate cte_remove_listener(ct_listener_t *l)
23930Sstevel@tonic-gate {
23940Sstevel@tonic-gate ct_equeue_t *q = l->ctl_equeue;
23950Sstevel@tonic-gate ct_kevent_t *e;
23960Sstevel@tonic-gate
23970Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
23980Sstevel@tonic-gate
23990Sstevel@tonic-gate ASSERT((l->ctl_flags & (CTLF_COPYOUT|CTLF_RESET)) == 0);
24000Sstevel@tonic-gate
24010Sstevel@tonic-gate if ((e = l->ctl_position) != NULL)
24020Sstevel@tonic-gate cte_qrele(q, l, e);
24030Sstevel@tonic-gate else
24040Sstevel@tonic-gate list_remove(&q->ctq_tail, l);
24050Sstevel@tonic-gate l->ctl_position = NULL;
24060Sstevel@tonic-gate
24070Sstevel@tonic-gate q->ctq_nlisteners--;
24080Sstevel@tonic-gate list_remove(&q->ctq_listeners, l);
24090Sstevel@tonic-gate
24100Sstevel@tonic-gate if (l->ctl_flags & CTLF_RELIABLE)
24110Sstevel@tonic-gate q->ctq_nreliable--;
24120Sstevel@tonic-gate
24130Sstevel@tonic-gate /*
24140Sstevel@tonic-gate * If we are the last listener of a dead reference-counted
24150Sstevel@tonic-gate * queue (i.e. a process bundle) we free it. Otherwise we just
24160Sstevel@tonic-gate * trim any events which may have been kept around for our
24170Sstevel@tonic-gate * benefit.
24180Sstevel@tonic-gate */
24190Sstevel@tonic-gate if ((q->ctq_flags & CTQ_REFFED) && (q->ctq_flags & CTQ_DEAD) &&
24200Sstevel@tonic-gate (q->ctq_nlisteners == 0)) {
24210Sstevel@tonic-gate cte_queue_destroy(q);
24220Sstevel@tonic-gate } else {
24230Sstevel@tonic-gate cte_trim(q, NULL);
24240Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
24250Sstevel@tonic-gate }
24260Sstevel@tonic-gate }
24270Sstevel@tonic-gate
24280Sstevel@tonic-gate /*
24290Sstevel@tonic-gate * cte_reset_listener
24300Sstevel@tonic-gate *
24310Sstevel@tonic-gate * Moves a listener's queue pointer to the beginning of the queue.
24320Sstevel@tonic-gate */
24330Sstevel@tonic-gate void
24340Sstevel@tonic-gate cte_reset_listener(ct_listener_t *l)
24350Sstevel@tonic-gate {
24360Sstevel@tonic-gate ct_equeue_t *q = l->ctl_equeue;
24370Sstevel@tonic-gate
24380Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
24390Sstevel@tonic-gate
24400Sstevel@tonic-gate /*
24410Sstevel@tonic-gate * We allow an asynchronous reset because it doesn't make a
24420Sstevel@tonic-gate * whole lot of sense to make reset block or fail. We already
24430Sstevel@tonic-gate * have most of the mechanism needed thanks to queue trimming,
24440Sstevel@tonic-gate * so implementing it isn't a big deal.
24450Sstevel@tonic-gate */
24460Sstevel@tonic-gate if (l->ctl_flags & CTLF_COPYOUT)
24470Sstevel@tonic-gate l->ctl_flags |= CTLF_RESET;
24480Sstevel@tonic-gate
24490Sstevel@tonic-gate (void) cte_qmove(q, l, list_head(&q->ctq_events));
24500Sstevel@tonic-gate
24510Sstevel@tonic-gate /*
24520Sstevel@tonic-gate * Inform blocked readers.
24530Sstevel@tonic-gate */
24540Sstevel@tonic-gate cv_broadcast(&l->ctl_cv);
24550Sstevel@tonic-gate pollwakeup(&l->ctl_pollhead, POLLIN);
24560Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
24570Sstevel@tonic-gate }
24580Sstevel@tonic-gate
24590Sstevel@tonic-gate /*
24600Sstevel@tonic-gate * cte_next_event
24610Sstevel@tonic-gate *
24620Sstevel@tonic-gate * Moves the event pointer for the specified listener to the next event
24630Sstevel@tonic-gate * on the queue. To avoid races, this movement only occurs if the
24640Sstevel@tonic-gate * specified event id matches that of the current event. This is used
24650Sstevel@tonic-gate * primarily to skip events that have been read but whose extended data
24660Sstevel@tonic-gate * haven't been copied out.
24670Sstevel@tonic-gate */
24680Sstevel@tonic-gate int
24690Sstevel@tonic-gate cte_next_event(ct_listener_t *l, uint64_t id)
24700Sstevel@tonic-gate {
24710Sstevel@tonic-gate ct_equeue_t *q = l->ctl_equeue;
24720Sstevel@tonic-gate ct_kevent_t *old;
24730Sstevel@tonic-gate
24740Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
24750Sstevel@tonic-gate
24760Sstevel@tonic-gate if (l->ctl_flags & CTLF_COPYOUT)
24770Sstevel@tonic-gate l->ctl_flags |= CTLF_RESET;
24780Sstevel@tonic-gate
24790Sstevel@tonic-gate if (((old = l->ctl_position) != NULL) && (old->cte_id == id))
24800Sstevel@tonic-gate (void) cte_qmove(q, l, list_next(&q->ctq_events, old));
24810Sstevel@tonic-gate
24820Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
24830Sstevel@tonic-gate
24840Sstevel@tonic-gate return (0);
24850Sstevel@tonic-gate }
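
/*
 * Example (illustrative sketch): a reader that fetched an event with
 * cte_get_event() but chose not to retrieve its extended data can skip
 * past it by passing back the event id it was given in ctev_evid.  If
 * another thread has already moved the listener, the id no longer
 * matches and the call has no effect.
 *
 *	(void) cte_next_event(l, evid);
 */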
24860Sstevel@tonic-gate
24870Sstevel@tonic-gate /*
24880Sstevel@tonic-gate * cte_get_event
24890Sstevel@tonic-gate *
24900Sstevel@tonic-gate * Reads an event from an event endpoint. If "nonblock" is clear, we
24910Sstevel@tonic-gate * block until a suitable event is ready. If "crit" is set, we only
24920Sstevel@tonic-gate * read critical events. Note that while "cr" is the caller's cred,
24930Sstevel@tonic-gate * "zuniqid" is the unique id of the zone the calling contract
24940Sstevel@tonic-gate * filesystem was mounted in.
24950Sstevel@tonic-gate */
24960Sstevel@tonic-gate int
24970Sstevel@tonic-gate cte_get_event(ct_listener_t *l, int nonblock, void *uaddr, const cred_t *cr,
24980Sstevel@tonic-gate uint64_t zuniqid, int crit)
24990Sstevel@tonic-gate {
25000Sstevel@tonic-gate ct_equeue_t *q = l->ctl_equeue;
25010Sstevel@tonic-gate ct_kevent_t *temp;
25020Sstevel@tonic-gate int result = 0;
25030Sstevel@tonic-gate int partial = 0;
25040Sstevel@tonic-gate size_t size, gsize, len;
25050Sstevel@tonic-gate model_t mdl = get_udatamodel();
25060Sstevel@tonic-gate STRUCT_DECL(ct_event, ev);
25070Sstevel@tonic-gate STRUCT_INIT(ev, mdl);
25080Sstevel@tonic-gate
25090Sstevel@tonic-gate /*
25100Sstevel@tonic-gate * cte_qreadable checks for CTLF_COPYOUT as well as ensures
25110Sstevel@tonic-gate * that there exists, and we are pointing to, an appropriate
25120Sstevel@tonic-gate * event. It may temporarily drop ctq_lock, but that doesn't
25130Sstevel@tonic-gate * really matter to us.
25140Sstevel@tonic-gate */
25150Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
25160Sstevel@tonic-gate while (cte_qreadable(q, l, cr, zuniqid, crit)) {
25170Sstevel@tonic-gate if (nonblock) {
25180Sstevel@tonic-gate result = EAGAIN;
25190Sstevel@tonic-gate goto error;
25200Sstevel@tonic-gate }
25210Sstevel@tonic-gate if (q->ctq_flags & CTQ_DEAD) {
25220Sstevel@tonic-gate result = EIDRM;
25230Sstevel@tonic-gate goto error;
25240Sstevel@tonic-gate }
25250Sstevel@tonic-gate result = cv_wait_sig(&l->ctl_cv, &q->ctq_lock);
25260Sstevel@tonic-gate if (result == 0) {
25270Sstevel@tonic-gate result = EINTR;
25280Sstevel@tonic-gate goto error;
25290Sstevel@tonic-gate }
25300Sstevel@tonic-gate }
25310Sstevel@tonic-gate temp = l->ctl_position;
25320Sstevel@tonic-gate cte_hold(temp);
25330Sstevel@tonic-gate l->ctl_flags |= CTLF_COPYOUT;
25340Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
25350Sstevel@tonic-gate
25360Sstevel@tonic-gate /*
25370Sstevel@tonic-gate * We now have an event. Copy in the user event structure to
25380Sstevel@tonic-gate * see how much space we have to work with.
25390Sstevel@tonic-gate */
25400Sstevel@tonic-gate result = copyin(uaddr, STRUCT_BUF(ev), STRUCT_SIZE(ev));
25410Sstevel@tonic-gate if (result)
25420Sstevel@tonic-gate goto copyerr;
25430Sstevel@tonic-gate
25440Sstevel@tonic-gate /*
25450Sstevel@tonic-gate * Determine what data we have and what the user should be
25460Sstevel@tonic-gate * allowed to see.
25470Sstevel@tonic-gate */
25480Sstevel@tonic-gate size = gsize = 0;
25490Sstevel@tonic-gate if (temp->cte_data) {
25500Sstevel@tonic-gate VERIFY(nvlist_size(temp->cte_data, &size,
25510Sstevel@tonic-gate NV_ENCODE_NATIVE) == 0);
25520Sstevel@tonic-gate ASSERT(size != 0);
25530Sstevel@tonic-gate }
25540Sstevel@tonic-gate if (zuniqid == GLOBAL_ZONEUNIQID && temp->cte_gdata) {
25550Sstevel@tonic-gate VERIFY(nvlist_size(temp->cte_gdata, &gsize,
25560Sstevel@tonic-gate NV_ENCODE_NATIVE) == 0);
25570Sstevel@tonic-gate ASSERT(gsize != 0);
25580Sstevel@tonic-gate }
25590Sstevel@tonic-gate
25600Sstevel@tonic-gate /*
25610Sstevel@tonic-gate * If we have enough space, copy out the extended event data.
25620Sstevel@tonic-gate */
25630Sstevel@tonic-gate len = size + gsize;
25640Sstevel@tonic-gate if (len) {
25650Sstevel@tonic-gate if (STRUCT_FGET(ev, ctev_nbytes) >= len) {
25660Sstevel@tonic-gate char *buf = kmem_alloc(len, KM_SLEEP);
25670Sstevel@tonic-gate
25680Sstevel@tonic-gate if (size)
25690Sstevel@tonic-gate VERIFY(nvlist_pack(temp->cte_data, &buf, &size,
25700Sstevel@tonic-gate NV_ENCODE_NATIVE, KM_SLEEP) == 0);
25710Sstevel@tonic-gate if (gsize) {
25720Sstevel@tonic-gate char *tmp = buf + size;
25730Sstevel@tonic-gate
25740Sstevel@tonic-gate VERIFY(nvlist_pack(temp->cte_gdata, &tmp,
25750Sstevel@tonic-gate &gsize, NV_ENCODE_NATIVE, KM_SLEEP) == 0);
25760Sstevel@tonic-gate }
25770Sstevel@tonic-gate
25780Sstevel@tonic-gate /* This shouldn't have changed */
25790Sstevel@tonic-gate ASSERT(size + gsize == len);
25800Sstevel@tonic-gate result = copyout(buf, STRUCT_FGETP(ev, ctev_buffer),
25810Sstevel@tonic-gate len);
25820Sstevel@tonic-gate kmem_free(buf, len);
25830Sstevel@tonic-gate if (result)
25840Sstevel@tonic-gate goto copyerr;
25850Sstevel@tonic-gate } else {
25860Sstevel@tonic-gate partial = 1;
25870Sstevel@tonic-gate }
25880Sstevel@tonic-gate }
25890Sstevel@tonic-gate
25900Sstevel@tonic-gate /*
25910Sstevel@tonic-gate * Copy out the common event data.
25920Sstevel@tonic-gate */
25930Sstevel@tonic-gate STRUCT_FSET(ev, ctev_id, temp->cte_contract->ct_id);
25940Sstevel@tonic-gate STRUCT_FSET(ev, ctev_evid, temp->cte_id);
25950Sstevel@tonic-gate STRUCT_FSET(ev, ctev_cttype,
25960Sstevel@tonic-gate temp->cte_contract->ct_type->ct_type_index);
25974845Svikram STRUCT_FSET(ev, ctev_flags, temp->cte_flags &
25984845Svikram (CTE_ACK|CTE_INFO|CTE_NEG));
25990Sstevel@tonic-gate STRUCT_FSET(ev, ctev_type, temp->cte_type);
26000Sstevel@tonic-gate STRUCT_FSET(ev, ctev_nbytes, len);
26010Sstevel@tonic-gate STRUCT_FSET(ev, ctev_goffset, size);
26020Sstevel@tonic-gate result = copyout(STRUCT_BUF(ev), uaddr, STRUCT_SIZE(ev));
26030Sstevel@tonic-gate
26040Sstevel@tonic-gate copyerr:
26050Sstevel@tonic-gate /*
26060Sstevel@tonic-gate * Only move our location in the queue if all copyouts were
26070Sstevel@tonic-gate * successful, the caller provided enough space for the entire
26080Sstevel@tonic-gate * event, and our endpoint wasn't reset or otherwise moved by
26090Sstevel@tonic-gate * another thread.
26100Sstevel@tonic-gate */
26110Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
26120Sstevel@tonic-gate if (result)
26130Sstevel@tonic-gate result = EFAULT;
26140Sstevel@tonic-gate else if (!partial && ((l->ctl_flags & CTLF_RESET) == 0) &&
26150Sstevel@tonic-gate (l->ctl_position == temp))
26160Sstevel@tonic-gate (void) cte_qmove(q, l, list_next(&q->ctq_events, temp));
26170Sstevel@tonic-gate l->ctl_flags &= ~(CTLF_COPYOUT|CTLF_RESET);
26180Sstevel@tonic-gate /*
26190Sstevel@tonic-gate * Signal any readers blocked on our CTLF_COPYOUT.
26200Sstevel@tonic-gate */
26210Sstevel@tonic-gate cv_signal(&l->ctl_cv);
26220Sstevel@tonic-gate cte_rele(temp);
26230Sstevel@tonic-gate
26240Sstevel@tonic-gate error:
26250Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
26260Sstevel@tonic-gate return (result);
26270Sstevel@tonic-gate }
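
/*
 * Note on buffer sizing (illustrative, consumer-side sketch): when the
 * buffer described by ctev_nbytes is too small for the packed nvlists,
 * cte_get_event() copies out only the common event data, sets
 * ctev_nbytes to the required length, and leaves the listener pointing
 * at the same event, so the caller can retry with a larger buffer.  On
 * a full read the type-specific data occupies [0, ctev_goffset) of the
 * buffer and the global zone data, if any, begins at ctev_goffset.  A
 * consumer holding such a buffer might unpack it roughly as follows
 * (assuming both parts are present):
 *
 *	(void) nvlist_unpack(buf, ev.ctev_goffset, &data, 0);
 *	(void) nvlist_unpack(buf + ev.ctev_goffset,
 *	    ev.ctev_nbytes - ev.ctev_goffset, &gdata, 0);
 */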
26280Sstevel@tonic-gate
26290Sstevel@tonic-gate /*
26300Sstevel@tonic-gate * cte_set_reliable
26310Sstevel@tonic-gate *
26320Sstevel@tonic-gate * Requests that events be reliably delivered to an event endpoint.
26330Sstevel@tonic-gate * Unread informative and acknowledged critical events will not be
26340Sstevel@tonic-gate * removed from the queue until this listener reads or skips them.
26350Sstevel@tonic-gate * Because a listener could maliciously request reliable delivery and
26360Sstevel@tonic-gate * then do nothing, this requires that PRIV_CONTRACT_EVENT be in the
26370Sstevel@tonic-gate * caller's effective set.
26380Sstevel@tonic-gate */
26390Sstevel@tonic-gate int
26400Sstevel@tonic-gate cte_set_reliable(ct_listener_t *l, const cred_t *cr)
26410Sstevel@tonic-gate {
26420Sstevel@tonic-gate ct_equeue_t *q = l->ctl_equeue;
26430Sstevel@tonic-gate int error;
26440Sstevel@tonic-gate
26450Sstevel@tonic-gate if ((error = secpolicy_contract_event(cr)) != 0)
26460Sstevel@tonic-gate return (error);
26470Sstevel@tonic-gate
26480Sstevel@tonic-gate mutex_enter(&q->ctq_lock);
26490Sstevel@tonic-gate if ((l->ctl_flags & CTLF_RELIABLE) == 0) {
26500Sstevel@tonic-gate l->ctl_flags |= CTLF_RELIABLE;
26510Sstevel@tonic-gate q->ctq_nreliable++;
26520Sstevel@tonic-gate if (l->ctl_position != NULL)
26530Sstevel@tonic-gate l->ctl_position->cte_nodes[q->ctq_listno].
26540Sstevel@tonic-gate ctm_nreliable++;
26550Sstevel@tonic-gate }
26560Sstevel@tonic-gate mutex_exit(&q->ctq_lock);
26570Sstevel@tonic-gate
26580Sstevel@tonic-gate return (0);
26590Sstevel@tonic-gate }
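
/*
 * Example (illustrative sketch): a kernel caller requesting reliable
 * delivery on behalf of the current thread passes its credential
 * through and propagates any privilege failure; the check for
 * PRIV_CONTRACT_EVENT happens inside cte_set_reliable().
 *
 *	if ((error = cte_set_reliable(l, CRED())) != 0)
 *		return (error);
 */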