/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/atomic.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/exacct.h>
#include <sys/id_space.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/modhash.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/rctl.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/zone.h>
#include <sys/cpuvar.h>
#include <sys/fss.h>
#include <sys/class.h>
#include <sys/project.h>

/*
 * Tasks
 *
 *   A task is a collection of processes, associated with a common project ID
 *   and related by a common initial parent.  The task primarily represents a
 *   natural process sequence with known resource usage, although it can also
 *   be viewed as a convenient grouping of processes for signal delivery,
 *   processor binding, and administrative operations.
 *
 * Membership and observership
 *   We can conceive of situations where processes outside of the task may
 *   wish to examine the resource usage of the task.  Similarly, a number of
 *   the administrative operations on a task can be performed by processes
 *   that are not members of the task.  Accordingly, we must design a locking
 *   strategy where observers of the task, who wish to examine or operate on
 *   the task, and members of the task, who can perform the mentioned
 *   operations as well as leave the task, see a consistent and correct
 *   representation of the task at all times.
 *
 * Locking
 *   Because task membership is a new relation between processes, its locking
 *   becomes an additional responsibility of the pidlock/p_lock locking
 *   sequence; however, tasks closely resemble sessions and the session locking
 *   model is mostly appropriate for the interaction of tasks, processes, and
 *   procfs.
 *
 *   kmutex_t task_hash_lock
 *     task_hash_lock is a global lock protecting the contents of the task
 *     ID-to-task pointer hash.  Holders of task_hash_lock must not attempt to
 *     acquire pidlock or p_lock.
 *   uint_t tk_hold_count
 *     tk_hold_count, the number of members and observers of the current task,
 *     must be manipulated atomically.
 *   proc_t *tk_memb_list
 *   proc_t *p_tasknext
 *   proc_t *p_taskprev
 *     The task's membership list is protected by pidlock, and is therefore
 *     always acquired before any of its members' p_lock mutexes.  The p_task
 *     member of the proc structure is protected by pidlock or p_lock for
 *     reading, and by both pidlock and p_lock for modification, as is done
 *     for p_sessp.  The key point is that only the process itself can modify
 *     its p_task, not any other entity on the system.  (/proc will use
 *     prlock() to prevent the process from leaving, as opposed to pidlock.)
 *   kmutex_t tk_usage_lock
 *     tk_usage_lock is a per-task lock protecting the contents of the task
 *     usage structure and tk_nlwps counter for the task.max-lwps resource
 *     control.
 */

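/*
 * Usage sketch (added for illustration; not part of the original block
 * comment above): an observer that merely wants to inspect a task can look it
 * up by ID and hold it, without ever acquiring pidlock or p_lock:
 *
 *	task_t *tk;
 *
 *	if ((tk = task_hold_by_id(id)) != NULL) {
 *		mutex_enter(&tk->tk_usage_lock);
 *		... examine tk->tk_usage ...
 *		mutex_exit(&tk->tk_usage_lock);
 *		task_rele(tk);
 *	}
 */
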
int task_hash_size = 256;
static kmutex_t task_hash_lock;
static mod_hash_t *task_hash;

static id_space_t *taskid_space;	/* global taskid space */
static kmem_cache_t *task_cache;	/* kmem cache for task structures */

rctl_hndl_t rc_task_lwps;
rctl_hndl_t rc_task_nprocs;
rctl_hndl_t rc_task_cpu_time;

/*
 * Resource usage is committed using task queues; if taskq_dispatch() fails
 * due to resource constraints, the task is placed on a list for background
 * processing by the task_commit_thread() backup thread.
 */
static kmutex_t task_commit_lock;	/* protects list pointers and cv */
static kcondvar_t task_commit_cv;	/* wakeup task_commit_thread */
static task_t *task_commit_head = NULL;
static task_t *task_commit_tail = NULL;
kthread_t *task_commit_thread;

static void task_commit();
static kstat_t *task_kstat_create(task_t *, zone_t *);
static void task_kstat_delete(task_t *);

/*
 * static rctl_qty_t task_lwps_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_lwps_usage() is the usage operation for the resource control
 *   associated with the number of LWPs in a task.
 *
 * Return values
 *   The number of LWPs in the given task is returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static rctl_qty_t
task_lwps_usage(rctl_t *r, proc_t *p)
{
	task_t *t;
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_task;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nlwps = t->tk_nlwps;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nlwps);
}

/*
 * static int task_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_lwps_test() is the test-if-valid-increment for the resource control
 *   for the number of LWPs in a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */
/*ARGSUSED*/
static int
task_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
	nlwps = e->rcep_p.task->tk_nlwps;

	if (nlwps + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

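/*
 * static int task_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
 *   rctl_qty_t nv)
 *
 * Overview
 *   task_lwps_set() is the set operation for the task.max-lwps resource
 *   control; it records the new control value in the task's tk_nlwps_ctl.
 *
 * Return values
 *   0 is always returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */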
/*ARGSUSED*/
static int
task_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	e->rcep_p.task->tk_nlwps_ctl = nv;
	return (0);
}

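/*
 * static rctl_qty_t task_nprocs_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_nprocs_usage() is the usage operation for the resource control
 *   associated with the number of processes in a task.
 *
 * Return values
 *   The number of processes in the given task is returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */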
/*ARGSUSED*/
static rctl_qty_t
task_nprocs_usage(rctl_t *r, proc_t *p)
{
	task_t *t;
	rctl_qty_t nprocs;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_task;
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	nprocs = t->tk_nprocs;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	return (nprocs);
}

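/*
 * static int task_nprocs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_nprocs_test() is the test-if-valid-increment for the resource
 *   control for the number of processes in a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */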
/*ARGSUSED*/
static int
task_nprocs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nprocs;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
	nprocs = e->rcep_p.task->tk_nprocs;

	if (nprocs + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

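/*
 * static int task_nprocs_set(rctl_t *rctl, struct proc *p,
 *   rctl_entity_p_t *e, rctl_qty_t nv)
 *
 * Overview
 *   task_nprocs_set() is the set operation for the task.max-processes
 *   resource control; it records the new control value in the task's
 *   tk_nprocs_ctl.
 *
 * Return values
 *   0 is always returned.
 *
 * Caller's context
 *   p->p_lock must be held across the call.
 */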
/*ARGSUSED*/
static int
task_nprocs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	e->rcep_p.task->tk_nprocs_ctl = nv;
	return (0);
}

/*
 * static rctl_qty_t task_cpu_time_usage(rctl_t *r, proc_t *p)
 *
 * Overview
 *   task_cpu_time_usage() is the usage operation for the resource control
 *   associated with the total accrued CPU seconds for a task.
 *
 * Return values
 *   The number of CPU seconds consumed by the task is returned.
 *
 * Caller's context
 *   The given task must be held across the call.
 */
/*ARGSUSED*/
static rctl_qty_t
task_cpu_time_usage(rctl_t *r, proc_t *p)
{
	task_t *t = p->p_task;

	ASSERT(MUTEX_HELD(&p->p_lock));
	return (t->tk_cpu_time);
}

/*
 * rctl_qty_t task_cpu_time_incr(task_t *t, rctl_qty_t incr)
 *
 * Overview
 *   task_cpu_time_incr() increments the amount of CPU time used
 *   by this task.
 *
 * Return values
 *   The task's updated CPU time in seconds if one or more whole seconds
 *   have accumulated, 0 otherwise.
 *
 * Caller's context
 *   This is called by the clock tick accounting function to charge
 *   CPU time to a task.
 */
rctl_qty_t
task_cpu_time_incr(task_t *t, rctl_qty_t incr)
{
	rctl_qty_t ret = 0;

	mutex_enter(&t->tk_cpu_time_lock);
	t->tk_cpu_ticks += incr;
	if (t->tk_cpu_ticks >= hz) {
		t->tk_cpu_time += t->tk_cpu_ticks / hz;
		t->tk_cpu_ticks = t->tk_cpu_ticks % hz;
		ret = t->tk_cpu_time;
	}
	mutex_exit(&t->tk_cpu_time_lock);

	return (ret);
}

/*
 * static int task_cpu_time_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *   struct rctl_val *rcntl, rctl_qty_t incr, uint_t flags)
 *
 * Overview
 *   task_cpu_time_test() is the test-if-valid-increment for the resource
 *   control for the total accrued CPU seconds for a task.
 *
 * Return values
 *   0 if the threshold limit was not passed, 1 if the limit was passed.
 *
 * Caller's context
 *   The given task must be held across the call.
 */
/*ARGSUSED*/
static int
task_cpu_time_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
    struct rctl_val *rcntl, rctl_qty_t incr, uint_t flags)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_TASK);
	if (e->rcep_p.task == NULL)
		return (0);

	if (incr >= rcntl->rcv_value)
		return (1);

	return (0);
}

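/*
 * static task_t *task_find(taskid_t id, zoneid_t zoneid)
 *
 * Overview
 *   task_find() looks up the task with the given ID in the task hash,
 *   optionally restricting the match to the given zone (pass ALL_ZONES to
 *   search every zone).
 *
 * Return values
 *   A pointer to the matching task, or NULL if no task with that ID exists
 *   in the requested zone.
 *
 * Caller's context
 *   task_hash_lock must be held across the call.
 */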
static task_t *
task_find(taskid_t id, zoneid_t zoneid)
{
	task_t *tk;

	ASSERT(MUTEX_HELD(&task_hash_lock));

	if (mod_hash_find(task_hash, (mod_hash_key_t)(uintptr_t)id,
	    (mod_hash_val_t *)&tk) == MH_ERR_NOTFOUND ||
	    (zoneid != ALL_ZONES && zoneid != tk->tk_zone->zone_id))
		return (NULL);

	return (tk);
}

/*
 * task_hold_by_id(), task_hold_by_id_zone()
 *
 * Overview
 *   task_hold_by_id() is used to take a reference on a task by its task id,
 *   supporting the various system call interfaces for obtaining resource data,
 *   delivering signals, and so forth.
 *
 * Return values
 *   Returns a pointer to the task_t with taskid_t id.  The task is returned
 *   with its hold count incremented by one.  Returns NULL if there
 *   is no task with the requested id.
 *
 * Caller's context
 *   Caller must not be holding task_hash_lock.  No restrictions on context.
 */
task_t *
task_hold_by_id_zone(taskid_t id, zoneid_t zoneid)
{
	task_t *tk;

	mutex_enter(&task_hash_lock);
	if ((tk = task_find(id, zoneid)) != NULL)
		atomic_add_32(&tk->tk_hold_count, 1);
	mutex_exit(&task_hash_lock);

	return (tk);
}

task_t *
task_hold_by_id(taskid_t id)
{
	zoneid_t zoneid;

	if (INGLOBALZONE(curproc))
		zoneid = ALL_ZONES;
	else
		zoneid = getzoneid();
	return (task_hold_by_id_zone(id, zoneid));
}

/*
 * void task_hold(task_t *)
 *
 * Overview
 *   task_hold() is used to take an additional reference to the given task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restriction on context.
 */
void
task_hold(task_t *tk)
{
	atomic_add_32(&tk->tk_hold_count, 1);
}

/*
 * void task_rele(task_t *)
 *
 * Overview
 *   task_rele() relinquishes a reference on the given task, which was acquired
 *   via task_hold() or task_hold_by_id().  If this is the last member or
 *   observer of the task, dispatch it for commitment via the accounting
 *   subsystem.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Caller must not be holding the task_hash_lock.
 */
void
task_rele(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	if (atomic_add_32_nv(&tk->tk_hold_count, -1) > 0) {
		mutex_exit(&task_hash_lock);
		return;
	}

	ASSERT(tk->tk_nprocs == 0);

	mutex_enter(&tk->tk_zone->zone_nlwps_lock);
	tk->tk_proj->kpj_ntasks--;
	mutex_exit(&tk->tk_zone->zone_nlwps_lock);

	task_kstat_delete(tk);

	if (mod_hash_destroy(task_hash,
	    (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
		panic("unable to delete task %d", tk->tk_tkid);
	mutex_exit(&task_hash_lock);

	/*
	 * At this point, there are no members or observers of the task, so we
	 * can safely send it on for commitment to the accounting subsystem.
	 * The task will be destroyed in task_end() subsequent to commitment.
	 * Since we may be called with pidlock held, taskq_dispatch() cannot
	 * sleep.  Commitment is handled by a backup thread in case dispatching
	 * the task fails.
	 */
	if (taskq_dispatch(exacct_queue, exacct_commit_task, tk,
	    TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&task_commit_lock);
		if (task_commit_head == NULL) {
			task_commit_head = task_commit_tail = tk;
		} else {
			task_commit_tail->tk_commit_next = tk;
			task_commit_tail = tk;
		}
		cv_signal(&task_commit_cv);
		mutex_exit(&task_commit_lock);
	}
}

/*
 * task_t *task_create(projid_t, zone_t *)
 *
 * Overview
 *   A process constructing a new task calls task_create() to construct and
 *   preinitialize the task for the appropriate destination project.  Only one
 *   task, the primordial task0, is not created with task_create().
 *
 * Return values
 *   A pointer to the newly created task is returned.
 *
 * Caller's context
 *   Caller's context should be safe for KM_SLEEP allocations.
 *   The caller should appropriately bump the kpj_ntasks counter on the
 *   project that contains this task.
 */
task_t *
task_create(projid_t projid, zone_t *zone)
{
	task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
	task_t *ancestor_tk;
	taskid_t tkid;
	task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	mod_hash_hndl_t hndl;
	rctl_set_t *set = rctl_set_create();
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	bzero(tk, sizeof (task_t));

	tk->tk_tkid = tkid = id_alloc(taskid_space);
	tk->tk_nlwps = 0;
	tk->tk_nlwps_ctl = INT_MAX;
	tk->tk_nprocs = 0;
	tk->tk_nprocs_ctl = INT_MAX;
	tk->tk_usage = tu;
	tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	tk->tk_proj = project_hold_by_id(projid, zone, PROJECT_HOLD_INSERT);
	tk->tk_flags = TASK_NORMAL;
	tk->tk_commit_next = NULL;

	/*
	 * Copy ancestor task's resource controls.
	 */
	zone_task_hold(zone);
	mutex_enter(&curproc->p_lock);
	ancestor_tk = curproc->p_task;
	task_hold(ancestor_tk);
	tk->tk_zone = zone;
	mutex_exit(&curproc->p_lock);

	for (;;) {
		gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);

		mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);
		if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
			break;

		mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

		rctl_prealloc_destroy(gp);
	}

	/*
	 * At this point, curproc does not have the appropriate linkage
	 * through the task to the project.  So, rctl_set_dup should only
	 * copy the rctls, and leave the callbacks for later.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc, &e,
	    set, gp, RCD_DUP);
	mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

	rctl_prealloc_destroy(gp);

	/*
	 * Record the ancestor task's ID for use by extended accounting.
	 */
	tu->tu_anctaskid = ancestor_tk->tk_tkid;
	task_rele(ancestor_tk);

	/*
	 * Put new task structure in the hash table.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(tkid, zone->zone_id) == NULL);
	if (mod_hash_insert_reserve(task_hash, (mod_hash_key_t)(uintptr_t)tkid,
	    (mod_hash_val_t *)tk, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", tkid, (void *)tk);
	}
	mutex_exit(&task_hash_lock);

	tk->tk_nprocs_kstat = task_kstat_create(tk, zone);
	return (tk);
}

/*
 * void task_attach(task_t *, proc_t *)
 *
 * Overview
 *   task_attach() is used to attach a process to a task; this operation is
 *   only performed as a result of a fork() or settaskid() system call.  The
 *   proc_t's p_tasknext and p_taskprev fields will be set such that the
 *   proc_t is a member of the doubly-linked list of proc_t's that make up the
 *   task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p->p_lock must be held on entry.
 */
void
task_attach(task_t *tk, proc_t *p)
{
	proc_t *first, *prev;

	ASSERT(tk != NULL);
	ASSERT(p != NULL);
	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (tk->tk_memb_list == NULL) {
		p->p_tasknext = p;
		p->p_taskprev = p;
	} else {
		first = tk->tk_memb_list;
		prev = first->p_taskprev;
		first->p_taskprev = p;
		p->p_tasknext = first;
		p->p_taskprev = prev;
		prev->p_tasknext = p;
	}
	tk->tk_memb_list = p;
	task_hold(tk);
	p->p_task = tk;
}

/*
 * task_begin()
 *
 * Overview
 *   A process constructing a new task calls task_begin() to initialize the
 *   task, by attaching itself as a member.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock must be held across the call to task_begin().
 */
void
task_begin(task_t *tk, proc_t *p)
{
	timestruc_t ts;
	task_usage_t *tu;
	rctl_entity_p_t e;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_enter(&tk->tk_usage_lock);
	tu = tk->tk_usage;
	gethrestime(&ts);
	tu->tu_startsec = (uint64_t)ts.tv_sec;
	tu->tu_startnsec = (uint64_t)ts.tv_nsec;
	mutex_exit(&tk->tk_usage_lock);

	/*
	 * Join process to the task as a member.
	 */
	task_attach(tk, p);

	/*
	 * Now that the linkage from process to task is complete, do the
	 * required callback for the task rctl set.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	(void) rctl_set_dup(NULL, NULL, p, &e, tk->tk_rctls, NULL,
	    RCD_CALLBACK);
}

/*
 * void task_detach(proc_t *)
 *
 * Overview
 *   task_detach() removes the specified process from its task.  task_detach()
 *   sets the process's task membership to NULL, in anticipation of a final
 *   exit or of joining a new task.  Because task_rele() requires a context
 *   safe for KM_SLEEP allocations, a task_detach() is followed by a
 *   subsequent task_rele() once appropriate context is available.
 *
 *   Because task_detach() involves relinquishing the process's membership in
 *   the project, any observational rctls the process may have had on the task
 *   or project are destroyed.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock held across task_detach().
 */
void
task_detach(proc_t *p)
{
	task_t *tk = p->p_task;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(p->p_task != NULL);
	ASSERT(tk->tk_memb_list != NULL);

	if (tk->tk_memb_list == p)
		tk->tk_memb_list = p->p_tasknext;
	if (tk->tk_memb_list == p)
		tk->tk_memb_list = NULL;
	p->p_taskprev->p_tasknext = p->p_tasknext;
	p->p_tasknext->p_taskprev = p->p_taskprev;

	rctl_set_tearoff(p->p_task->tk_rctls, p);
	rctl_set_tearoff(p->p_task->tk_proj->kpj_rctls, p);

	p->p_task = NULL;
	p->p_tasknext = p->p_taskprev = NULL;
}

/*
 * task_change(task_t *, proc_t *)
 *
 * Overview
 *   task_change() removes the specified process from its current task.  The
 *   process is then attached to the specified task.  This routine is called
 *   from settaskid() when a process is being moved to a new task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p_lock held across task_change().
 */
void
task_change(task_t *newtk, proc_t *p)
{
	task_t *oldtk = p->p_task;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(oldtk != NULL);
	ASSERT(oldtk->tk_memb_list != NULL);

	mutex_enter(&oldtk->tk_zone->zone_nlwps_lock);
	oldtk->tk_nlwps -= p->p_lwpcnt;
	oldtk->tk_nprocs--;
	mutex_exit(&oldtk->tk_zone->zone_nlwps_lock);

	mutex_enter(&newtk->tk_zone->zone_nlwps_lock);
	newtk->tk_nlwps += p->p_lwpcnt;
	newtk->tk_nprocs++;
	mutex_exit(&newtk->tk_zone->zone_nlwps_lock);

	task_detach(p);
	task_begin(newtk, p);
	exacct_move_mstate(p, oldtk, newtk);
}

/*
 * task_end()
 *
 * Overview
 *   task_end() contains the actions executed once the final member of
 *   a task has released the task, and all actions connected with the task,
 *   such as committing an accounting record to a file, are completed.  It is
 *   called by the known last consumer of the task information.  Additionally,
 *   task_end() must never refer to any process in the system.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restrictions on context, beyond that given above.
 */
void
task_end(task_t *tk)
{
	ASSERT(tk->tk_hold_count == 0);

	project_rele(tk->tk_proj);
	kmem_free(tk->tk_usage, sizeof (task_usage_t));
	kmem_free(tk->tk_inherited, sizeof (task_usage_t));
	if (tk->tk_prevusage != NULL)
		kmem_free(tk->tk_prevusage, sizeof (task_usage_t));
	if (tk->tk_zoneusage != NULL)
		kmem_free(tk->tk_zoneusage, sizeof (task_usage_t));
	rctl_set_free(tk->tk_rctls);
	id_free(taskid_space, tk->tk_tkid);
	zone_task_rele(tk->tk_zone);
	kmem_cache_free(task_cache, tk);
}

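/*
 * static void changeproj(proc_t *p, kproject_t *kpj, zone_t *zone,
 *   void *projbuf, void *zonebuf)
 *
 * Overview
 *   changeproj() moves each thread of process p to the new project kpj,
 *   updating the per-thread project pointer and notifying the fair share
 *   scheduler via fss_changeproj().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p->p_lock must be held across the call.
 */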
static void
changeproj(proc_t *p, kproject_t *kpj, zone_t *zone, void *projbuf,
    void *zonebuf)
{
	kproject_t *oldkpj;
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = p->p_tlist) != NULL) {
		do {
			(void) project_hold(kpj);

			thread_lock(t);
			oldkpj = ttoproj(t);

			/*
			 * Kick this thread so that it doesn't sit
			 * on a wrong wait queue.
			 */
			if (ISWAITING(t))
				setrun_locked(t);

			/*
			 * The thread wants to go on the project wait queue,
			 * but the waitq is changing.
			 */
			if (t->t_schedflag & TS_PROJWAITQ)
				t->t_schedflag &= ~TS_PROJWAITQ;

			t->t_proj = kpj;
			t->t_pre_sys = 1;		/* For cred update */
			thread_unlock(t);
			fss_changeproj(t, kpj, zone, projbuf, zonebuf);

			project_rele(oldkpj);
		} while ((t = t->t_forw) != p->p_tlist);
	}
}

/*
 * task_join()
 *
 * Overview
 *   task_join() contains the actions that must be executed when the first
 *   member (curproc) of a newly created task joins it.  It may never fail.
 *
 *   The caller must make sure holdlwps() is called so that all other lwps are
 *   stopped prior to calling this function.
 *
 *   NB: It returns with curproc->p_lock held.
 *
 * Return values
 *   Pointer to the old task.
 *
 * Caller's context
 *   cpu_lock must be held entering the function.  It will acquire pidlock,
 *   p_crlock and p_lock during execution.
 */
task_t *
task_join(task_t *tk, uint_t flags)
{
	proc_t *p = ttoproc(curthread);
	task_t *prev_tk;
	void *projbuf, *zonebuf;
	zone_t *zone = tk->tk_zone;
	projid_t projid = tk->tk_proj->kpj_id;
	cred_t *oldcr;

	/*
	 * We can't know for sure if holdlwps() was called, but we can check to
	 * ensure we're single-threaded.
	 */
	ASSERT(curthread == p->p_agenttp || p->p_lwprcnt == 1);

	/*
	 * Changing the credential is always hard because we cannot
	 * allocate memory when holding locks but we don't know whether
	 * we need to change it.  We first get a reference to the current
	 * cred if we need to change it.  Then we create a credential
	 * with an updated project id.  Finally we install it, first
	 * releasing the reference we had on the p_cred at the time we
	 * acquired the lock the first time and later we release the
	 * reference to p_cred at the time we acquired the lock the
	 * second time.
	 */
	mutex_enter(&p->p_crlock);
	if (crgetprojid(p->p_cred) == projid)
		oldcr = NULL;
	else
		crhold(oldcr = p->p_cred);
	mutex_exit(&p->p_crlock);

	if (oldcr != NULL) {
		cred_t *newcr = crdup(oldcr);
		crsetprojid(newcr, projid);
		crfree(oldcr);

		mutex_enter(&p->p_crlock);
		oldcr = p->p_cred;
		p->p_cred = newcr;
		mutex_exit(&p->p_crlock);
		crfree(oldcr);
	}

	/*
	 * Make sure that the number of processor sets is constant
	 * across this operation.
	 */
	ASSERT(MUTEX_HELD(&cpu_lock));

	projbuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_ZONE);

	mutex_enter(&pidlock);
	mutex_enter(&p->p_lock);

	prev_tk = p->p_task;
	task_change(tk, p);

	/*
	 * Now move threads one by one to their new project.
	 */
	changeproj(p, tk->tk_proj, zone, projbuf, zonebuf);
	if (flags & TASK_FINAL)
		p->p_task->tk_flags |= TASK_FINAL;

	mutex_exit(&pidlock);

	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	return (prev_tk);
}

/*
 * rctl ops vectors
 */
static rctl_ops_t task_lwps_ops = {
	rcop_no_action,
	task_lwps_usage,
	task_lwps_set,
	task_lwps_test
};

static rctl_ops_t task_procs_ops = {
	rcop_no_action,
	task_nprocs_usage,
	task_nprocs_set,
	task_nprocs_test
};

static rctl_ops_t task_cpu_time_ops = {
	rcop_no_action,
	task_cpu_time_usage,
	rcop_no_set,
	task_cpu_time_test
};

/*ARGSUSED*/
/*
 * void task_init(void)
 *
 * Overview
 *   task_init() initializes task-related hashes, caches, and the task id
 *   space.  Additionally, task_init() establishes p0 as a member of task0.
 *   Called by main().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   task_init() must be called prior to MP startup.
 */
void
task_init(void)
{
	proc_t *p = &p0;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	/*
	 * Initialize task_cache and taskid_space.
	 */
	task_cache = kmem_cache_create("task_cache", sizeof (task_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);

	/*
	 * Initialize task hash table.
	 */
	task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
	    mod_hash_null_valdtor);

	/*
	 * Initialize task-based rctls.
	 */
	rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_lwps_ops);
	rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_procs_ops);
	rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
	    RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
	    UINT64_MAX, &task_cpu_time_ops);

	/*
	 * Create task0 and place p0 in it as a member.
	 */
	task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
	bzero(task0p, sizeof (task_t));

	task0p->tk_tkid = id_alloc(taskid_space);
	task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_proj = project_hold_by_id(0, &zone0,
	    PROJECT_HOLD_INSERT);
	task0p->tk_flags = TASK_NORMAL;
	task0p->tk_nlwps = p->p_lwpcnt;
	task0p->tk_nprocs = 1;
	task0p->tk_zone = global_zone;
	task0p->tk_commit_next = NULL;

	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_TASK);
	mutex_enter(&curproc->p_lock);
	e.rcep_p.task = task0p;
	e.rcep_t = RCENTITY_TASK;
	task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set, gp);
	mutex_exit(&curproc->p_lock);
	rctl_prealloc_destroy(gp);

	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
	    (mod_hash_val_t *)task0p, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", task0p->tk_tkid,
		    (void *)task0p);
	}
	mutex_exit(&task_hash_lock);

	task0p->tk_memb_list = p;

	task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);

	/*
	 * Initialize task pointers for p0, including doubly linked list of
	 * task members.
	 */
	p->p_task = task0p;
	p->p_taskprev = p->p_tasknext = p;
	task_hold(task0p);
}

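/*
 * static int task_nprocs_kstat_update(kstat_t *ksp, int rw)
 *
 * Overview
 *   task_nprocs_kstat_update() is the update routine for the per-task
 *   "nprocs" kstat; it copies the task's current process count and control
 *   value into the kstat data.
 *
 * Return values
 *   0 on success; EACCES if a write was attempted.
 *
 * Caller's context
 *   Called by the kstat framework.
 */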
static int
task_nprocs_kstat_update(kstat_t *ksp, int rw)
{
	task_t *tk = ksp->ks_private;
	task_kstat_t *ktk = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ktk->ktk_usage.value.ui64 = tk->tk_nprocs;
	ktk->ktk_value.value.ui64 = tk->tk_nprocs_ctl;
	return (0);
}

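/*
 * static kstat_t *task_kstat_create(task_t *tk, zone_t *zone)
 *
 * Overview
 *   task_kstat_create() creates and installs the per-task "nprocs" kstat,
 *   recording the zone name and wiring up task_nprocs_kstat_update() as its
 *   update routine.
 *
 * Return values
 *   A pointer to the installed kstat, or NULL if the kstat could not be
 *   created.
 *
 * Caller's context
 *   Caller's context must be safe for KM_SLEEP allocations.
 */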
static kstat_t *
task_kstat_create(task_t *tk, zone_t *zone)
{
	kstat_t *ksp;
	task_kstat_t *ktk;
	char *zonename = zone->zone_name;

	ksp = rctl_kstat_create_task(tk, "nprocs", KSTAT_TYPE_NAMED,
	    sizeof (task_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	ktk = ksp->ks_data = kmem_alloc(sizeof (task_kstat_t), KM_SLEEP);
	ksp->ks_data_size += strlen(zonename) + 1;
	kstat_named_init(&ktk->ktk_zonename, "zonename", KSTAT_DATA_STRING);
	kstat_named_setstr(&ktk->ktk_zonename, zonename);
	kstat_named_init(&ktk->ktk_usage, "usage", KSTAT_DATA_UINT64);
	kstat_named_init(&ktk->ktk_value, "value", KSTAT_DATA_UINT64);
	ksp->ks_update = task_nprocs_kstat_update;
	ksp->ks_private = tk;
	kstat_install(ksp);

	return (ksp);
}

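/*
 * static void task_kstat_delete(task_t *tk)
 *
 * Overview
 *   task_kstat_delete() removes the task's "nprocs" kstat, if present, and
 *   frees its data.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Called from task_rele() when the last reference on the task is released.
 */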
static void
task_kstat_delete(task_t *tk)
{
	void *data;

	if (tk->tk_nprocs_kstat != NULL) {
		data = tk->tk_nprocs_kstat->ks_data;
		kstat_delete(tk->tk_nprocs_kstat);
		kmem_free(data, sizeof (task_kstat_t));
		tk->tk_nprocs_kstat = NULL;
	}
}

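/*
 * void task_commit_thread_init()
 *
 * Overview
 *   task_commit_thread_init() initializes the lock and condition variable
 *   protecting the backup commit list and creates task_commit_thread (as a
 *   kernel thread in p0).
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Expected to be called once, during system startup.
 */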
void
task_commit_thread_init()
{
	mutex_init(&task_commit_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&task_commit_cv, NULL, CV_DEFAULT, NULL);
	task_commit_thread = thread_create(NULL, 0, task_commit, NULL, 0,
	    &p0, TS_RUN, minclsyspri);
}

/*
 * Backup thread to commit task resource usage when taskq_dispatch() fails.
 */
static void
task_commit()
{
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &task_commit_lock, callb_generic_cpr,
	    "task_commit_thread");

	mutex_enter(&task_commit_lock);

	for (;;) {
		while (task_commit_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&task_commit_cv, &task_commit_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &task_commit_lock);
		}
		while (task_commit_head != NULL) {
			task_t *tk;

			tk = task_commit_head;
			task_commit_head = task_commit_head->tk_commit_next;
			if (task_commit_head == NULL)
				task_commit_tail = NULL;
			mutex_exit(&task_commit_lock);
			exacct_commit_task(tk);
			mutex_enter(&task_commit_lock);
		}
	}
}