/*	$NetBSD: tprof.c,v 1.10 2011/04/14 16:23:59 yamt Exp $	*/

/*-
 * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.10 2011/04/14 16:23:59 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/queue.h>

#include <dev/tprof/tprof.h>
#include <dev/tprof/tprof_ioctl.h>

/*
 * locking order:
 *	tprof_reader_lock -> tprof_lock
 *	tprof_startstop_lock -> tprof_lock
 */

/*
 * protected by:
 *	L: tprof_lock
 *	R: tprof_reader_lock
 *	S: tprof_startstop_lock
 *	s: writer should hold tprof_startstop_lock and tprof_lock
 *	   reader should hold tprof_startstop_lock or tprof_lock
 */
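
/*
 * for example, a path that needs both tprof_reader_lock and tprof_lock
 * must take them in the order documented above, as tprof_clear() does:
 *
 *	mutex_enter(&tprof_reader_lock);
 *	mutex_enter(&tprof_lock);
 *	...
 *	mutex_exit(&tprof_lock);
 *	mutex_exit(&tprof_reader_lock);
 */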

typedef struct tprof_buf {
	u_int b_used;
	u_int b_size;
	u_int b_overflow;
	u_int b_unused;
	STAILQ_ENTRY(tprof_buf) b_list;
	tprof_sample_t b_data[];
} tprof_buf_t;
#define	TPROF_BUF_BYTESIZE(sz) \
	(sizeof(tprof_buf_t) + (sz) * sizeof(tprof_sample_t))
#define	TPROF_MAX_SAMPLES_PER_BUF	10000

#define	TPROF_MAX_BUF			100
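
/*
 * a worked instance of the sizing macro: a buffer holding up to n
 * samples occupies sizeof(tprof_buf_t) + n * sizeof(tprof_sample_t)
 * bytes; the header and the b_data[] flexible array member share a
 * single kmem_alloc() allocation (see tprof_buf_alloc() below).
 */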

typedef struct {
	tprof_buf_t *c_buf;
	uint32_t c_cpuid;
	struct work c_work;
	callout_t c_callout;
} __aligned(CACHE_LINE_SIZE) tprof_cpu_t;

typedef struct tprof_backend {
	const char *tb_name;
	const tprof_backend_ops_t *tb_ops;
	LIST_ENTRY(tprof_backend) tb_list;
	int tb_usecount;	/* S: */
} tprof_backend_t;

static kmutex_t tprof_lock;
static bool tprof_running;		/* s: */
static u_int tprof_nworker;		/* L: # of running worker LWPs */
static lwp_t *tprof_owner;
static STAILQ_HEAD(, tprof_buf) tprof_list; /* L: global buffer list */
static u_int tprof_nbuf_on_list;	/* L: # of buffers on tprof_list */
static struct workqueue *tprof_wq;
static tprof_cpu_t tprof_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
static u_int tprof_samples_per_buf;

static tprof_backend_t *tprof_backend;	/* S: */
static LIST_HEAD(, tprof_backend) tprof_backends =
    LIST_HEAD_INITIALIZER(tprof_backends); /* S: */

static kmutex_t tprof_reader_lock;
static kcondvar_t tprof_reader_cv;	/* L: */
static off_t tprof_reader_offset;	/* R: */

static kmutex_t tprof_startstop_lock;
static kcondvar_t tprof_cv;		/* L: */

static struct tprof_stat tprof_stat;	/* L: */

static tprof_cpu_t *
tprof_cpu(struct cpu_info *ci)
{

	return &tprof_cpus[cpu_index(ci)];
}

static tprof_cpu_t *
tprof_curcpu(void)
{

	return tprof_cpu(curcpu());
}

static tprof_buf_t *
tprof_buf_alloc(void)
{
	tprof_buf_t *new;
	u_int size = tprof_samples_per_buf;

	new = kmem_alloc(TPROF_BUF_BYTESIZE(size), KM_SLEEP);
	new->b_used = 0;
	new->b_size = size;
	new->b_overflow = 0;
	return new;
}

static void
tprof_buf_free(tprof_buf_t *buf)
{

	kmem_free(buf, TPROF_BUF_BYTESIZE(buf->b_size));
}

static tprof_buf_t *
tprof_buf_switch(tprof_cpu_t *c, tprof_buf_t *new)
{
	tprof_buf_t *old;

	old = c->c_buf;
	c->c_buf = new;
	return old;
}

static tprof_buf_t *
tprof_buf_refresh(void)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *new;

	new = tprof_buf_alloc();
	return tprof_buf_switch(c, new);
}

static void
tprof_worker(struct work *wk, void *dummy)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *buf;
	bool shouldstop;

	KASSERT(wk == &c->c_work);
	KASSERT(dummy == NULL);

	/*
	 * get a per cpu buffer.
	 */
	buf = tprof_buf_refresh();

	/*
	 * and put it on the global list for read(2).
	 */
	mutex_enter(&tprof_lock);
	shouldstop = !tprof_running;
	if (shouldstop) {
		KASSERT(tprof_nworker > 0);
		tprof_nworker--;
		cv_broadcast(&tprof_cv);
		cv_broadcast(&tprof_reader_cv);
	}
	if (buf->b_used == 0) {
		tprof_stat.ts_emptybuf++;
	} else if (tprof_nbuf_on_list < TPROF_MAX_BUF) {
		tprof_stat.ts_sample += buf->b_used;
		tprof_stat.ts_overflow += buf->b_overflow;
		tprof_stat.ts_buf++;
		STAILQ_INSERT_TAIL(&tprof_list, buf, b_list);
		tprof_nbuf_on_list++;
		buf = NULL;
		cv_broadcast(&tprof_reader_cv);
	} else {
		tprof_stat.ts_dropbuf_sample += buf->b_used;
		tprof_stat.ts_dropbuf++;
	}
	mutex_exit(&tprof_lock);
	if (buf) {
		tprof_buf_free(buf);
	}
	if (!shouldstop) {
		callout_schedule(&c->c_callout, hz);
	}
}

static void
tprof_kick(void *vp)
{
	struct cpu_info * const ci = vp;
	tprof_cpu_t * const c = tprof_cpu(ci);

	workqueue_enqueue(tprof_wq, &c->c_work, ci);
}

static void
tprof_stop1(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	KASSERT(tprof_nworker == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *old;

		old = tprof_buf_switch(c, NULL);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_destroy(&c->c_callout);
	}
	workqueue_destroy(tprof_wq);
}

static int
tprof_start(const struct tprof_param *param)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int error;
	uint64_t freq;
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (tprof_running) {
		error = EBUSY;
		goto done;
	}

	tb = tprof_backend;
	if (tb == NULL) {
		error = ENOENT;
		goto done;
	}
	if (tb->tb_usecount > 0) {
		error = EBUSY;
		goto done;
	}

	tb->tb_usecount++;
	freq = tb->tb_ops->tbo_estimate_freq();
	tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF);

	error = workqueue_create(&tprof_wq, "tprofmv", tprof_worker, NULL,
	    PRI_NONE, IPL_SOFTCLOCK, WQ_MPSAFE | WQ_PERCPU);
	if (error != 0) {
		goto done;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *new;
		tprof_buf_t *old;

		new = tprof_buf_alloc();
		old = tprof_buf_switch(c, new);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_init(&c->c_callout, CALLOUT_MPSAFE);
		callout_setfunc(&c->c_callout, tprof_kick, ci);
	}

	error = tb->tb_ops->tbo_start(NULL);
	if (error != 0) {
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		tprof_stop1();
		goto done;
	}

	mutex_enter(&tprof_lock);
	tprof_running = true;
	mutex_exit(&tprof_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);

		mutex_enter(&tprof_lock);
		tprof_nworker++;
		mutex_exit(&tprof_lock);
		workqueue_enqueue(tprof_wq, &c->c_work, ci);
	}
done:
	return error;
}

static void
tprof_stop(void)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (!tprof_running) {
		goto done;
	}

	tb = tprof_backend;
	KASSERT(tb->tb_usecount > 0);
	tb->tb_ops->tbo_stop(NULL);
	tb->tb_usecount--;

	mutex_enter(&tprof_lock);
	tprof_running = false;
	cv_broadcast(&tprof_reader_cv);
	while (tprof_nworker > 0) {
		cv_wait(&tprof_cv, &tprof_lock);
	}
	mutex_exit(&tprof_lock);

	tprof_stop1();
done:
	;
}

/*
 * tprof_clear: drain unread samples.
 */

static void
tprof_clear(void)
{
	tprof_buf_t *buf;

	mutex_enter(&tprof_reader_lock);
	mutex_enter(&tprof_lock);
	while ((buf = STAILQ_FIRST(&tprof_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);
		tprof_buf_free(buf);
		mutex_enter(&tprof_lock);
	}
	KASSERT(tprof_nbuf_on_list == 0);
	mutex_exit(&tprof_lock);
	tprof_reader_offset = 0;
	mutex_exit(&tprof_reader_lock);

	memset(&tprof_stat, 0, sizeof(tprof_stat));
}

static tprof_backend_t *
tprof_backend_lookup(const char *name)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	LIST_FOREACH(tb, &tprof_backends, tb_list) {
		if (!strcmp(tb->tb_name, name)) {
			return tb;
		}
	}
	return NULL;
}

/* -------------------- backend interfaces */

/*
 * tprof_sample: record a sample in the per-cpu buffer.
 *
 * be careful; this can be called in NMI context.
 * we are bluntly assuming that the following are safe to use there:
 *	curcpu()
 *	curlwp->l_lid
 *	curlwp->l_proc->p_pid
 */

void
tprof_sample(tprof_backend_cookie_t *cookie, const tprof_frame_info_t *tfi)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t * const buf = c->c_buf;
	tprof_sample_t *sp;
	const uintptr_t pc = tfi->tfi_pc;
	const lwp_t * const l = curlwp;
	u_int idx;

	idx = buf->b_used;
	if (__predict_false(idx >= buf->b_size)) {
		buf->b_overflow++;
		return;
	}
	sp = &buf->b_data[idx];
	sp->s_pid = l->l_proc->p_pid;
	sp->s_lwpid = l->l_lid;
	sp->s_cpuid = c->c_cpuid;
	sp->s_flags = (tfi->tfi_inkernel) ? TPROF_SAMPLE_INKERNEL : 0;
	sp->s_pc = pc;
	buf->b_used = idx + 1;
}
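
/*
 * an illustrative sketch of a caller: a backend's counter-overflow
 * interrupt handler would fill a tprof_frame_info_t from its trap
 * frame and hand it to tprof_sample().  the mybackend_* helpers are
 * hypothetical; only tprof_sample() and the tfi_* fields are the
 * interface above, and the cookie argument is unused in this version.
 *
 *	void
 *	mybackend_intr(struct trapframe *tf)
 *	{
 *		tprof_frame_info_t tfi;
 *
 *		tfi.tfi_pc = mybackend_pc_from_frame(tf);
 *		tfi.tfi_inkernel = mybackend_pc_is_kernel(tfi.tfi_pc);
 *		tprof_sample(NULL, &tfi);
 *	}
 */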

/*
 * tprof_backend_register: register a sampling backend.
 */

int
tprof_backend_register(const char *name, const tprof_backend_ops_t *ops,
    int vers)
{
	tprof_backend_t *tb;

	if (vers != TPROF_BACKEND_VERSION) {
		return EINVAL;
	}

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
	if (tb != NULL) {
		mutex_exit(&tprof_startstop_lock);
		return EEXIST;
	}
#if 1 /* XXX for now */
	if (!LIST_EMPTY(&tprof_backends)) {
		mutex_exit(&tprof_startstop_lock);
		return ENOTSUP;
	}
#endif
	tb = kmem_alloc(sizeof(*tb), KM_SLEEP);
	tb->tb_name = name;
	tb->tb_ops = ops;
	tb->tb_usecount = 0;
	LIST_INSERT_HEAD(&tprof_backends, tb, tb_list);
#if 1 /* XXX for now */
	if (tprof_backend == NULL) {
		tprof_backend = tb;
	}
#endif
	mutex_exit(&tprof_startstop_lock);

	return 0;
}
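
/*
 * a sketch of how a backend would register itself, typically from its
 * module init routine.  the mybackend_* names are hypothetical; the
 * tbo_* members and TPROF_BACKEND_VERSION are the interface consumed
 * elsewhere in this file.
 *
 *	static const tprof_backend_ops_t mybackend_ops = {
 *		.tbo_estimate_freq = mybackend_estimate_freq,
 *		.tbo_start = mybackend_start,
 *		.tbo_stop = mybackend_stop,
 *	};
 *
 *	error = tprof_backend_register("mybackend", &mybackend_ops,
 *	    TPROF_BACKEND_VERSION);
 */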

/*
 * tprof_backend_unregister: unregister the named backend.
 */

int
tprof_backend_unregister(const char *name)
{
	tprof_backend_t *tb;

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
#if defined(DIAGNOSTIC)
	if (tb == NULL) {
		mutex_exit(&tprof_startstop_lock);
		panic("%s: not found '%s'", __func__, name);
	}
#endif /* defined(DIAGNOSTIC) */
	if (tb->tb_usecount > 0) {
		mutex_exit(&tprof_startstop_lock);
		return EBUSY;
	}
#if 1 /* XXX for now */
	if (tprof_backend == tb) {
		tprof_backend = NULL;
	}
#endif
	LIST_REMOVE(tb, tb_list);
	mutex_exit(&tprof_startstop_lock);

	kmem_free(tb, sizeof(*tb));

	return 0;
}

/* -------------------- cdevsw interfaces */

void tprofattach(int);

static int
tprof_open(dev_t dev, int flags, int type, struct lwp *l)
{

	if (minor(dev) != 0) {
		return EXDEV;
	}
	mutex_enter(&tprof_lock);
	if (tprof_owner != NULL) {
		mutex_exit(&tprof_lock);
		return EBUSY;
	}
	tprof_owner = curlwp;
	mutex_exit(&tprof_lock);

	return 0;
}

static int
tprof_close(dev_t dev, int flags, int type, struct lwp *l)
{

	KASSERT(minor(dev) == 0);

	mutex_enter(&tprof_startstop_lock);
	mutex_enter(&tprof_lock);
	tprof_owner = NULL;
	mutex_exit(&tprof_lock);
	tprof_stop();
	tprof_clear();
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

static int
tprof_read(dev_t dev, struct uio *uio, int flags)
{
	tprof_buf_t *buf;
	size_t bytes;
	size_t resid;
	size_t done;
	int error = 0;

	KASSERT(minor(dev) == 0);
	mutex_enter(&tprof_reader_lock);
	while (uio->uio_resid > 0 && error == 0) {
		/*
		 * take the first buffer from the list.
		 */
		mutex_enter(&tprof_lock);
		buf = STAILQ_FIRST(&tprof_list);
		if (buf == NULL) {
			if (tprof_nworker == 0) {
				mutex_exit(&tprof_lock);
				error = 0;
				break;
			}
			mutex_exit(&tprof_reader_lock);
			error = cv_wait_sig(&tprof_reader_cv, &tprof_lock);
			mutex_exit(&tprof_lock);
			mutex_enter(&tprof_reader_lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);

		/*
		 * copy it out.
		 */
		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
		    tprof_reader_offset, uio->uio_resid);
		resid = uio->uio_resid;
		error = uiomove((char *)buf->b_data + tprof_reader_offset,
		    bytes, uio);
		done = resid - uio->uio_resid;
		tprof_reader_offset += done;

		/*
		 * if we didn't consume the whole buffer,
		 * put it back on the list.
		 */
		if (tprof_reader_offset <
		    buf->b_used * sizeof(tprof_sample_t)) {
			mutex_enter(&tprof_lock);
			STAILQ_INSERT_HEAD(&tprof_list, buf, b_list);
			tprof_nbuf_on_list++;
			cv_broadcast(&tprof_reader_cv);
			mutex_exit(&tprof_lock);
		} else {
			tprof_buf_free(buf);
			tprof_reader_offset = 0;
		}
	}
	mutex_exit(&tprof_reader_lock);

	return error;
}

static int
tprof_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	const struct tprof_param *param;
	int error = 0;

	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case TPROF_IOC_GETVERSION:
		*(int *)data = TPROF_VERSION;
		break;
	case TPROF_IOC_START:
		param = data;
		mutex_enter(&tprof_startstop_lock);
		error = tprof_start(param);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_STOP:
		mutex_enter(&tprof_startstop_lock);
		tprof_stop();
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_GETSTAT:
		mutex_enter(&tprof_lock);
		memcpy(data, &tprof_stat, sizeof(tprof_stat));
		mutex_exit(&tprof_lock);
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

const struct cdevsw tprof_cdevsw = {
	.d_open = tprof_open,
	.d_close = tprof_close,
	.d_read = tprof_read,
	.d_write = nowrite,
	.d_ioctl = tprof_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_OTHER | D_MPSAFE,
};
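
/*
 * an illustrative userland sketch of driving this device, assuming the
 * usual /dev/tprof node; error handling omitted.  read(2) blocks until
 * a worker queues a filled buffer onto tprof_list.
 *
 *	int fd = open("/dev/tprof", O_RDONLY);
 *	int vers;
 *	struct tprof_param param;
 *	tprof_sample_t samples[128];
 *	ssize_t n;
 *
 *	ioctl(fd, TPROF_IOC_GETVERSION, &vers);
 *	memset(&param, 0, sizeof(param));
 *	ioctl(fd, TPROF_IOC_START, &param);
 *	n = read(fd, samples, sizeof(samples));
 *	ioctl(fd, TPROF_IOC_STOP, NULL);
 *	close(fd);
 */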

void
tprofattach(int nunits)
{

	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

static void
tprof_driver_init(void)
{
	unsigned int i;

	mutex_init(&tprof_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_reader_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_startstop_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&tprof_cv, "tprof");
	cv_init(&tprof_reader_cv, "tprof_rd");
	STAILQ_INIT(&tprof_list);
	for (i = 0; i < __arraycount(tprof_cpus); i++) {
		tprof_cpu_t * const c = &tprof_cpus[i];

		c->c_buf = NULL;
		c->c_cpuid = i;
	}
}

static void
tprof_driver_fini(void)
{

	mutex_destroy(&tprof_lock);
	mutex_destroy(&tprof_reader_lock);
	mutex_destroy(&tprof_startstop_lock);
	cv_destroy(&tprof_cv);
	cv_destroy(&tprof_reader_cv);
}

static int
tprof_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		tprof_driver_init();
#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = NODEVMAJOR;
			int error;

			error = devsw_attach("tprof", NULL, &bmajor,
			    &tprof_cdevsw, &cmajor);
			if (error) {
				tprof_driver_fini();
				return error;
			}
		}
#endif /* defined(_MODULE) */
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			int error;
			error = devsw_detach(NULL, &tprof_cdevsw);
			if (error) {
				return error;
			}
		}
#endif /* defined(_MODULE) */
		tprof_driver_fini();
		return 0;

	default:
		return ENOTTY;
	}
}
737