xref: /onnv-gate/usr/src/uts/common/avs/ns/sdbc/sd_misc.c (revision 8322:0c42019e85d2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #define	_SCM_
27 
28 #include <sys/types.h>
29 #include <sys/ksynch.h>
30 #include <sys/cmn_err.h>
31 #include <sys/modctl.h>
32 #include <sys/conf.h>
33 #include <sys/errno.h>
34 #include <sys/file.h>
35 #include <sys/kmem.h>
36 #include <sys/cred.h>
37 #include <sys/ddi.h>
38 #include <sys/nsc_thread.h>
39 
40 #include "sd_bcache.h"
41 #include "sd_misc.h"
42 #include "sd_trace.h"
43 #include "sd_ft.h"
44 #include "sd_io.h"
45 #include "sd_bio.h"
46 #include "sd_pcu.h"
47 #include "sd_tdaemon.h"
48 #include "sdbc_ioctl.h"
49 #include <sys/ncall/ncall.h>
50 #include <sys/nsctl/nsctl.h>
51 #include <sys/nsctl/nsvers.h>
52 
53 #include <sys/sdt.h>		/* dtrace is S10 or later */
54 
55 #include <sys/unistat/spcs_s.h>
56 #include <sys/unistat/spcs_s_k.h>
57 #include <sys/unistat/spcs_errors.h>
58 static dev_info_t *dev_dip;
59 dev_info_t *sdbc_get_dip();
60 
61 
62 /*
63  *  A global variable that sets the threshold above which large writes
64  *  are handled in write-through mode when NVRAM is present. This should
65  *  solve the NVRAM bandwidth problem.
66  */
67 
68 int sdbc_wrthru_len;
69 nsc_size_t sdbc_max_fbas = _SD_MAX_FBAS;
70 int sdbc_max_devs = 0;
71 
72 krwlock_t sdbc_queue_lock;
73 
74 static int _sd_debug_level = 0;
75 
76 static kmutex_t _sd_block_lk;
77 
78 #define	REGISTER_SVC(X, Y) (ncall_register_svc(X, Y))
79 #define	UNREGISTER_SVC(X) (ncall_unregister_svc(X))
80 
81 const int sdbc_major_rev = ISS_VERSION_MAJ;
82 const int sdbc_minor_rev = ISS_VERSION_MIN;
83 const int sdbc_micro_rev = ISS_VERSION_MIC;
84 const int sdbc_baseline_rev = ISS_VERSION_NUM;
85 static char sdbc_version[16];
86 
87 static int _sdbc_attached = 0;
88 
89 static int _sdbc_print(dev_t dev, char *s);
90 static int sdbcunload(void);
91 static int sdbcload(void);
92 static int sdbcopen(dev_t *devp, int flag, int otyp, cred_t *crp);
93 static int sdbcclose(dev_t dev, int flag, int otyp, cred_t *crp);
94 static int sdbcioctl(dev_t dev, int cmd, void *arg, int mode, cred_t *crp,
95     int *rvp);
96 static int _sdbc_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
97 static int _sdbc_probe(dev_info_t *dip);
98 static int _sdbc_attach(dev_info_t *, ddi_attach_cmd_t);
99 static int _sdbc_detach(dev_info_t *, ddi_detach_cmd_t);
100 static int _sdbc_reset(dev_info_t *, ddi_reset_cmd_t);
101 
102 #ifdef sun
103 /*
104  * Solaris specific driver module interface code.
105  */
106 
107 #ifdef USES_SOFT_STATE
108 struct	sdbc_state {
109 	dev_info_t	*dip;		/* everyone would need a devinfo */
110 };
111 
112 static	void	*sdbc_statep;		/* for soft state routines */
113 #endif /* USES_SOFT_STATE */
114 
115 static	struct	cb_ops sdbc_cb_ops = {
116 	sdbcopen,	/* open */
117 	sdbcclose,	/* close */
118 	nodev,		/* not a block driver, strategy not an entry point */
119 	_sdbc_print,	/* print routine */
120 	nodev,		/* no dump routine */
121 	nodev,		/* read */
122 	nodev,		/* write */
123 	(int (*) ()) sdbcioctl,	/* ioctl */
124 	nodev,		/* no devmap routine */
125 	nodev,		/* no mmap routine */
126 	nodev,		/* no segmap routine */
127 	nochpoll,	/* no chpoll routine */
128 	ddi_prop_op,
129 	0,		/* not a STREAMS driver, no cb_str routine */
130 	D_NEW | D_MP,	/* safe for multi-thread/multi-processor */
131 };
132 
133 
134 static	struct	dev_ops sdbc_ops = {
135 	DEVO_REV,			/* dev_ops structure revision */
136 	0,				/* device reference count */
137 	_sdbc_getinfo,
138 	nulldev,
139 	_sdbc_probe,
140 	_sdbc_attach,
141 	_sdbc_detach,
142 	_sdbc_reset,
143 	&sdbc_cb_ops,
144 	(struct bus_ops *)NULL
145 };
146 
147 static struct modldrv sdbc_ldrv = {
148 	&mod_driverops,
149 	"nws:Storage Cache:" ISS_VERSION_STR,
150 	&sdbc_ops
151 };
152 
153 static	struct modlinkage sdbc_modlinkage = {
154 	MODREV_1,
155 	&sdbc_ldrv,
156 	NULL
157 };
158 
159 /*
160  * dynmem interface
161  */
162 static int mutex_and_condvar_flag;
163 
164 /*
165  * Solaris module load time code
166  */
167 int
168 _init(void)
169 {
170 
171 	int err;
172 
173 	mutex_and_condvar_flag = 0;
174 
175 #ifdef USES_SOFT_STATE
176 	ddi_soft_state_init(&sdbc_statep, sizeof (struct sdbc_state),
177 	    MAX_INSTANCES);
178 #endif /* USES_SOFT_STATE */
179 
180 	/*
181 	 * It is "load" time; call the UnixWare equivalent.
182 	 */
183 	err = sdbcload();
184 	if (!err)
185 		err = mod_install(&sdbc_modlinkage);
186 
187 	if (err) {
188 		(void) sdbcunload();
189 #ifdef USES_SOFT_STATE
190 		ddi_soft_state_fini(&sdbc_statep);
191 #endif /* USES_SOFT_STATE */
192 	}
193 
194 	if (!err) {
195 		mutex_and_condvar_flag = 1;
196 		mutex_init(&dynmem_processing_dm.thread_dm_lock, "dynmem",
197 		    MUTEX_DRIVER, NULL);
198 		cv_init(&dynmem_processing_dm.thread_dm_cv, "dynmem",
199 		    CV_DRIVER, NULL);
200 	}
201 
202 	return (err);
203 
204 }
205 /*
206  * Solaris module unload time code
207  */
208 
209 int
210 _fini(void)
211 {
212 	int err;
213 
214 	if (_sd_cache_initialized) {
215 		return (EBUSY);
216 	} else if (_sd_ioset &&
217 	    (_sd_ioset->set_nlive || _sd_ioset->set_nthread)) {
218 		cmn_err(CE_WARN, "sdbc:_fini() %d threads still "
219 		    "active; %d threads in set\n", _sd_ioset->set_nlive,
220 		    _sd_ioset->set_nthread);
221 		return (EBUSY);
222 	}
223 	if ((err = mod_remove(&sdbc_modlinkage)) == 0) {
224 		DTRACE_PROBE2(_sdbc_fini_mod_remove_succeeded,
225 		    int, err,
226 		    struct modlinkage *, &sdbc_modlinkage);
227 		err = sdbcunload();
228 #ifdef USES_SOFT_STATE
229 		ddi_soft_state_fini(&sdbc_statep);
230 #endif /* USES_SOFT_STATE */
231 
232 		if (mutex_and_condvar_flag) {
233 			cv_destroy(&dynmem_processing_dm.thread_dm_cv);
234 			mutex_destroy(&dynmem_processing_dm.thread_dm_lock);
235 			mutex_and_condvar_flag = 0;
236 		}
237 	}
238 
239 	return (err);
240 }
241 
242 /*
243  * Solaris module info code
244  */
245 int
246 _info(struct modinfo *modinfop)
247 {
248 	return (mod_info(&sdbc_modlinkage, modinfop));
249 }
250 
251 /*ARGSUSED*/
252 static int
253 _sdbc_probe(dev_info_t *dip)
254 {
255 	return (DDI_PROBE_SUCCESS);
256 }
257 
258 /*
259  * Attach an instance of the device. This happens before an open
260  * can succeed.
261  */
262 static int
263 _sdbc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
264 {
265 	_dm_process_vars_t local_dm_process_vars;
266 	struct buf bp;
267 
268 	if (cmd != DDI_ATTACH)
269 		return (DDI_FAILURE);
270 
271 	/*
272 	 *  Get the threshold value for putting large writes into
273 	 *  write-through mode (when NVRAM is present).
274 	 */
275 
276 	sdbc_wrthru_len =  ddi_prop_get_int(DDI_DEV_T_ANY, dip,
277 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_wrthru_thresh", 64);
278 
279 	/* Get sdbc_max_fbas from sdbc.conf */
280 	sdbc_max_fbas =  ddi_prop_get_int(DDI_DEV_T_ANY, dip,
281 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_fbas",
282 	    _SD_MAX_FBAS);
283 
284 	bp.b_bcount = (size_t)FBA_SIZE(sdbc_max_fbas);
285 	minphys(&bp); /* clamps value to maxphys */
286 
287 	sdbc_max_fbas = FBA_NUM(bp.b_bcount);
288 
289 	if (sdbc_max_fbas > _SD_MAX_FBAS) {
290 		cmn_err(CE_WARN,
291 		    "_sdbc_attach: sdbc_max_fbas set to %d", _SD_MAX_FBAS);
292 		sdbc_max_fbas = _SD_MAX_FBAS;
293 	}
294 
295 	/*
296 	 * -get the maximum list length for multipage dynmem
297 	 * -time between aging
298 	 * -number of agings before dealloc
299 	 * -what to report D0=shutdown, D1=thread variables
300 	 */
301 	dynmem_processing_dm.max_dyn_list = MAX_DYN_LIST_DEFAULT;
302 	dynmem_processing_dm.monitor_dynmem_process =
303 	    MONITOR_DYNMEM_PROCESS_DEFAULT;
304 	dynmem_processing_dm.cache_aging_ct1 = CACHE_AGING_CT_DEFAULT;
305 	dynmem_processing_dm.cache_aging_ct2 = CACHE_AGING_CT_DEFAULT;
306 	dynmem_processing_dm.cache_aging_ct3 = CACHE_AGING_CT_DEFAULT;
307 	dynmem_processing_dm.cache_aging_sec1 = CACHE_AGING_SEC1_DEFAULT;
308 	dynmem_processing_dm.cache_aging_sec2 = CACHE_AGING_SEC2_DEFAULT;
309 	dynmem_processing_dm.cache_aging_sec3 = CACHE_AGING_SEC3_DEFAULT;
310 	dynmem_processing_dm.cache_aging_pcnt1 = CACHE_AGING_PCNT1_DEFAULT;
311 	dynmem_processing_dm.cache_aging_pcnt2 = CACHE_AGING_PCNT2_DEFAULT;
312 	dynmem_processing_dm.max_holds_pcnt = MAX_HOLDS_PCNT_DEFAULT;
313 	dynmem_processing_dm.process_directive = PROCESS_DIRECTIVE_DEFAULT;
314 
315 	local_dm_process_vars.max_dyn_list = ddi_prop_get_int(DDI_DEV_T_ANY,
316 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_dyn_list",
317 	    MAX_DYN_LIST_DEFAULT);
318 
319 	local_dm_process_vars.monitor_dynmem_process =
320 	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
321 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_monitor_dynmem",
322 	    MONITOR_DYNMEM_PROCESS_DEFAULT);
323 
324 	local_dm_process_vars.cache_aging_ct1 = ddi_prop_get_int(DDI_DEV_T_ANY,
325 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct1",
326 	    CACHE_AGING_CT_DEFAULT);
327 
328 	local_dm_process_vars.cache_aging_ct2 = ddi_prop_get_int(DDI_DEV_T_ANY,
329 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct2",
330 	    CACHE_AGING_CT_DEFAULT);
331 
332 	local_dm_process_vars.cache_aging_ct3 = ddi_prop_get_int(DDI_DEV_T_ANY,
333 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct3",
334 	    CACHE_AGING_CT_DEFAULT);
335 
336 	local_dm_process_vars.cache_aging_sec1 = ddi_prop_get_int(DDI_DEV_T_ANY,
337 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec1",
338 	    CACHE_AGING_SEC1_DEFAULT);
339 
340 	local_dm_process_vars.cache_aging_sec2 = ddi_prop_get_int(DDI_DEV_T_ANY,
341 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec2",
342 	    CACHE_AGING_SEC2_DEFAULT);
343 
344 	local_dm_process_vars.cache_aging_sec3 = ddi_prop_get_int(DDI_DEV_T_ANY,
345 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec3",
346 	    CACHE_AGING_SEC3_DEFAULT);
347 
348 	local_dm_process_vars.cache_aging_pcnt1 =
349 	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
350 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_pcnt1",
351 	    CACHE_AGING_PCNT1_DEFAULT);
352 
353 	local_dm_process_vars.cache_aging_pcnt2 =
354 	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
355 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_pcnt2",
356 	    CACHE_AGING_PCNT2_DEFAULT);
357 
358 	local_dm_process_vars.process_directive =
359 	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
360 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_process_directive",
361 	    PROCESS_DIRECTIVE_DEFAULT);
362 
363 	local_dm_process_vars.max_holds_pcnt = ddi_prop_get_int(DDI_DEV_T_ANY,
364 	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_holds_pcnt",
365 	    MAX_HOLDS_PCNT_DEFAULT);
366 
367 	(void) sdbc_edit_xfer_process_vars_dm(&local_dm_process_vars);
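	/*
	 * Illustrative sdbc.conf fragment (example values only); each of the
	 * properties read above with ddi_prop_get_int() falls back to its
	 * compiled-in default when it is absent from the file:
	 *
	 *	sdbc_wrthru_thresh=64;
	 *	sdbc_max_fbas=1024;
	 *	sdbc_max_dyn_list=8;
	 */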
368 
369 #define	MINOR_NAME	"c,sdbc"		/* character device */
370 #define	MINOR_NUMBER	0
371 #ifdef MINOR_NAME
372 	if (ddi_create_minor_node(dip, MINOR_NAME, S_IFCHR,
373 	    MINOR_NUMBER, DDI_PSEUDO, 0)
374 	    != DDI_SUCCESS) {
375 		/* free anything we allocated here */
376 		return (DDI_FAILURE);
377 	}
378 #endif /* MINOR_NAME */
379 
380 	/* Announce presence of the device */
381 	ddi_report_dev(dip);
382 	dev_dip = dip;
383 	/* mark the device as attached, opens may proceed */
384 	_sdbc_attached = 1;
385 
386 	rw_init(&sdbc_queue_lock, NULL, RW_DRIVER, NULL);
387 
388 	return (DDI_SUCCESS);
389 }
390 
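/*
 * Detach an instance of the device.  The detach is refused while the
 * cache is still configured; otherwise the queue lock is destroyed and
 * the attached state is cleared so that subsequent opens fail.
 */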
391 /*ARGSUSED*/
392 static int
393 _sdbc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
394 {
395 	if (cmd == DDI_DETACH) {
396 		/*
397 		 * Check first if the cache is still in use
398 		 * and if it is, prevent the detach.
399 		 */
400 		if (_sd_cache_initialized)
401 			return (EBUSY);
402 
403 		_sdbc_attached = 0;
404 
405 		rw_destroy(&sdbc_queue_lock);
406 		dev_dip = NULL;
407 
408 		return (DDI_SUCCESS);
409 	} else
410 		return (DDI_FAILURE);
411 }
412 
413 /*ARGSUSED*/
414 static int
415 _sdbc_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
416 {
417 	return (DDI_SUCCESS);
418 }
419 
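/*
 * getinfo(9E) entry point: translate a dev_t into its instance number
 * (which is simply the minor number here) or into its dev_info pointer
 * (NULL unless the soft state support is compiled in).
 */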
420 /*ARGSUSED*/
421 static int
422 _sdbc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
423 {
424 	dev_t dev;
425 #ifdef USES_SOFT_STATE
426 	struct sdbc_state *xsp;
427 	int instance;
428 #endif /* USES_SOFT_STATE */
429 	int rc;
430 
431 	switch (cmd) {
432 		case DDI_INFO_DEVT2INSTANCE:
433 			dev = (dev_t)arg;
434 			/* The "instance" number is the minor number */
435 			*result = (void *)(unsigned long)getminor(dev);
436 			rc = DDI_SUCCESS;
437 			break;
438 
439 		case DDI_INFO_DEVT2DEVINFO:
440 			dev = (dev_t)arg;
441 #ifdef USES_SOFT_STATE
442 			/* the instance number is the minor number */
443 			instance = getminor(dev);
444 			xsp = ddi_get_soft_state(sdbc_statep, instance);
445 			if (xsp == NULL)
446 				return (DDI_FAILURE);
447 			*result = (void *) xsp->dip;
448 #else
449 			*result = (void *) NULL;
450 #endif /* USES_SOFT_STATE */
451 			rc = DDI_SUCCESS;
452 			break;
453 
454 		default:
455 			rc = DDI_FAILURE;
456 			break;
457 	}
458 	return (rc);
459 }
460 
461 /*ARGSUSED*/
462 int
463 _sdbc_print(dev_t dev, char *s)
464 {
465 	cmn_err(CE_WARN, "sdbc(_sdbc_print) %s", s);
466 	return (0);
467 }
468 #else
469 MOD_DRV_WRAPPER(sdbc, sdbcload, sdbcunload, NULL, "Storage Device Block Cache");
470 #endif /* sun */
471 
472 static int sdbc_inited;
473 
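/*
 * sdbcinit - one-time module initialization.  Records the version string,
 * creates the global cache and configuration locks, registers the ncall
 * services used for remote cache enable/disable and discard, sets up the
 * flush condition variable and the block lock, sizes sdbc_max_devs from
 * nsc_max_devices(), builds the contiguous-mask bitmap and lookup map,
 * and loads the I/O buffer, handle, trace, fault-tolerance, test daemon
 * and hash subsystems.
 */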
474 static int
475 sdbcinit(void)
476 {
477 	int rc;
478 
479 	sdbc_inited = 0;
480 
481 	(void) strncpy(sdbc_version, _VERSION_, sizeof (sdbc_version));
482 
483 	mutex_init(&_sd_cache_lock, NULL, MUTEX_DRIVER, NULL);
484 	mutex_init(&_sdbc_config_lock, NULL, MUTEX_DRIVER, NULL);
485 
486 #ifdef m88k
487 	REGISTER_SVC(SD_DUAL_WRITE,	r_sd_ifs_write);
488 	REGISTER_SVC(SD_DUAL_READ,	r_sd_ifs_read);
489 	REGISTER_SVC(SD_SET_CD,		r_sd_set_cd);
490 	REGISTER_SVC(SD_GETSIZE,	r_sd_getsize);
491 	REGISTER_SVC(SD_DUAL_OPEN,	r_sd_ifs_open);
492 	REGISTER_SVC(SD_REMOTE_FLUSH,	r_sd_remote_flush);
493 	REGISTER_SVC(SD_SGREMOTE_FLUSH,	r_sd_sgremote_flush);
494 	REGISTER_SVC(SD_DISK_IO,	r_sd_disk_io);
495 	REGISTER_SVC(SD_GET_BMAP,	r_rem_get_bmap);
496 
497 	if ((rc = hpf_register_module("SDBC", _sd_hpf_stats)) != 0)
498 		return (rc);
499 #endif
500 	REGISTER_SVC(SD_ENABLE,		r_sd_ifs_cache_enable);
501 	REGISTER_SVC(SD_DISABLE,	r_sd_ifs_cache_disable);
502 	REGISTER_SVC(SD_CD_DISCARD,	r_cd_discard);
503 
504 	cv_init(&_sd_flush_cv, NULL, CV_DRIVER, NULL);
505 
506 	mutex_init(&_sd_block_lk, NULL, MUTEX_DRIVER, NULL);
507 
508 	sdbc_max_devs = nsc_max_devices();
509 
510 	/*
511 	 * Initialize the bitmap array used to determine whether a mask
512 	 * is contiguous (not fragmented), instead of determining this
513 	 * at run time. Also initialize a lookup array for each mask, with
514 	 * the starting position, the length, and the mask subset.
515 	 */
516 	_sd_init_contig_bmap();
517 	_sd_init_lookup_map();
518 
519 	if ((rc = _sdbc_iobuf_load()) != 0)
520 		return (rc);
521 	if ((rc = _sdbc_handles_load()) != 0)
522 		return (rc);
523 	if ((rc = _sdbc_tr_load()) != 0)
524 		return (rc);
525 	if ((rc = _sdbc_ft_load()) != 0)
526 		return (rc);
527 	if ((rc = _sdbc_tdaemon_load()) != 0)
528 		return (rc);
529 	if ((rc = _sdbc_hash_load()) != 0)
530 		return (rc);
531 #ifdef DEBUG
532 	_sdbc_ioj_load();
533 #endif
534 	sdbc_inited = 1;
535 
536 	return (0);
537 }
538 
539 static int
540 sdbcunload(void)
541 {
542 	if (_sd_cache_initialized) {
543 		cmn_err(CE_WARN,
544 		    "sdbc(sdbcunload) cannot unload module - cache in use!");
545 		return (EEXIST);
546 	}
547 #ifdef m88k
548 	UNREGISTER_SVC(SD_DUAL_WRITE);
549 	UNREGISTER_SVC(SD_DUAL_READ);
550 	UNREGISTER_SVC(SD_SET_CD);
551 	UNREGISTER_SVC(SD_GETSIZE);
552 	UNREGISTER_SVC(SD_DUAL_OPEN);
553 	UNREGISTER_SVC(SD_REMOTE_FLUSH);
554 	UNREGISTER_SVC(SD_SGREMOTE_FLUSH);
555 	UNREGISTER_SVC(SD_DISK_IO);
556 	UNREGISTER_SVC(SD_GET_BMAP);
557 
558 	(void) hpf_unregister_module("SDBC");
559 #endif
560 	UNREGISTER_SVC(SD_ENABLE);
561 	UNREGISTER_SVC(SD_DISABLE);
562 	UNREGISTER_SVC(SD_CD_DISCARD);
563 
564 	cv_destroy(&_sd_flush_cv);
565 	mutex_destroy(&_sd_block_lk);
566 
567 	_sdbc_hash_unload();
568 	_sdbc_ft_unload();
569 	_sdbc_tr_unload();
570 	_sdbc_tdaemon_unload();
571 	_sdbc_handles_unload();
572 	_sdbc_iobuf_unload();
573 #ifdef DEBUG
574 	_sdbc_ioj_unload();
575 #endif
576 
577 	mutex_destroy(&_sd_cache_lock);
578 	mutex_destroy(&_sdbc_config_lock);
579 
580 	/*
581 	 * Normally we would unregister memory at deconfig time.
582 	 * However, when chasing things like memory leaks it is
583 	 * useful to defer until unload time.
584 	 */
585 	if (_sdbc_memtype_deconfigure_delayed)
586 		_sdbc_memtype_deconfigure();
587 
588 	return (0);
589 }
590 
591 
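/*
 * sdbcload - module load-time hook called from _init().  Runs sdbcinit()
 * and unwinds any partial initialization via sdbcunload() on failure.
 */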
592 static int
593 sdbcload(void)
594 {
595 	int err;
596 
597 	if ((err = sdbcinit()) != 0) {
598 		(void) sdbcunload();
599 		return (err);
600 	}
601 	return (0);
602 }
603 
604 
605 /* ARGSUSED */
606 
607 static int
608 sdbcopen(dev_t *devp, int flag, int otyp, cred_t *crp)
609 {
610 	int nd = nsc_node_id();
611 
612 	/*
613 	 * If we were statically linked in, then returning an error out
614 	 * of sdbcinit won't prevent someone from coming through here.
615 	 * We must prevent them from getting any further.
616 	 */
617 	if (!sdbc_inited)
618 		return (EINVAL);
619 
620 	if (nd < nsc_min_nodeid) {
621 		cmn_err(CE_WARN,
622 		    "sdbc(sdbcopen) open failed, systemid (%d) must be >= %d",
623 		    nd, nsc_min_nodeid);
624 		return (EINVAL);
625 	}
626 	if (!_sdbc_attached)
627 		return (ENXIO);
628 
629 	return (0);
630 }
631 
632 
633 /* ARGSUSED */
634 
635 static int
636 sdbcclose(dev_t dev, int flag, int otyp, cred_t *crp)
637 {
638 	return (0);
639 }
640 
641 #ifdef _MULTI_DATAMODEL
642 static int
643 convert_ioctl_args(int cmd, void *arg, int mode, _sdbc_ioctl_t *args)
644 /*
645  * convert_ioctl_args - Do a case-by-case conversion of an ILP32 ioctl
646  * structure to an LP64 structure.
647  * The main concern here is whether to sign-extend or not.  The rule
648  * is that pointers are not sign-extended; the rest are obvious.
649  * Since almost everything is sign-extended, the definition of
650  * _sdbc_ioctl32_t uses signed fields.
651  *
652  */
653 {
654 	_sdbc_ioctl32_t args32;
655 
656 	if (ddi_copyin(arg, &args32, sizeof (_sdbc_ioctl32_t), mode))
657 		return (EFAULT);
658 
659 	bzero((void *) args, sizeof (_sdbc_ioctl_t));
660 
661 	switch (cmd) {
662 
663 	case SDBC_UNUSED_1:
664 	case SDBC_UNUSED_2:
665 	case SDBC_UNUSED_3:
666 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
667 		cmn_err(CE_WARN,
668 		    "sdbc(convert_ioctl_args) obsolete sdbc ioctl used");
669 		return (EINVAL);
670 
671 	case SDBC_ADUMP:
672 		args->arg0 = args32.arg0; /* cd */
673 		args->arg1 = (uint32_t)args32.arg1; /* &tt */
674 		args->arg2 = (uint32_t)args32.arg2; /* NULL (buf) */
675 		args->arg3 = args32.arg3; /*  size of buf */
676 		args->arg4 = args32.arg4; /* flag */
677 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
678 		break;
679 
680 	case SDBC_TEST_INIT:
681 		args->arg0 = (uint32_t)args32.arg0; /* fname (char *) */
682 		args->arg1 = args32.arg1; /* index */
683 		args->arg2 = args32.arg2; /* len */
684 		args->arg3 = args32.arg3; /* track size */
685 		args->arg4 = args32.arg4; /* flag */
686 		break;
687 
688 	case SDBC_TEST_START:
689 		args->arg0 = args32.arg0; /* num */
690 		args->arg1 = args32.arg1; /* type */
691 		args->arg2 = args32.arg2; /* loops */
692 		args->arg3 = args32.arg3; /* from */
693 		args->arg4 = args32.arg4; /* seed */
694 		break;
695 
696 	case SDBC_TEST_END:
697 		break;
698 
699 	case SDBC_ENABLE:
700 	case SDBC_VERSION:
701 		args->arg0 = (uint32_t)args32.arg0; /* pointer */
702 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
703 		break;
704 
705 	case SDBC_DISABLE:
706 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
707 		break;
708 
709 	case SDBC_GET_CLUSTER_SIZE:
710 		args->arg0 = (uint32_t)args32.arg0; /* (int * ) */
711 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
712 		break;
713 
714 	/* get the gl_file data */
715 	case SDBC_GET_CLUSTER_DATA:
716 		/* pointer to array[2*cluster_size] */
717 		args->arg0 = (uint32_t)args32.arg0;
718 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
719 		break;
720 
721 	/*  get the size of the global info pages for each board */
722 	case SDBC_GET_GLMUL_SIZES:
723 		args->arg0 = (uint32_t)args32.arg0; /* int[CACHE_MEM_PAD] * */
724 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
725 		break;
726 
727 	/* get the global info about write blocks */
728 	case SDBC_GET_GLMUL_INFO:
729 		/* pointer to array[2*(sum of GLMUL_SIZES)] */
730 		args->arg0 = (uint32_t)args32.arg0;
731 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
732 		break;
733 
734 	case SDBC_SET_CD_HINT:
735 		args->arg0 = args32.arg0; /* cd */
736 		args->arg1 = args32.arg1; /* hint */
737 		args->arg2 = args32.arg2; /* flag */
738 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
739 		break;
740 
741 	case SDBC_GET_CD_HINT:
742 		args->arg0 = args32.arg0;
743 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
744 		break;
745 
746 	case SDBC_SET_NODE_HINT:
747 		args->arg0 = args32.arg0; /* hint */
748 		args->arg1 = args32.arg1; /* flag */
749 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
750 		break;
751 
752 	case SDBC_GET_NODE_HINT:
753 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
754 		break;
755 
756 	case SDBC_STATS:
757 		args->arg0 = (uint32_t)args32.arg0; /* (_sd_stats_t *) */
758 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
759 		break;
760 
761 	case SDBC_ZAP_STATS:
762 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
763 		break;
764 
765 	case SDBC_GET_CD_BLK:
766 		args->arg0 = args32.arg0; /* cd */
767 		args->arg1 = (uint32_t)args32.arg1; /* blk */
768 		args->arg2 = (uint32_t)args32.arg2; /* (addr[5] *) */
769 		break;
770 
771 	case SDBC_GET_CONFIG:
772 		args->arg0 = (uint32_t)args32.arg0; /* (_sdbc_config_t *) */
773 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
774 		break;
775 
776 	case SDBC_SET_CONFIG:
777 		args->arg0 = (uint32_t)args32.arg0; /* (_sdbc_config_t *) */
778 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
779 		break;
780 
781 	case SDBC_MAXFILES:
782 		args->arg0 = (uint32_t)args32.arg0; /* (int * ) */
783 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
784 		break;
785 
786 #ifdef DEBUG
787 	/* toggle flusher flag for testing */
788 	case SDBC_TOGGLE_FLUSH:
789 		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
790 		break;
791 
792 	case SDBC_INJ_IOERR: /* cd, errnum */
793 		args->arg0 = args32.arg0; /* cd */
794 		args->arg1 = args32.arg1; /* i/o error number */
795 		args->arg2 = args32.arg2; /* countdown to issuing error */
796 		break;
797 
798 	/* clear injected i/o errors */
799 	case SDBC_CLR_IOERR: /* cd */
800 		args->arg0 = args32.arg0; /* cd */
801 		break;
802 #endif /* DEBUG */
803 	default:
804 		return (EINVAL);
805 	}
806 
807 	return (0);
808 }
809 #endif /* _MULTI_DATAMODEL */
810 
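/*
 * sdbc_get_cd_blk - handler for the SDBC_GET_CD_BLK ioctl.  Copies in the
 * requested FBA position and the five user-supplied addresses, copies out
 * the cache block size, and looks the block up in the cache.  On a hit the
 * block info, the backing file name, the cached data and, if the block has
 * pending write data, a copy of that data read back from safestore are
 * copied out; on a miss only the file name (if known) is returned along
 * with ENOENT.
 */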
811 static int
812 sdbc_get_cd_blk(_sdbc_ioctl_t *args, int mode)
813 {
814 
815 	_sd_cctl_t *cc_ent;
816 	caddr_t data;
817 	char *taddr;
818 	intptr_t addr[5];
819 #ifdef _MULTI_DATAMODEL
820 	uint32_t addr_32[5];
821 #endif /* _MULTI_DATAMODEL */
822 	char *lookup_file = NULL;
823 	int rc;
824 	sdbc_info_t info;
825 	nsc_off_t fba_pos;	/* disk block number */
826 
827 	if (_sd_cache_initialized == 0) {
828 		return (EINVAL);
829 	}
830 
831 	/* copyin the block number */
832 	if (ddi_copyin((void *)args->arg1, &fba_pos, sizeof (nsc_off_t),
833 	    mode)) {
834 		return (EFAULT);
835 	}
836 
837 #ifdef _MULTI_DATAMODEL
838 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
839 		if (ddi_copyin((void *)args->arg2, addr_32, sizeof (addr_32),
840 		    mode)) {
841 			return (EFAULT);
842 		}
843 		addr[0] = addr_32[0]; /* (sdbc_info_t *) */
844 		addr[1] = addr_32[1]; /* (char *) cdata */
845 		addr[2] = addr_32[2]; /* ( int * ) cblk_size */
846 		addr[3] = addr_32[3]; /* ( char * ) filename */
847 		addr[4] = addr_32[4]; /* ( char *) wdata */
848 	} else {
849 		if (ddi_copyin((void *)args->arg2, addr, sizeof (addr), mode)) {
850 			return (EFAULT);
851 		}
852 	}
853 #else /* _MULTI_DATAMODEL */
854 	if (ddi_copyin((void *)args->arg2, addr, sizeof (addr), mode)) {
855 		return (EFAULT);
856 	}
857 #endif /* _MULTI_DATAMODEL */
858 
859 	(void) copyout(&CACHE_BLOCK_SIZE, (void *)addr[2], sizeof (int));
860 
861 	if (_sd_get_cd_blk((int)args->arg0, FBA_TO_BLK_NUM(fba_pos),
862 	    &cc_ent, &data, &lookup_file)) {
863 		if (lookup_file != NULL)
864 			(void) copyout(lookup_file, (void *)addr[3],
865 			    NSC_MAXPATH);
866 		return (ENOENT);
867 	}
868 	rc = 0;
869 	taddr = NULL;
870 
871 	info.ci_write = cc_ent->cc_write ? 1 : 0;
872 	info.ci_dirty = cc_ent->cc_dirty;
873 	info.ci_valid = cc_ent->cc_valid;
874 	info.ci_cd = CENTRY_CD(cc_ent);
875 	info.ci_dblk = BLK_TO_FBA_NUM(CENTRY_BLK(cc_ent));
876 	(void) copyout(lookup_file, (void *)addr[3], NSC_MAXPATH);
877 	(void) copyout(&info, (void *)addr[0], sizeof (sdbc_info_t));
878 
879 	(void) copyout(data, (void *)addr[1], CACHE_BLOCK_SIZE);
880 
881 	/* get the write data if any */
882 	if (cc_ent->cc_write) {
883 
884 		if (sdbc_safestore) {
885 			cmn_err(CE_WARN,
886 			    "sdbc(sdbc_get_cd_blk) cc_write 0x%p sc-res 0x%p",
887 			    (void *)cc_ent->cc_write,
888 			    (void *)cc_ent->cc_write->sc_res);
889 
890 			if ((taddr = kmem_alloc(CACHE_BLOCK_SIZE,
891 			    KM_NOSLEEP)) == NULL) {
892 				cmn_err(CE_WARN,
893 				    "sdbc(sdbc_get_cd_blk) kmem_alloc failed."
894 				    " cannot get write data");
895 				info.ci_write = NULL;
896 				rc = EFAULT;
897 			} else if (SSOP_READ_CBLOCK(sdbc_safestore,
898 			    cc_ent->cc_write->sc_res, taddr,
899 			    CACHE_BLOCK_SIZE, 0) == SS_ERR) {
900 
901 				cmn_err(CE_WARN, "sdbc(sdbc_get_cd_blk) "
902 				    "safestore read failed");
903 				rc = EFAULT;
904 
905 			} else if (copyout(taddr, (void *)addr[4],
906 			    CACHE_BLOCK_SIZE)) {
907 				cmn_err(CE_WARN,
908 				    "sdbc(sdbc_get_cd_blk) copyout failed."
909 				    " cannot get write data");
910 				rc = EFAULT;
911 			}
912 		}
913 
914 	}
915 
916 	if (taddr)
917 		kmem_free(taddr, CACHE_BLOCK_SIZE);
918 
919 	return (rc);
920 }
921 
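/*
 * sdbcioctl - ioctl(9E) entry point for the sdbc control device.  Copies
 * in the _sdbc_ioctl_t argument block (converting from the ILP32 layout
 * when necessary), allocates a unistat status object, and dispatches on
 * the command; most cases return their status to the caller through
 * spcs_s_ocopyoutf().
 */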
922 /* ARGSUSED */
923 static int
924 sdbcioctl(dev_t dev, int cmd, void *arg, int mode, cred_t *crp, int *rvp)
925 {
926 	int rc = 0;
927 	_sdbc_ioctl_t args;
928 	int convert_32 = 0;
929 	spcs_s_info_t kstatus;
930 
931 	*rvp = 0;
932 
933 #ifdef _MULTI_DATAMODEL
934 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
935 		int rc;
936 		convert_32 = 1;
937 		if ((rc = convert_ioctl_args(cmd, arg, mode, &args)) != 0)
938 			return (rc);
939 	} else {
940 		if (ddi_copyin(arg, &args, sizeof (_sdbc_ioctl_t), mode)) {
941 			return (EFAULT);
942 		}
943 	}
944 #else /* _MULTI_DATAMODEL */
945 	if (ddi_copyin(arg, &args, sizeof (_sdbc_ioctl_t), mode)) {
946 		return (EFAULT);
947 	}
948 #endif /* _MULTI_DATAMODEL */
949 
950 	kstatus = spcs_s_kcreate();
951 	if (!kstatus)
952 		return (ENOMEM);
953 
954 	switch (cmd) {
955 
956 	case SDBC_UNUSED_1:
957 	case SDBC_UNUSED_2:
958 	case SDBC_UNUSED_3:
959 
960 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
961 		    SDBC_EOBSOLETE));
962 
963 	case SDBC_ADUMP:
964 		rc = _sd_adump(&args, rvp);
965 		break;
966 
967 	case SDBC_TEST_INIT:
968 		rc = _sd_test_init(&args);
969 		break;
970 
971 	case SDBC_TEST_START:
972 		rc = _sd_test_start(&args, rvp);
973 		break;
974 
975 	case SDBC_TEST_END:
976 		rc = _sd_test_end();
977 		break;
978 
979 	case SDBC_ENABLE:
980 		mutex_enter(&_sdbc_config_lock);
981 		rc = _sdbc_configure((_sd_cache_param_t *)args.arg0,
982 		    NULL, kstatus);
983 		if (rc && rc != EALREADY && rc != SDBC_ENONETMEM) {
984 			(void) _sdbc_deconfigure(kstatus);
985 			mutex_exit(&_sdbc_config_lock);
986 			return (spcs_s_ocopyoutf(&kstatus,
987 				args.sdbc_ustatus, rc));
988 		}
989 		mutex_exit(&_sdbc_config_lock);
990 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
991 
992 	case SDBC_DISABLE:
993 		mutex_enter(&_sdbc_config_lock);
994 		if (_sd_cache_initialized == 0) {
995 
996 			mutex_exit(&_sdbc_config_lock);
997 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
998 			    SDBC_EDISABLE));
999 		}
1000 		rc = _sdbc_deconfigure(kstatus);
1001 		mutex_exit(&_sdbc_config_lock);
1002 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
1003 
1004 	case SDBC_GET_CLUSTER_SIZE:
1005 		if (_sd_cache_initialized == 0) {
1006 
1007 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1008 			    SDBC_ECLUSTER_SIZE));
1009 		}
1010 
1011 		rc = sd_get_file_info_size((void *)args.arg0);
1012 		break;
1013 
1014 	/* get the gl_file data */
1015 	case SDBC_GET_CLUSTER_DATA:
1016 		if (_sd_cache_initialized == 0) {
1017 
1018 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1019 			    SDBC_ECLUSTER_DATA));
1020 		}
1021 		rc = sd_get_file_info_data((void *)args.arg0);
1022 		break;
1023 
1024 	/*  get the size of the global info pages for each board */
1025 	case SDBC_GET_GLMUL_SIZES:
1026 		if (_sd_cache_initialized == 0) {
1027 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1028 			    SDBC_EGLMUL_SIZE));
1029 		}
1030 		rc = sd_get_glmul_sizes((void *)args.arg0);
1031 		break;
1032 
1033 	/* get the global info about write blocks */
1034 	case SDBC_GET_GLMUL_INFO:
1035 		if (_sd_cache_initialized == 0) {
1036 
1037 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1038 			    SDBC_EGLMUL_INFO));
1039 
1040 		}
1041 		rc = sd_get_glmul_info((void *)args.arg0);
1042 		break;
1043 
1044 	case SDBC_SET_CD_HINT:
1045 		if (_sd_cache_initialized == 0)
1046 			return (spcs_s_ocopyoutf(&kstatus,
1047 			    args.sdbc_ustatus, EINVAL));
1048 		rc = ((args.arg2) ?
1049 		    _sd_set_hint((int)args.arg0, (uint_t)args.arg1) :
1050 		    _sd_clear_hint((int)args.arg0, (uint_t)args.arg1));
1051 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
1052 
1053 	case SDBC_GET_CD_HINT:
1054 		{
1055 			uint_t hint;
1056 
1057 			if (_sd_cache_initialized == 0)
1058 				return (spcs_s_ocopyoutf(&kstatus,
1059 				    args.sdbc_ustatus, EINVAL));
1060 			if ((rc = _sd_get_cd_hint((int)args.arg0, &hint)) == 0)
1061 				*rvp = hint;
1062 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1063 			    rc));
1064 		}
1065 
1066 	case SDBC_SET_NODE_HINT:
1067 		rc = ((args.arg1) ? _sd_set_node_hint((uint_t)args.arg0) :
1068 		    _sd_clear_node_hint((uint_t)args.arg0));
1069 		if (rc)
1070 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1071 			    rc));
1072 		/* FALLTHRU */
1073 	case SDBC_GET_NODE_HINT:
1074 		{
1075 			uint_t hint;
1076 			if ((rc = _sd_get_node_hint(&hint)) == 0)
1077 				*rvp = hint;
1078 			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1079 			    rc));
1080 		}
1081 
1082 	case SDBC_STATS:
1083 		rc = _sd_get_stats((void *)args.arg0, convert_32);
1084 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
1085 
1086 	case SDBC_ZAP_STATS:
1087 		_sd_zap_stats();
1088 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, 0));
1089 
1090 	case SDBC_GET_CD_BLK:
1091 		if (_sd_cache_initialized == 0)
1092 			return (spcs_s_ocopyoutf(&kstatus,
1093 			    args.sdbc_ustatus, EINVAL));
1094 		rc = sdbc_get_cd_blk(&args, mode);
1095 		break;
1096 
1097 	case SDBC_GET_CONFIG:
1098 		{
1099 		_sdbc_config_t sdbc_config_info;
1100 
1101 		if (ddi_copyin((void *)args.arg0,
1102 		    &sdbc_config_info,
1103 		    sizeof (_sdbc_config_t),
1104 		    mode)) {
1105 			spcs_s_kfree(kstatus);
1106 			return (EFAULT);
1107 		}
1108 		rc = _sdbc_get_config(&sdbc_config_info);
1109 		(void) ddi_copyout(&sdbc_config_info,
1110 		    (void *)args.arg0,
1111 		    sizeof (_sdbc_config_t),
1112 		    mode);
1113 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
1114 		}
1115 
1116 	case SDBC_SET_CONFIG:
1117 	{
1118 		_sdbc_config_t mgmt_config_info;
1119 
1120 		if (ddi_copyin((void *)args.arg0,
1121 		    &mgmt_config_info,
1122 		    sizeof (_sdbc_config_t),
1123 		    mode)) {
1124 			spcs_s_kfree(kstatus);
1125 			return (EFAULT);
1126 		}
1127 
1128 		rc = _sdbc_configure(NULL, &mgmt_config_info, kstatus);
1129 		if (rc && rc != EALREADY) {
1130 			(void) _sdbc_deconfigure(kstatus);
1131 			return (spcs_s_ocopyoutf(&kstatus,
1132 				args.sdbc_ustatus, rc));
1133 		}
1134 
1135 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
1136 	}
1137 
1138 	case SDBC_MAXFILES:
1139 		if (copyout(&sdbc_max_devs, (void *)args.arg0,
1140 		    sizeof (sdbc_max_devs)))
1141 			rc = EFAULT;
1142 		else
1143 			rc = 0;
1144 
1145 		break;
1146 
1147 	case SDBC_VERSION:
1148 	{
1149 		cache_version_t cache_version;
1150 
1151 		cache_version.major = sdbc_major_rev;
1152 		cache_version.minor = sdbc_minor_rev;
1153 		cache_version.micro = sdbc_micro_rev;
1154 		cache_version.baseline = sdbc_baseline_rev;
1155 
1156 		if (ddi_copyout(&cache_version, (void *)args.arg0,
1157 			sizeof (cache_version_t), mode)) {
1158 			rc = EFAULT;
1159 			break;
1160 		}
1161 
1162 		break;
1163 	}
1164 
1165 
1166 #ifdef DEBUG
1167 	/* toggle flusher flag for testing */
1168 	case SDBC_TOGGLE_FLUSH:
1169 		_sdbc_flush_flag ^= 1;
1170 		*rvp = _sdbc_flush_flag;
1171 		rc = 0;
1172 
1173 		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
1174 		    SDBC_ETOGGLE_FLUSH, _sdbc_flush_flag ? "on" : "off"));
1175 
1176 
1177 	/* inject i/o errors */
1178 	case SDBC_INJ_IOERR: /* cd, errnum */
1179 		if (_sd_cache_initialized == 0)
1180 			return (spcs_s_ocopyoutf(&kstatus,
1181 			    args.sdbc_ustatus, EINVAL));
1182 		rc = _sdbc_inject_ioerr(args.arg0, args.arg1, args.arg2);
1183 		break;
1184 
1185 	/* clear injected i/o errors */
1186 	case SDBC_CLR_IOERR: /* cd */
1187 		if (_sd_cache_initialized == 0)
1188 			return (spcs_s_ocopyoutf(&kstatus,
1189 			    args.sdbc_ustatus, EINVAL));
1190 		rc = _sdbc_clear_ioerr(args.arg0);
1191 		break;
1192 
1193 #endif /* DEBUG */
1194 	default:
1195 		_sd_print(3, "SDBC unknown ioctl: 0x%x unsupported", cmd);
1196 		rc = EINVAL;
1197 		break;
1198 	}
1199 
1200 	spcs_s_kfree(kstatus);
1201 	return (rc);
1202 }
1203 
1204 
1205 /*
1206  * _sd_timed_block - sleep for up to ticks, or until cvp is signalled.
1207  * ticks - # of ticks to sleep
1208  * cvp - pointer to the cv we wait on while we delay.
1209  *
1210  * NO spin locks can be held at entry!
1211  *
1212  */
1213 void
1214 _sd_timed_block(clock_t ticks, kcondvar_t *cvp)
1215 {
1216 	clock_t ticker;
1217 
1218 	if (drv_getparm(LBOLT, &ticker) != 0)
1219 		cmn_err(CE_WARN, "_sd_timed_block:failed to get current time");
1220 
1221 	mutex_enter(&_sd_block_lk);
1222 	(void) cv_timedwait(cvp, &_sd_block_lk, ticks + ticker);
1223 	mutex_exit(&_sd_block_lk);
1224 
1225 }
1226 
1227 
1228 /*
1229  * _sd_unblock - wake all sleepers waiting on the cv pointed to by cvp.
1230  *
1231  * NO spin locks can be held at entry as we may sleep.
1232  *
1233  */
1234 void
1235 _sd_unblock(kcondvar_t *cvp)
1236 {
1237 
1238 	mutex_enter(&_sd_block_lk);
1239 	cv_broadcast(cvp);
1240 	mutex_exit(&_sd_block_lk);
1241 }
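/*
 * Hypothetical usage sketch for the pair above: a daemon thread could call
 * _sd_timed_block(drv_usectohz(1000000), &my_cv) in its loop to nap for
 * roughly one second, while another thread calls _sd_unblock(&my_cv) to cut
 * the nap short when new work arrives ("my_cv" is an illustrative name, not
 * a cv defined in this file).  Both routines block on _sd_block_lk
 * internally, so no spin locks may be held across either call.
 */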
1242 
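/*
 * _sd_data_log and _sd_data_log_chain below trace the first two words of
 * each FBA in a cache entry (or a chain of entries) through SDTRACE; both
 * compile to no-ops unless _SD_FBA_DATA_LOG is defined.
 */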
1243 /* ARGSUSED */
1244 void
1245 _sd_data_log(int num, _sd_cctl_t *centry, nsc_off_t st, nsc_size_t len)
1246 {
1247 #if defined(_SD_FBA_DATA_LOG)
1248 	nsc_size_t i;
1249 	nsc_off_t blk;
1250 
1251 	blk = BLK_TO_FBA_NUM(CENTRY_BLK(centry));
1252 	for (i = st; i < (st + len); i++)
1253 		SDTRACE(num, CENTRY_CD(centry), 1, blk + i,
1254 			*(int *)(centry->cc_data + FBA_SIZE(i)),
1255 			*(int *)(centry->cc_data + FBA_SIZE(i) + 4));
1256 #endif /* _SD_FBA_DATA_LOG */
1257 }
1258 
1259 /* ARGSUSED */
1260 void
1261 _sd_data_log_chain(int num, _sd_cctl_t *centry, nsc_off_t fba_pos,
1262     nsc_size_t fba_len)
1263 {
1264 #if defined(_SD_FBA_DATA_LOG)
1265 	sdbc_cblk_fba_t st_cblk_len;	/* FBA len of starting cache block */
1266 	sdbc_cblk_fba_t end_cblk_len;	/* FBA len of ending cache block */
1267 	sdbc_cblk_fba_t st_cblk_off;	/* FBA offset into starting cblock */
1268 
1269 	while (CENTRY_BLK(centry) != FBA_TO_BLK_NUM(fba_pos))
1270 		centry = centry->cc_chain;
1271 
1272 	st_cblk_off = BLK_FBA_OFF(fba_pos);
1273 	st_cblk_len = BLK_FBAS - st_cblk_off;
1274 	if (st_cblk_len >= fba_len) {
1275 		end_cblk_len = 0;
1276 		st_cblk_len = fba_len;
1277 	} else {
1278 		end_cblk_len = BLK_FBA_OFF(fba_pos + fba_len);
1279 	}
1280 
1281 	DATA_LOG(num, centry, st_cblk_off, st_cblk_len);
1282 
1283 	fba_len -= st_cblk_len;
1284 	centry = centry->cc_chain;
1285 
1286 	while (fba_len > end_cblk_len) {
1287 		DATA_LOG(num, centry, 0, BLK_FBAS);
1288 		fba_len -= BLK_FBAS;
1289 		centry = centry->cc_chain;
1290 	}
1291 	if (end_cblk_len) DATA_LOG(num, centry, 0, end_cblk_len);
1292 #endif /* _SD_FBA_DATA_LOG */
1293 }
1294 
1295 
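/*
 * _sd_zap_stats - reset the global hit/miss counters, the LRU queue
 * statistics and the per-device shared statistics; a no-op if the
 * statistics area has not been allocated.
 */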
1296 void
1297 _sd_zap_stats(void)
1298 {
1299 	int i;
1300 
1301 	if (_sd_cache_stats == NULL)
1302 		return;
1303 
1304 	_sd_cache_stats->st_rdhits = 0;
1305 	_sd_cache_stats->st_rdmiss = 0;
1306 	_sd_cache_stats->st_wrhits = 0;
1307 	_sd_cache_stats->st_wrmiss = 0;
1308 	_sd_lru_q.sq_noreq_stat = 0;
1309 	_sd_lru_q.sq_req_stat = 0;
1310 
1311 	for (i = 0; i < sdbc_max_devs; i++) {
1312 		_sd_cache_stats->st_shared[i].sh_cache_read  = 0;
1313 		_sd_cache_stats->st_shared[i].sh_cache_write = 0;
1314 		_sd_cache_stats->st_shared[i].sh_disk_read   = 0;
1315 		_sd_cache_stats->st_shared[i].sh_disk_write  = 0;
1316 	}
1317 }
1318 
1319 
1320 /*
1321  * Return the cache sizes used by the Sense Subsystem Status CCW
1322  */
1323 int
1324 _sd_cache_sizes(int *asize, int *wsize)
1325 {
1326 	int	psize;
1327 
1328 	*asize = 0;
1329 	*wsize = 0;
1330 
1331 	/*
1332 	 * add in the total cache size and the
1333 	 * non-volatile (battery-backed) cache size.
1334 	 */
1335 	if (_sd_net_config.sn_configured) {
1336 		psize = _sd_net_config.sn_psize;
1337 		*asize += (_sd_net_config.sn_cpages * psize);
1338 		*wsize += (safestore_config.ssc_wsize);
1339 	}
1340 
1341 	return (0);
1342 }
1343 
1344 
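/*
 * _sd_print - leveled diagnostic output; the message is passed to
 * vcmn_err(CE_NOTE) only when level is at or below _sd_debug_level.
 */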
1345 /*PRINTFLIKE2*/
1346 void
1347 _sd_print(int level, char *fmt, ...)
1348 {
1349 	va_list adx;
1350 	if (level <= _sd_debug_level) {
1351 		va_start(adx, fmt);
1352 		vcmn_err(CE_NOTE, fmt, adx);
1353 		va_end(adx);
1354 
1355 	}
1356 }
1357 
1358 
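/*
 * _sd_get_cd_blk - look up cache block cblk of descriptor cd in the hash
 * table.  On a hit, return 0 along with the cache entry and its data
 * address; return -1 if the descriptor is not open or the block is not
 * cached.  *filename is set whenever the descriptor is open.
 */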
1359 int
1360 _sd_get_cd_blk(int cd, nsc_off_t cblk, _sd_cctl_t **cc, caddr_t *data,
1361     char **filename)
1362 {
1363 	_sd_cctl_t *cc_ent;
1364 
1365 	if (FILE_OPENED(cd) != 0) {
1366 		*filename = _sd_cache_files[cd].cd_info->sh_filename;
1367 		if (cc_ent = (_sd_cctl_t *)
1368 		    _sd_hash_search(cd, cblk, _sd_htable)) {
1369 			*cc = cc_ent;
1370 			*data = (caddr_t)cc_ent->cc_data;
1371 			return (0);
1372 		}
1373 	}
1374 	return (-1);
1375 }
1376 
1377 /*
1378  * Central dynmem processing variables edit routine:
1379  * validate a local copy and transfer it to the global settings.
1380  *
1381  * cache_aging_sec1, sec2, sec3:
1382  * range checked 1 to 255 (arbitrary, but in any case must be <= 2000 due to
1383  *	32-bit signed int limits in a later calculation)
1384  * cache_aging_ct1, ct2, ct3:
1385  * range checked 1 to 255 (only 8 bits are reserved for the aging counter)
1386  *
1387  */
1388 int
1389 sdbc_edit_xfer_process_vars_dm(_dm_process_vars_t *process_vars)
1390 {
1391 	if (process_vars->max_dyn_list > 0)
1392 		dynmem_processing_dm.max_dyn_list = process_vars->max_dyn_list;
1393 
1394 	/* no edit on monitor_dynmem_process */
1395 	dynmem_processing_dm.monitor_dynmem_process =
1396 	    process_vars->monitor_dynmem_process;
1397 	/* no edit on process_directive */
1398 	dynmem_processing_dm.process_directive =
1399 	    process_vars->process_directive;
1400 
1401 	if (process_vars->cache_aging_ct1 > 0 &&
1402 	    process_vars->cache_aging_ct1 <= CACHE_AGING_CT_MAX)
1403 		dynmem_processing_dm.cache_aging_ct1 =
1404 		    process_vars->cache_aging_ct1;
1405 	if (process_vars->cache_aging_ct2 > 0 &&
1406 	    process_vars->cache_aging_ct2 <= CACHE_AGING_CT_MAX)
1407 		dynmem_processing_dm.cache_aging_ct2 =
1408 		    process_vars->cache_aging_ct2;
1409 	if (process_vars->cache_aging_ct3 > 0 &&
1410 	    process_vars->cache_aging_ct3 <= CACHE_AGING_CT_MAX)
1411 		dynmem_processing_dm.cache_aging_ct3 =
1412 		    process_vars->cache_aging_ct3;
1413 	if (process_vars->cache_aging_sec1 > 0 &&
1414 	    process_vars->cache_aging_sec1 <= CACHE_AGING_SEC1_MAX)
1415 		dynmem_processing_dm.cache_aging_sec1 =
1416 		    process_vars->cache_aging_sec1;
1417 	if (process_vars->cache_aging_sec2 > 0 &&
1418 	    process_vars->cache_aging_sec2 <= CACHE_AGING_SEC2_MAX)
1419 		dynmem_processing_dm.cache_aging_sec2 =
1420 		    process_vars->cache_aging_sec2;
1421 	if (process_vars->cache_aging_sec3 > 0 &&
1422 	    process_vars->cache_aging_sec3 <= CACHE_AGING_SEC3_MAX)
1423 		dynmem_processing_dm.cache_aging_sec3 =
1424 		    process_vars->cache_aging_sec3;
1425 	if (process_vars->cache_aging_pcnt1 >= 0 &&
1426 	    process_vars->cache_aging_pcnt1 <= CACHE_AGING_PCNT1_MAX)
1427 		dynmem_processing_dm.cache_aging_pcnt1 =
1428 		    process_vars->cache_aging_pcnt1;
1429 	if (process_vars->cache_aging_pcnt2 >= 0 &&
1430 	    process_vars->cache_aging_pcnt2 <= CACHE_AGING_PCNT2_MAX)
1431 		dynmem_processing_dm.cache_aging_pcnt2 =
1432 		    process_vars->cache_aging_pcnt2;
1433 	if (process_vars->max_holds_pcnt >= 0 &&
1434 	    process_vars->max_holds_pcnt <= MAX_HOLDS_PCNT_MAX)
1435 		dynmem_processing_dm.max_holds_pcnt =
1436 		    process_vars->max_holds_pcnt;
1437 	return (0);
1438 }
1439 
1440 dev_info_t *
1441 sdbc_get_dip()
1442 {
1443 	return (dev_dip);
1444 }
1445