/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/kobj.h>
#include <sys/devcache.h>
#include <sys/devid_cache.h>
#include <sys/sysmacros.h>

/*
 * Discovery refers to the heroic effort made to discover a device which
 * cannot be accessed at the physical path where it once resided.  Discovery
 * involves walking the entire device tree attaching all possible disk
 * instances, to search for the device referenced by a devid.  Obviously,
 * full device discovery is something to be avoided where possible.
 * Note that simply invoking devfsadm(1M) is equivalent to running full
 * discovery at the devid cache level.
 *
 * Reasons why a disk may not be accessible:
 *	disk powered off
 *	disk removed or cable disconnected
 *	disk or adapter broken
 *
 * Note that discovery is not needed and cannot succeed in any of these
 * cases.
 *
 * When discovery may succeed:
 *	Discovery will result in success when a device has been moved
 *	to a different address.  Note that it's recommended that
 *	devfsadm(1M) be invoked (no arguments required) whenever a system's
 *	h/w configuration has been updated.  Alternatively, a
 *	reconfiguration boot can be used to accomplish the same result.
 *
 * Note that discovery is not necessary to be able to correct an access
 * failure for a device which was powered off.  Assuming the cache has an
 * entry for such a device, simply powering it on should permit the system
 * to access it.  If problems persist after powering it on, invoke
 * devfsadm(1M).
 *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which not
 * all do.
 *
 * Tunables
 *
 * devid_discovery_boot (default 1)
 *	Number of times discovery will be attempted prior to mounting root.
 *	Must be done at least once to recover from corrupted or missing
 *	devid cache backing store.  Probably there's no reason to ever
 *	set this to greater than one as a missing device will remain
 *	unavailable no matter how often the system searches for it.
 *
 * devid_discovery_postboot (default 1)
 *	Number of times discovery will be attempted after mounting root.
 *	This must be performed at least once to discover any devices
 *	needed after root is mounted which may have been powered
 *	off and moved before booting.
 *	Setting this to a larger positive number will introduce
 *	some inconsistency in system operation.  Searching for a device
 *	will take an indeterminate amount of time, sometimes slower,
 *	sometimes faster.  In addition, the system will sometimes
 *	discover a newly powered on device, sometimes it won't.
 *	Use of this option is therefore not recommended.
 *
 * devid_discovery_postboot_always (default 0)
 *	Set to 1, the system will always attempt full discovery.
 *
 * devid_discovery_secs (default 0)
 *	Set to a positive value, the system will attempt full discovery
 *	but with a minimum delay between attempts.  A device search
 *	within the period of time specified will result in failure.
 *
 * devid_cache_read_disable (default 0)
 *	Set to 1 to disable reading /etc/devices/devid_cache.
 *	Devid cache will continue to operate normally but
 *	at least one discovery attempt will be required.
 *
 * devid_cache_write_disable (default 0)
 *	Set to 1 to disable updates to /etc/devices/devid_cache.
 *	Any updates to the devid cache will not be preserved across a reboot.
 *
 * devid_report_error (default 0)
 *	Set to 1 to enable some error messages related to devid
 *	cache failures.
 *
 * The devid is packed in the cache file as a byte array.  For
 * portability, this could be done in the encoded string format.
 */


int devid_discovery_boot = 1;
int devid_discovery_postboot = 1;
int devid_discovery_postboot_always = 0;
int devid_discovery_secs = 0;

int devid_cache_read_disable = 0;
int devid_cache_write_disable = 0;

int devid_report_error = 0;


/*
 * State to manage discovery of devices providing a devid
 */
static int		devid_discovery_busy = 0;
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;


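/*
 * Debug tracing flags, available on DEBUG kernels only
 */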
#ifdef	DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

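/*
 * Free a single devid cache element and the devpath/devid it holds
 */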
static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

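/*
 * Free the entire list of devid cache elements
 */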
static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Unlink a devid cache element from the list and free it
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

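/*
 * Determine whether a discovery attempt is currently permitted,
 * consuming the devid_discovery_* tunables as appropriate.
 * Caller must hold devid_discovery_mutex.
 */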
static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

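/*
 * Attach and hold a driver's installed instances, then drop the hold
 */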
static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

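/*
 * Disk drivers held (and thereby attached) during early-boot discovery
 */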
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd", "dad" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))


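/*
 * Discover a device prior to mounting root by holding the devid's
 * hint driver along with the fixed list of disk drivers above,
 * which attaches their instances.
 */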
static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t	*id = (impl_devid_t *)devid;
	major_t		major, hint_major;
	char		hint[DEVID_HINT_SIZE + 1];
	char		**drvp;
	int		i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}


/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
 */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;
	int pathlen;
	list_t *listp;
	int is_dirty = 0;

	/*
	 * We are willing to accept DS_BOUND nodes if we can form a full
	 * ddi_pathname (i.e. the node is part way to becoming
	 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
	 */
	if (ddi_get_name_addr(dip) == NULL) {
		return (DDI_FAILURE);
	}

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, fullpath);
	pathlen = strlen(fullpath) + 1;
	path = kmem_alloc(pathlen, KM_SLEEP);
	bcopy(fullpath, path, pathlen);
	kmem_free(fullpath, MAXPATHLEN);

	DEVID_LOG_REG(("register", devid, path));

	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}

/*
 * Unregister a device's devid
 * Called as an instance detaches
 * Invalidate the devid's devinfo reference
 * Devid-path remains in the cache
 */
void
e_devid_cache_unregister(dev_info_t *dip)
{
	nvp_devid_t *np;
	list_t *listp;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
			DEVID_LOG_UNREG((CE_CONT,
			    "unregister: %s\n", np->nvp_devpath));
			np->nvp_flags &= ~NVP_DEVID_DIP;
			np->nvp_dip = NULL;
			break;
		}
	}

	rw_exit(nvf_lock(dcfd_handle));
}

/*
 * Purge devid cache of stale devids
 */
void
devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;
	list_t *listp;
	int is_dirty = 0;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = next) {
		next = list_next(listp, np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
			    "cleanup: %s\n", np->nvp_devpath));
			if (!devid_cache_write_disable) {
				nvf_mark_dirty(dcfd_handle);
				is_dirty = 1;
			}
			devid_nvp_unlink_and_free(dcfd_handle, np);
		}
	}

	rw_exit(nvf_lock(dcfd_handle));

	if (is_dirty)
		nvf_wake_daemon();
}


/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int			circ;
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}

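/*
 * Free a dev_t list returned by e_devid_cache_to_devt_list()
 */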
void
e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
{
	kmem_free(devt_list, ndevts * sizeof (dev_t));
}

#ifdef	DEBUG
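/*
 * Log a devid (and optional path) in string form
 */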
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *devidstr = ddi_devid_str_encode(devid, NULL);
	if (path) {
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
	} else {
		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
	}
	ddi_devid_str_free(devidstr);
}
#endif	/* DEBUG */