/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/sunmdi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/kobj.h>
#include <sys/devcache.h>
#include <sys/devid_cache.h>
#include <sys/sysmacros.h>

/*
 * Discovery refers to the heroic effort made to discover a device which
 * cannot be accessed at the physical path where it once resided.  Discovery
 * involves walking the entire device tree attaching all possible disk
 * instances, to search for the device referenced by a devid.  Obviously,
 * full device discovery is something to be avoided where possible.
 * Note that simply invoking devfsadm(1M) is equivalent to running full
 * discovery at the devid cache level.
 *
 * Reasons why a disk may not be accessible:
 *	disk powered off
 *	disk removed or cable disconnected
 *	disk or adapter broken
 *
 * Note that discovery is not needed and cannot succeed in any of these
 * cases.
 *
 * When discovery may succeed:
 *	Discovery will result in success when a device has been moved
 *	to a different address.  Note that it's recommended that
 *	devfsadm(1M) be invoked (no arguments required) whenever a system's
 *	h/w configuration has been updated.  Alternatively, a
 *	reconfiguration boot can be used to accomplish the same result.
 *
 * Note that discovery is not necessary to be able to correct an access
 * failure for a device which was powered off.  Assuming the cache has an
 * entry for such a device, simply powering it on should permit the system
 * to access it.  If problems persist after powering it on, invoke
 * devfsadm(1M).
 *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which not
 * all do.
 *
 * Tunables
 *
 * devid_discovery_boot (default 1)
 *	Number of times discovery will be attempted prior to mounting root.
 *	Must be done at least once to recover from corrupted or missing
 *	devid cache backing store.  There is probably no reason to ever
 *	set this greater than one, as a missing device will remain
 *	unavailable no matter how often the system searches for it.
 *
 * devid_discovery_postboot (default 1)
 *	Number of times discovery will be attempted after mounting root.
 *	This must be performed at least once to discover any devices
 *	needed after root is mounted which may have been powered
 *	off and moved before booting.
 *	Setting this to a larger positive number will introduce
 *	some inconsistency in system operation.  Searching for a device
 *	will take an indeterminate amount of time, sometimes slower,
 *	sometimes faster.  In addition, the system will sometimes
 *	discover a newly powered on device, sometimes it won't.
 *	Use of this option is therefore not recommended.
 *
 * devid_discovery_postboot_always (default 0)
 *	Set to 1, the system will always attempt full discovery.
 *
 * devid_discovery_secs (default 0)
 *	Set to a positive value, the system will attempt full discovery
 *	but with a minimum delay between attempts.  A device search
 *	within the specified period will result in failure.
 *
 * devid_cache_read_disable (default 0)
 *	Set to 1 to disable reading /etc/devices/devid_cache.
 *	The devid cache will continue to operate normally, but
 *	at least one discovery attempt will be required.
 *
 * devid_cache_write_disable (default 0)
 *	Set to 1 to disable updates to /etc/devices/devid_cache.
 *	Any updates to the devid cache will not be preserved across a reboot.
 *
 * devid_report_error (default 0)
 *	Set to 1 to enable some error messages related to devid
 *	cache failures.
 *
 * The devid is packed in the cache file as a byte array.  For
 * portability, this could be done in the encoded string format.
 */


int devid_discovery_boot = 1;
int devid_discovery_postboot = 1;
int devid_discovery_postboot_always = 0;
int devid_discovery_secs = 0;

int devid_cache_read_disable = 0;
int devid_cache_write_disable = 0;

int devid_report_error = 0;

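/*
 * Illustrative note (not from the original source): the tunables above are
 * ordinary kernel variables, so on a system using the usual illumos/Solaris
 * mechanisms they could be set persistently from /etc/system, e.g.
 *
 *	set devid_discovery_postboot_always = 1
 *	set devid_cache_write_disable = 1
 *
 * or patched on a live system with mdb(1), e.g.
 *
 *	echo 'devid_report_error/W 1' | mdb -kw
 *
 * The specific settings shown are examples only, not recommendations.
 */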

/*
 * State to manage discovery of devices providing a devid
 */
static int		devid_discovery_busy = 0;
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;


#ifdef	DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Free an nvp element in a list
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
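
/*
 * Illustrative sketch only (not part of the original implementation): the
 * nvlist built by devid_cache_pack_list() above is one top-level nvlist
 * keyed by device path, each value being a sub-nvlist whose DP_DEVID_ID
 * entry holds the devid as a byte array.  A hypothetical consumer could
 * walk such an nvlist with the standard nvpair iterators as below.  The
 * DEVID_CACHE_EXAMPLE guard is not a real build option; it simply keeps
 * this sketch out of any build.
 */
#ifdef	DEVID_CACHE_EXAMPLE
static void
devid_example_walk_packed_nvlist(nvlist_t *nvl)
{
	nvpair_t	*nvp = NULL;
	nvlist_t	*sub;
	uchar_t		*devid_bytes;
	uint_t		devid_len;

	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
		if (nvpair_type(nvp) != DATA_TYPE_NVLIST ||
		    nvpair_value_nvlist(nvp, &sub) != 0)
			continue;
		if (nvlist_lookup_byte_array(sub, DP_DEVID_ID,
		    &devid_bytes, &devid_len) == 0) {
			/* nvpair_name(nvp) is the cached device path */
			cmn_err(CE_CONT, "%s: %u byte devid\n",
			    nvpair_name(nvp), devid_len);
		}
	}
}
#endif	/* DEVID_CACHE_EXAMPLE */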

static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

/* legacy support - see below */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))

static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t *id = (impl_devid_t *)devid;
	major_t major, hint_major;
	char hint[DEVID_HINT_SIZE + 1];
	struct devnames *dnp;
	char **drvp;
	int i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	/*
	 * search for the devid with each driver declaring
	 * itself as a devid registrant.
	 */
	for (major = 0; major < devcnt; major++) {
		if (major == hint_major)
			continue;
		dnp = &devnamesp[major];
		if (dnp->dn_flags & DN_DEVID_REGISTRANT) {
			e_ddi_devid_hold_by_major(major);
		}
	}

	/*
	 * Legacy support: may be removed once an upgrade mechanism
	 * for driver conf files is available.
	 */
	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}

/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}
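
/*
 * Illustrative sketch only (not part of the original implementation): a
 * hypothetical caller would typically pair a cache lookup with a single
 * discovery attempt and one retry, roughly as below.  The
 * DEVID_CACHE_EXAMPLE guard is hypothetical and keeps this out of any
 * build; prototypes are assumed to come from <sys/devid_cache.h>.
 */
#ifdef	DEVID_CACHE_EXAMPLE
static int
devid_example_lookup_with_discovery(ddi_devid_t devid, char *minor_name,
    int *ndevtsp, dev_t **devtsp)
{
	/* First try the cache as it stands */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    ndevtsp, devtsp) == DDI_SUCCESS)
		return (DDI_SUCCESS);

	/* No luck; attempt discovery, which may attach the device */
	if (e_ddi_devid_discovery(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Retry the lookup now that discovery has been attempted */
	return (e_devid_cache_to_devt_list(devid, minor_name,
	    ndevtsp, devtsp));
}
#endif	/* DEVID_CACHE_EXAMPLE */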

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
 *
 * If a devpath is provided it will be used as the path to register the
 * devid against, otherwise we use ddi_pathname(dip).  In both cases
 * we duplicate the path string so that it can be cached/freed independently
 * of the original owner.
 */
static int
e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;
	int pathlen;
	list_t *listp;
	int is_dirty = 0;

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (devpath) {
		pathlen = strlen(devpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(devpath, path, pathlen);
	} else {
		/*
		 * We are willing to accept DS_BOUND nodes if we can form a full
		 * ddi_pathname (i.e. the node is part way to becoming
		 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
		 */
		if (ddi_get_name_addr(dip) == NULL)
			return (DDI_FAILURE);

		fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(dip, fullpath);
		pathlen = strlen(fullpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(fullpath, path, pathlen);
		kmem_free(fullpath, MAXPATHLEN);
	}

	DEVID_LOG_REG(("register", devid, path));

	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}

int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	return (e_devid_cache_register_cmn(dip, devid, NULL));
}
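
/*
 * Illustrative sketch only (not part of the original implementation): leaf
 * drivers do not call e_devid_cache_register() directly; they register a
 * devid through the DDI (ddi_devid_init(9F)/ddi_devid_register(9F)) and the
 * framework updates the cache on their behalf.  The DEVID_CACHE_EXAMPLE
 * guard is hypothetical and keeps this out of any build.
 */
#ifdef	DEVID_CACHE_EXAMPLE
static int
devid_example_fabricate_and_register(dev_info_t *dip, ddi_devid_t *devidp)
{
	/* Fabricate a devid; real drivers usually derive one from the h/w */
	if (ddi_devid_init(dip, DEVID_FAB, 0, NULL, devidp) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Registration is what ultimately lands this path in the cache */
	if (ddi_devid_register(dip, *devidp) != DDI_SUCCESS) {
		ddi_devid_free(*devidp);
		*devidp = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * The driver keeps the handle and frees it at detach time after
	 * ddi_devid_unregister(dip).
	 */
	return (DDI_SUCCESS);
}
#endif	/* DEVID_CACHE_EXAMPLE */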

/*
 * Unregister a device's devid; the devinfo may hit on multiple entries
 * arising from both pHCI and vHCI paths.
 * Called as an instance detaches.
 * Invalidate the devid's devinfo reference.
 * Devid-path remains in the cache.
 */

void
e_devid_cache_unregister(dev_info_t *dip)
{
	nvp_devid_t *np;
	list_t *listp;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
			DEVID_LOG_UNREG((CE_CONT,
			    "unregister: %s\n", np->nvp_devpath));
			np->nvp_flags &= ~NVP_DEVID_DIP;
			np->nvp_dip = NULL;
		}
	}

	rw_exit(nvf_lock(dcfd_handle));
}

int
e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid)
{
	char *path = mdi_pi_pathname(pip);

	return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid,
	    path));
}

/*
 * Purge devid cache of stale devids
 */
void
devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;
	list_t *listp;
	int is_dirty = 0;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = next) {
		next = list_next(listp, np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
			    "cleanup: %s\n", np->nvp_devpath));
			if (!devid_cache_write_disable) {
				nvf_mark_dirty(dcfd_handle);
				is_dirty = 1;
			}
			devid_nvp_unlink_and_free(dcfd_handle, np);
		}
	}

	rw_exit(nvf_lock(dcfd_handle));

	if (is_dirty)
		nvf_wake_daemon();
}


/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int circ;
	struct ddi_minor_data	*dmdp;
	int minor_all = 0;
	int ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
    int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
    int *retndevts, dev_t **retdevts)
{
	char *path, **paths;
	int i, j, n;
	dev_t *devts, *udevts;
	dev_t tdevt;
	int ndevts, undevts, ndevts_alloced;
	dev_info_t *devi, **devis;
	int ndevis, npaths, nalloced;
	ddi_devid_t match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent.  This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}

void
e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
{
	kmem_free(devt_list, ndevts * sizeof (dev_t));
}
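
/*
 * Illustrative sketch only (not part of the original implementation): kernel
 * consumers normally reach this code through the layered DDI interfaces,
 * ddi_lyr_devid_to_devlist(9F)/ddi_lyr_free_devlist(9F), rather than calling
 * e_devid_cache_to_devt_list() directly.  The DEVID_CACHE_EXAMPLE guard is
 * hypothetical and keeps this out of any build.
 */
#ifdef	DEVID_CACHE_EXAMPLE
static int
devid_example_print_devts(ddi_devid_t devid, char *minor_name)
{
	dev_t	*devs;
	int	ndevs, i;

	/* Resolve the devid to the dev_t's currently providing it */
	if (ddi_lyr_devid_to_devlist(devid, minor_name,
	    &ndevs, &devs) != DDI_SUCCESS)
		return (DDI_FAILURE);

	for (i = 0; i < ndevs; i++) {
		/* a real consumer would open or stat the dev_t here */
		cmn_err(CE_CONT, "devid maps to dev_t 0x%lx\n",
		    (ulong_t)devs[i]);
	}

	ddi_lyr_free_devlist(devs, ndevs);
	return (DDI_SUCCESS);
}
#endif	/* DEVID_CACHE_EXAMPLE */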

/*
 * If given a full path and NULL ua, search for a cache entry
 * whose path matches the full path.  On a cache hit duplicate the
 * devid of the matched entry into the given devid (caller
 * must free); nodenamebuf is not touched for this usage.
 *
 * Given a path and a non-NULL unit address, search the cache for any entry
 * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning
 * any node name.  The path should not end in a '/'.  On a cache hit
 * duplicate the devid as before (caller must free) and copy into
 * the caller-provided nodenamebuf (if not NULL) the nodename of the
 * matched entry.
 *
 * We must not make use of nvp_dip since that may be NULL for cached
 * entries that are not present in the current tree.
 */
int
e_devid_cache_path_to_devid(char *path, char *ua,
    char *nodenamebuf, ddi_devid_t *devidp)
{
	size_t pathlen, ualen;
	int rv = DDI_FAILURE;
	nvp_devid_t *np;
	list_t *listp;
	char *cand;

	if (path == NULL || *path == '\0' || (ua && *ua == '\0') ||
	    devidp == NULL)
		return (DDI_FAILURE);

	*devidp = NULL;

	if (ua) {
		pathlen = strlen(path);
		ualen = strlen(ua);
	}

	rw_enter(nvf_lock(dcfd_handle), RW_READER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		size_t nodelen, candlen, n;
		ddi_devid_t devid_dup;
		char *uasep, *node;

		if (np->nvp_devid == NULL)
			continue;

		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "pathsearch: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}

		cand = np->nvp_devpath;		/* candidate path */

		/* If a full pathname was provided the compare is easy */
		if (ua == NULL) {
			if (strcmp(cand, path) == 0)
				goto match;
			else
				continue;
		}

		/*
		 * The compare for initial path plus ua and unknown nodename
		 * is trickier.
		 *
		 * Does the initial path component match 'path'?
		 */
		if (strncmp(path, cand, pathlen) != 0)
			continue;

		candlen = strlen(cand);

		/*
		 * The next character must be a '/' and there must be no
		 * further '/' thereafter.  Begin by checking that the
		 * candidate is long enough to include at minimum a
		 * "/<nodename>@<ua>" after the initial portion already
		 * matched assuming a nodename length of 1.
		 */
		if (candlen < pathlen + 1 + 1 + 1 + ualen ||
		    cand[pathlen] != '/' ||
		    strchr(cand + pathlen + 1, '/') != NULL)
			continue;

		node = cand + pathlen + 1;	/* <node>@<ua> string */

		/*
		 * Find the '@' before the unit address.  Check for
		 * unit address match.
		 */
		if ((uasep = strchr(node, '@')) == NULL)
			continue;

		/*
		 * Check we still have enough length and that ua matches
		 */
		nodelen = (uintptr_t)uasep - (uintptr_t)node;
		if (candlen < pathlen + 1 + nodelen + 1 + ualen ||
		    strncmp(ua, uasep + 1, ualen) != 0)
			continue;
match:
		n = ddi_devid_sizeof(np->nvp_devid);
		devid_dup = kmem_alloc(n, KM_SLEEP);	/* caller must free */
		(void) bcopy(np->nvp_devid, devid_dup, n);
		*devidp = devid_dup;

		if (ua && nodenamebuf) {
			(void) strncpy(nodenamebuf, node, nodelen);
			nodenamebuf[nodelen] = '\0';
		}

		rv = DDI_SUCCESS;
		break;
	}

	rw_exit(nvf_lock(dcfd_handle));

	return (rv);
}
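
/*
 * Illustrative sketch only (not part of the original implementation): a
 * hypothetical lookup by parent path and unit address, with an unknown node
 * name.  The path and unit address below are made-up examples and the
 * DEVID_CACHE_EXAMPLE guard is hypothetical.
 */
#ifdef	DEVID_CACHE_EXAMPLE
static void
devid_example_path_lookup(void)
{
	ddi_devid_t	devid;
	char		nodename[MAXNAMELEN];
	char		*devidstr;

	/* wildcard node name: match "<parent>/<anything>@1,0" */
	if (e_devid_cache_path_to_devid("/pci@0,0/pci1000,30@10", "1,0",
	    nodename, &devid) == DDI_SUCCESS) {
		devidstr = ddi_devid_str_encode(devid, NULL);
		if (devidstr) {
			cmn_err(CE_CONT, "node %s has devid %s\n",
			    nodename, devidstr);
			ddi_devid_str_free(devidstr);
		}
		/* the returned devid is a private copy; free it */
		ddi_devid_free(devid);
	}
}
#endif	/* DEVID_CACHE_EXAMPLE */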

#ifdef	DEBUG
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *devidstr = ddi_devid_str_encode(devid, NULL);
	if (path) {
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
	} else {
		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
	}
	ddi_devid_str_free(devidstr);
}
#endif	/* DEBUG */