/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/kobj.h>
#include <sys/devcache.h>
#include <sys/devid_cache.h>
#include <sys/sysmacros.h>

/*
 * Discovery refers to the heroic effort made to discover a device which
 * cannot be accessed at the physical path where it once resided.  Discovery
 * involves walking the entire device tree attaching all possible disk
 * instances, to search for the device referenced by a devid.  Obviously,
 * full device discovery is something to be avoided where possible.
 * Note that simply invoking devfsadm(1M) is equivalent to running full
 * discovery at the devid cache level.
 *
 * Reasons why a disk may not be accessible:
 *	disk powered off
 *	disk removed or cable disconnected
 *	disk or adapter broken
 *
 * Note that discovery is not needed and cannot succeed in any of these
 * cases.
 *
 * When discovery may succeed:
 *	Discovery will result in success when a device has been moved
 *	to a different address.  Note that it's recommended that
 *	devfsadm(1M) be invoked (no arguments required) whenever a system's
 *	h/w configuration has been updated.  Alternatively, a
 *	reconfiguration boot can be used to accomplish the same result.
 *
 * Note that discovery is not necessary to be able to correct an access
 * failure for a device which was powered off.  Assuming the cache has an
 * entry for such a device, simply powering it on should permit the system
 * to access it.  If problems persist after powering it on, invoke
 * devfsadm(1M).
 *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which
 * not all do.
 *
 * Tunables
 *
 * devid_discovery_boot (default 1)
 *	Number of times discovery will be attempted prior to mounting root.
 *	Must be done at least once to recover from a corrupted or missing
 *	devid cache backing store.  Probably there's no reason to ever
 *	set this to greater than one, as a missing device will remain
 *	unavailable no matter how often the system searches for it.
 *
 * devid_discovery_postboot (default 1)
 *	Number of times discovery will be attempted after mounting root.
 *	This must be performed at least once to discover any devices
 *	needed after root is mounted which may have been powered
 *	off and moved before booting.
 *	Setting this to a larger positive number will introduce
 *	some inconsistency in system operation.  Searching for a device
 *	will take an indeterminate amount of time, sometimes slower,
 *	sometimes faster.  In addition, the system will sometimes
 *	discover a newly powered on device, sometimes it won't.
 *	Use of this option is therefore not recommended.
 *
 * devid_discovery_postboot_always (default 0)
 *	Set to 1, the system will always attempt full discovery.
 *
 * devid_discovery_secs (default 0)
 *	Set to a positive value, the system will attempt full discovery
 *	but with a minimum delay between attempts.  A device search
 *	within the period of time specified will result in failure.
 *
 * devid_cache_read_disable (default 0)
 *	Set to 1 to disable reading /etc/devices/devid_cache.
 *	The devid cache will continue to operate normally but
 *	at least one discovery attempt will be required.
 *
 * devid_cache_write_disable (default 0)
 *	Set to 1 to disable updates to /etc/devices/devid_cache.
 *	Any updates to the devid cache will not be preserved across a reboot.
 *
 * devid_report_error (default 0)
 *	Set to 1 to enable some error messages related to devid
 *	cache failures.
 *
 * The devid is packed in the cache file as a byte array.  For
 * portability, this could be done in the encoded string format.
 */
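
/*
 * For illustration only: the tunables described above are ordinary kernel
 * globals, so they could be set from /etc/system, assuming the symbols
 * resolve in genunix, e.g.
 *
 *	set genunix:devid_discovery_secs = 60
 *	set genunix:devid_cache_write_disable = 1
 *
 * A reboot is required for /etc/system settings to take effect.
 */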


int devid_discovery_boot = 1;
int devid_discovery_postboot = 1;
int devid_discovery_postboot_always = 0;
int devid_discovery_secs = 0;

int devid_cache_read_disable = 0;
int devid_cache_write_disable = 0;

int devid_report_error = 0;


/*
 * State to manage discovery of devices providing a devid
 */
static int		devid_discovery_busy = 0;
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;


#ifdef	DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Free an nvp element in a list
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

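/*
 * Illustrative sketch (not a literal dump) of the packed nvlist layout
 * consumed above and produced by devid_cache_pack_list() below; the
 * device pathname shown is hypothetical:
 *
 *	nvlist {
 *		"/pci@0,0/pci1022,7450@2/pci1000,3060@3/sd@0,0" -> nvlist {
 *			DP_DEVID_ID -> byte array (packed ddi_devid_t)
 *		}
 *		... one sub-nvlist per cached device path ...
 *	}
 */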
/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd", "dad" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))


static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t	*id = (impl_devid_t *)devid;
	major_t		major, hint_major;
	char		hint[DEVID_HINT_SIZE + 1];
	char		**drvp;
	int		i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}


/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}
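
/*
 * A minimal sketch, not lifted from any particular caller, of how a lookup
 * path might combine the cache search below with discovery: try the cache,
 * and if nothing is found, attempt discovery once and retry.  The devid,
 * minor_name, ndevts and devts variables are assumed to be supplied by
 * the caller.
 *
 *	if (e_devid_cache_to_devt_list(devid, minor_name,
 *	    &ndevts, &devts) == DDI_SUCCESS)
 *		return (DDI_SUCCESS);
 *	if (e_ddi_devid_discovery(devid) == DDI_SUCCESS)
 *		return (e_devid_cache_to_devt_list(devid, minor_name,
 *		    &ndevts, &devts));
 *	return (DDI_FAILURE);
 */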

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
 */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;
	int pathlen;
	list_t *listp;
	int is_dirty = 0;

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, fullpath);
	pathlen = strlen(fullpath) + 1;
	path = kmem_alloc(pathlen, KM_SLEEP);
	bcopy(fullpath, path, pathlen);
	kmem_free(fullpath, MAXPATHLEN);

	DEVID_LOG_REG(("register", devid, path));

	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}
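
/*
 * A sketch, not taken from any particular driver, of how a target driver
 * might fabricate and register a devid at attach(9E) time; registration
 * through the DDI interfaces is what ultimately reaches
 * e_devid_cache_register() above.  "un_devid" is a hypothetical
 * per-instance field that would normally be unregistered and freed
 * again at detach time.
 *
 *	if (ddi_devid_init(dip, DEVID_FAB, 0, NULL, &un_devid) ==
 *	    DDI_SUCCESS)
 *		(void) ddi_devid_register(dip, un_devid);
 */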

/*
 * Unregister a device's devid
 * Called as an instance detaches
 * Invalidate the devid's devinfo reference
 * Devid-path remains in the cache
 */
void
e_devid_cache_unregister(dev_info_t *dip)
{
	nvp_devid_t *np;
	list_t *listp;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
			DEVID_LOG_UNREG((CE_CONT,
			    "unregister: %s\n", np->nvp_devpath));
			np->nvp_flags &= ~NVP_DEVID_DIP;
			np->nvp_dip = NULL;
			break;
		}
	}

	rw_exit(nvf_lock(dcfd_handle));
}

/*
 * Purge devid cache of stale devids
 */
void
devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;
	list_t *listp;
	int is_dirty = 0;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = next) {
		next = list_next(listp, np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
			    "cleanup: %s\n", np->nvp_devpath));
			if (!devid_cache_write_disable) {
				nvf_mark_dirty(dcfd_handle);
				is_dirty = 1;
			}
			devid_nvp_unlink_and_free(dcfd_handle, np);
		}
	}

	rw_exit(nvf_lock(dcfd_handle));

	if (is_dirty)
		nvf_wake_daemon();
}


/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int		circ;
	struct ddi_minor_data	*dmdp;
	int		minor_all = 0;
	int		ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent.  This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}

void
e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
{
	kmem_free(devt_list, ndevts * sizeof (dev_t));
}

#ifdef	DEBUG
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *devidstr = ddi_devid_str_encode(devid, NULL);
	if (path) {
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
	} else {
		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
	}
	ddi_devid_str_free(devidstr);
}
#endif	/* DEBUG */
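
/*
 * For illustration only: a layered consumer might resolve a devid to a
 * dev_t list through the wrapper mentioned above, ddi_lyr_devid_to_devlist(),
 * and release the list when done.  How the devid itself was obtained is
 * outside the scope of this sketch.
 *
 *	int	ndevs;
 *	dev_t	*devs;
 *
 *	if (ddi_lyr_devid_to_devlist(devid, DEVID_MINOR_NAME_ALL,
 *	    &ndevs, &devs) == DDI_SUCCESS) {
 *		... use devs[0..ndevs-1] ...
 *		ddi_lyr_free_devlist(devs, ndevs);
 *	}
 */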