/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */


/*
 * Niagara 2 Random Number Generator (RNG) driver
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/param.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/hsvc.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/n2rng.h>
#include <fips/fips_checksum.h>

static int	n2rng_attach(dev_info_t *, ddi_attach_cmd_t);
static int	n2rng_detach(dev_info_t *, ddi_detach_cmd_t);
static int	n2rng_suspend(n2rng_t *);
static int	n2rng_resume(n2rng_t *);
static uint64_t sticks_per_usec(void);
u_longlong_t	gettick(void);
static int	n2rng_init_ctl(n2rng_t *);
static void	n2rng_uninit_ctl(n2rng_t *);
static int	n2rng_config(n2rng_t *);
static void	n2rng_config_task(void * targ);

/*
 * Device operations.
 */

static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	n2rng_attach,		/* devo_attach */
	n2rng_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"N2 RNG Driver",		/* drv_linkinfo */
	&devops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	&modldrv,			/* ml_linkage */
	NULL
};

/*
 * Driver globals: soft state.
 */
static void	*n2rng_softstate = NULL;

/*
 * Hypervisor NCS services information.
 */
static boolean_t ncs_hsvc_available = B_FALSE;

#define	NVERSIONS	2

/*
 * HV API versions supported by this driver.
 */
static hsvc_info_t ncs_hsvc[NVERSIONS] = {
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 2, 0, DRIVER },	/* v2.0 */
	{ HSVC_REV_1, NULL, HSVC_GROUP_RNG, 1, 0, DRIVER },	/* v1.0 */
};
int	ncs_version_index;	/* index into ncs_hsvc[] */

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;

	rv = ddi_soft_state_init(&n2rng_softstate, sizeof (n2rng_t), 1);
	if (rv != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&n2rng_softstate);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
n2rng_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	n2rng_t		*n2rng = NULL;
	int		instance;
	int		rv;
	int		version;
	uint64_t	ncs_minor_ver;

	instance = ddi_get_instance(dip);
	DBG1(NULL, DENTRY, "n2rng_attach called, instance %d", instance);
	/*
	 * Only instance 0 of n2rng driver is allowed.
	 */
	if (instance != 0) {
		n2rng_diperror(dip, "only one instance (0) allowed");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_RESUME:
		n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate,
		    instance);
		if (n2rng == NULL) {
			n2rng_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		return (n2rng_resume(n2rng));

	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	rv = ddi_soft_state_zalloc(n2rng_softstate, instance);
	if (rv != DDI_SUCCESS) {
		n2rng_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	ASSERT(n2rng != NULL);
	n2rng->n_dip = dip;

	mutex_init(&n2rng->n_lock, NULL, MUTEX_DRIVER, NULL);
	n2rng->n_flags = 0;
	n2rng->n_timeout_id = 0;
	n2rng->n_sticks_per_usec = sticks_per_usec();

	/* Determine binding type */
	n2rng->n_binding_name = ddi_binding_name(dip);
	if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_N2,
	    strlen(N2RNG_BINDNAME_N2)) == 0) {
		/*
		 * Niagara 2
		 */
		n2rng->n_binding = N2RNG_CPU_N2;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_VF,
	    strlen(N2RNG_BINDNAME_VF)) == 0) {
		/*
		 * Victoria Falls
		 */
		n2rng->n_binding = N2RNG_CPU_VF;
	} else if (strncmp(n2rng->n_binding_name, N2RNG_BINDNAME_KT,
	    strlen(N2RNG_BINDNAME_KT)) == 0) {
		/*
		 * Rainbow Falls
		 */
		n2rng->n_binding = N2RNG_CPU_KT;
	} else {
		n2rng_diperror(dip,
		    "unable to determine n2rng (cpu) binding (%s)",
		    n2rng->n_binding_name);
		goto errorexit;
	}
	DBG1(n2rng, DCHATTY, "n2rng_attach: n2rng->n_binding_name = %s",
	    n2rng->n_binding_name);

	/* Negotiate HV api version number */
	for (version = 0; version < NVERSIONS; version++) {
		rv = hsvc_register(&ncs_hsvc[version], &ncs_minor_ver);
		if (rv == 0)
			break;

		DBG4(n2rng, DCHATTY, "n2rng_attach: grp: 0x%lx, maj: %ld, "
		    "min: %ld, errno: %d", ncs_hsvc[version].hsvc_group,
		    ncs_hsvc[version].hsvc_major,
		    ncs_hsvc[version].hsvc_minor, rv);
	}
	if (version == NVERSIONS) {
		for (version = 0; version < NVERSIONS; version++) {
			cmn_err(CE_WARN,
			    "%s: cannot negotiate hypervisor services "
			    "group: 0x%lx major: %ld minor: %ld errno: %d",
			    ncs_hsvc[version].hsvc_modname,
			    ncs_hsvc[version].hsvc_group,
			    ncs_hsvc[version].hsvc_major,
			    ncs_hsvc[version].hsvc_minor, rv);
		}
		goto errorexit;
	}
	ncs_version_index = version;
	ncs_hsvc_available = B_TRUE;
	DBG2(n2rng, DATTACH, "n2rng_attach: ncs api version (%ld.%ld)",
	    ncs_hsvc[ncs_version_index].hsvc_major, ncs_minor_ver);
	n2rng->n_hvapi_major_version = ncs_hsvc[ncs_version_index].hsvc_major;
	n2rng->n_hvapi_minor_version = (uint_t)ncs_minor_ver;

	/*
	 * Verify that we are running the version 2.0 or later API on
	 * systems with multiple RNGs.
	 */
	if ((n2rng->n_binding != N2RNG_CPU_N2) &&
	    (n2rng->n_hvapi_major_version < 2)) {
		cmn_err(CE_NOTE, "n2rng: Incompatible hypervisor api "
		    "version %d.%d detected", n2rng->n_hvapi_major_version,
		    n2rng->n_hvapi_minor_version);
	}

	/* Initialize ctl structure if running in the control domain */
	if (n2rng_init_ctl(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to initialize rng "
		    "control structures");
		goto errorexit;
	}

	/* Allocate single thread task queue for rng diags and registration */
	n2rng->n_taskq = ddi_taskq_create(dip, "n2rng_taskq", 1,
	    TASKQ_DEFAULTPRI, 0);

	if (n2rng->n_taskq == NULL) {
		n2rng_diperror(dip, "ddi_taskq_create() failed");
		goto errorexit;
	}

	/* Dispatch task to configure the RNG and register with KCF */
	if (ddi_taskq_dispatch(n2rng->n_taskq, n2rng_config_task,
	    (void *)n2rng, DDI_SLEEP) != DDI_SUCCESS) {
		n2rng_diperror(dip, "ddi_taskq_dispatch() failed");
		goto errorexit;
	}

	if (n2rng->n_is_fips == B_TRUE) {
		/*
		 * FIPS POST test: feed the known seed and make sure it
		 * produces the known random number.
		 */
		if (n2rng_fips_rng_post() != CRYPTO_SUCCESS) {
			n2rng_diperror(dip, "n2rng: FIPS POST test failed\n");
			goto errorexit;
		}
	}

	return (DDI_SUCCESS);

errorexit:
	/* Wait for pending config tasks to complete and delete the taskq */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	n2rng_uninit_ctl(n2rng);

	(void) n2rng_uninit(n2rng);

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (DDI_FAILURE);
}

static int
n2rng_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	int		rv;
	n2rng_t		*n2rng;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);
	n2rng = (n2rng_t *)ddi_get_soft_state(n2rng_softstate, instance);
	if (n2rng == NULL) {
		n2rng_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_SUSPEND:
		return (n2rng_suspend(n2rng));
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/* Destroy task queue first to ensure configuration has completed */
	if (n2rng->n_taskq != NULL) {
		ddi_taskq_destroy(n2rng->n_taskq);
		n2rng->n_taskq = NULL;
	}

	/* Untimeout pending config retry operations */
	mutex_enter(&n2rng->n_lock);
	tid = n2rng->n_timeout_id;
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	if (tid) {
		DBG1(n2rng, DCHATTY, "n2rng_detach: untimeout pending retry "
		    "id = %x", tid);
		(void) untimeout(tid);
	}

	n2rng_uninit_ctl(n2rng);

	/* unregister with KCF---also tears down FIPS state */
	rv = n2rng_uninit(n2rng) ? DDI_FAILURE : DDI_SUCCESS;

	if (ncs_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&ncs_hsvc[ncs_version_index]);
		ncs_hsvc_available = B_FALSE;
	}

	mutex_destroy(&n2rng->n_lock);
	ddi_soft_state_free(n2rng_softstate, instance);

	return (rv);
}

/*ARGSUSED*/
static int
n2rng_suspend(n2rng_t *n2rng)
{
	/* unregister with KCF---also tears down FIPS state */
	if (n2rng_uninit(n2rng) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "n2rng: unable to unregister from KCF");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
n2rng_resume(n2rng_t *n2rng)
{
	/* Assume clock is same speed and all data structures are intact */

	/* Re-configure the RNG hardware and register with KCF */
	return (n2rng_config(n2rng));
}

/*
 * Map a hypervisor error code to a Solaris errno.  Only H_ENORADDR,
 * H_EBADALIGN, H_EWOULDBLOCK, and H_EIO are meaningful to this device.
 * Any other error codes are mapped to EINVAL.
 */
int
n2rng_herr2kerr(uint64_t hv_errcode)
{
	int	s_errcode;

	switch (hv_errcode) {
	case H_EWOULDBLOCK:
		s_errcode = EWOULDBLOCK;
		break;
	case H_EIO:
		s_errcode = EIO;
		break;
	case H_EBUSY:
		s_errcode = EBUSY;
		break;
	case H_EOK:
		s_errcode = 0;
		break;
	case H_ENOACCESS:
		s_errcode = EPERM;
		break;
	case H_ENORADDR:
	case H_EBADALIGN:
	default:
		s_errcode = EINVAL;
		break;
	}
	return (s_errcode);
}

/*
 * Waits approximately delay_sticks counts of the stick register.
 * Times shorter than one sys clock tick (10ms on most systems) are
 * done by busy waiting.
 */
void
cyclesleep(n2rng_t *n2rng, uint64_t delay_sticks)
{
	uint64_t	end_stick = gettick() + delay_sticks;
	int64_t		sticks_to_wait;
	clock_t		sys_ticks_to_wait;
	clock_t		usecs_to_wait;

	/*CONSTCOND*/
	while (1) {
		sticks_to_wait = end_stick - gettick();
		if (sticks_to_wait <= 0) {
			return;
		}

		usecs_to_wait = sticks_to_wait / n2rng->n_sticks_per_usec;
		sys_ticks_to_wait = drv_usectohz(usecs_to_wait);

		if (sys_ticks_to_wait > 0) {
			/* sleep */
			delay(sys_ticks_to_wait);
		} else if (usecs_to_wait > 0) {
			/* busy wait */
			drv_usecwait(usecs_to_wait);
		}
	}
}

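/*
 * log_internal_errors()
 *
 * Log a console message describing an unexpected hypervisor error code
 * returned by the named hypervisor call.
 */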
static void
log_internal_errors(uint64_t hverr, char *fname)
{
	switch (hverr) {
	case H_EBADALIGN:
		cmn_err(CE_WARN,
		    "n2rng: internal alignment "
		    "problem");
		break;
	case H_ENORADDR:
		cmn_err(CE_WARN, "n2rng: internal "
		    "invalid address");
		break;
	case H_ENOACCESS:
		cmn_err(CE_WARN, "n2rng: access failure");
		break;
	case H_EWOULDBLOCK:
		cmn_err(CE_WARN, "n2rng: hardware busy");
		break;
	default:
		cmn_err(CE_NOTE,
		    "n2rng: %s "
		    "unexpectedly "
		    "returned hverr %ld", fname, hverr);
		break;
	}
}

/*
 * Collects a buffer full of bits, using the specified setup. numbytes
 * must be a multiple of 8. If a sub-operation fails with EIO (handle
 * mismatch), returns EIO.  If collect_setupp is NULL, the current
 * setup is used.  If exit_setupp is NULL, the control configuration
 * and state are not set at exit.  WARNING: the buffer must be 8-byte
 * aligned and in contiguous physical addresses.  Contiguousness is
 * not checked!
 */
int
n2rng_collect_diag_bits(n2rng_t *n2rng, int rngid,
    n2rng_setup_t *collect_setupp, void *buffer, int numbytes,
    n2rng_setup_t *exit_setupp, uint64_t exitstate)
{
	int		rv;
	int		override_rv = 0;
	uint64_t	hverr;
	int		i;
	uint64_t	tdelta;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		numchunks;
	boolean_t	rnglooping;
	int		busycount = 0;
	int		blockcount = 0;

	if (numbytes % sizeof (uint64_t)) {
		return (EINVAL);
	}

	if ((uint64_t)buffer % sizeof (uint64_t) != 0) {
		return (EINVAL);
	}

	numchunks = ((numbytes / sizeof (uint64_t)) + RNG_DIAG_CHUNK_SIZE - 1)
	    / RNG_DIAG_CHUNK_SIZE;
	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	/*
	 * If a non-null collect_setupp pointer has been provided,
	 * push the specified setup into the hardware.
	 */
	if (collect_setupp != NULL) {
		/* copy the specified state to the aligned buffer */
		*setupcontigp = *collect_setupp;
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    CTL_STATE_HEALTHCHECK,
			    n2rng->n_ctl_data->n_watchdog_cycles, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				rnglooping = B_FALSE;
				break;
			case H_EIO: /* control yanked from us */
			case H_ENOACCESS: /* We are not control domain */
				return (rv);
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1) : "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(1): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				log_internal_errors(hverr, "hv_rng_ctl_write");
				override_rv = rv;
				goto restore_state;
			}
		} /* while (rnglooping) */
	} /* if (collect_setupp != NULL) */

	/* If the caller asks for some bytes, collect the data */
	if (numbytes > 0) {
		for (i = 0; i < numchunks; i++) {
			size_t thisnumbytes = (i == numchunks - 1) ?
			    numbytes - i * (RNG_DIAG_CHUNK_SIZE *
			    sizeof (uint64_t)) :
			    RNG_DIAG_CHUNK_SIZE * sizeof (uint64_t);

			/* try until we successfully read a word of data */
			rnglooping = B_TRUE;
			busycount = 0;
			blockcount = 0;
			while (rnglooping) {
				hverr = n2rng_data_read_diag(n2rng, rngid,
				    va_to_pa((uint64_t *)buffer +
				    RNG_DIAG_CHUNK_SIZE * i),
				    thisnumbytes, &tdelta);
				rv = n2rng_herr2kerr(hverr);
				switch (hverr) {
				case H_EOK:
					rnglooping = B_FALSE;
					break;
				case H_EIO:
				case H_ENOACCESS:
					return (rv);
				case H_EWOULDBLOCK:
					/* Data not available, try again */
					if (++blockcount >
					    RNG_MAX_BLOCK_ATTEMPTS) {
						DBG1(n2rng, DHEALTH,
						    "n2rng_collect_diag_bits"
						    "(2): exceeded block count"
						    " of %d",
						    RNG_MAX_BLOCK_ATTEMPTS);
						return (rv);
					} else {
						cyclesleep(n2rng, tdelta);
					}
					break;
				default:
					log_internal_errors(hverr,
					    "hv_rng_data_read_diag");
					override_rv = rv;
					goto restore_state;
				}
			} /* while (rnglooping) */
		} /* for */
	}

restore_state:

	/* restore the preferred configuration and set exit state */
	if (exit_setupp != NULL) {

		*setupcontigp = *exit_setupp;
		rnglooping = B_TRUE;
		busycount = 0;
		blockcount = 0;
		while (rnglooping) {
			hverr = n2rng_ctl_write(n2rng, rngid, setupphys,
			    exitstate, n2rng->n_ctl_data->n_watchdog_cycles,
			    &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
			case H_EIO: /* control yanked from us */
			case H_EINVAL: /* some external error, probably */
			case H_ENOACCESS: /* We are not control domain */
				rnglooping = B_FALSE;
				break;
			case H_EWOULDBLOCK:
				/* Data currently not available, try again */
				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded block count of %d",
					    RNG_MAX_BLOCK_ATTEMPTS);
					return (rv);
				} else {
					cyclesleep(n2rng, tdelta);
				}
				break;
			case H_EBUSY:
				/*
				 * A control write is already in progress.
				 * Note: This shouldn't happen since
				 * n2rng_ctl_write() waits for the
				 * write to complete.
				 */
				if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
					DBG1(n2rng, DHEALTH,
					    "n2rng_collect_diag_bits(3): "
					    "exceeded busy count of %d",
					    RNG_MAX_BUSY_ATTEMPTS);
					return (rv);
				} else {
					delay(RNG_RETRY_BUSY_DELAY);
				}
				break;
			default:
				rnglooping = B_FALSE;
				log_internal_errors(hverr, "hv_rng_ctl_write");
				break;
			}
		} /* while */
	} /* if */

	/*
	 * override_rv takes care of the case where we abort because
	 * of some error, but still want to restore the preferred state
	 * and return the first error, even if other errors occur.
	 */
	return (override_rv ? override_rv : rv);
}

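/*
 * n2rng_getentropy()
 *
 * Fill the buffer with random 64-bit words read through the hypervisor
 * data-read interface, retrying around health checks and transient
 * EWOULDBLOCK conditions.  Only whole 64-bit words are filled; any
 * trailing partial word of the requested size is ignored.
 */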
int
n2rng_getentropy(n2rng_t *n2rng, void *buffer, size_t size)
{
	int		i, rv = 0;  /* so it works if size is zero */
	uint64_t	hverr;
	uint64_t	*buffer_w = (uint64_t *)buffer;
	int		num_w = size / sizeof (uint64_t);
	uint64_t	randval;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping;

	for (i = 0; i < num_w; i++) {
		rnglooping = B_TRUE;
		while (rnglooping) {
			hverr = hv_rng_data_read(randvalphys, &tdelta);
			rv = n2rng_herr2kerr(hverr);
			switch (hverr) {
			case H_EOK:
				buffer_w[i] = randval;
				failcount = 0;
				rnglooping = B_FALSE;
				break;
			case H_EIO:
				/*
				 * Either a health check is in progress, or
				 * the watchdog timer has expired while running
				 * hv api version 2.0 or higher with health
				 * checks enabled.
				 */
				if (n2rng->n_hvapi_major_version < 2) {
					/*
					 * A health check is in progress.
					 * Wait RNG_RETRY_HLCHK_USECS and fail
					 * after RNG_MAX_DATA_READ_ATTEMPTS
					 * failures.
					 */
					if (++failcount >
					    RNG_MAX_DATA_READ_ATTEMPTS) {
						DBG2(n2rng, DHEALTH,
7745650Stwelke 						    "n2rng_getentropy: exceeded"
7755650Stwelke 						    "EIO count of %d on cpu %d",
7765650Stwelke 						    RNG_MAX_DATA_READ_ATTEMPTS,
7775650Stwelke 						    CPU->cpu_id);
7785650Stwelke 						goto exitpoint;
7795650Stwelke 					} else {
7805650Stwelke 						delay(drv_usectohz
7815650Stwelke 						    (RNG_RETRY_HLCHK_USECS));
7825650Stwelke 					}
7834625Sgm89044 				} else {
7845650Stwelke 					/*
7855650Stwelke 					 * Just return the error. If a flurry of
7865650Stwelke 					 * random data requests happen to occur
7875650Stwelke 					 * during a health check, there are
7885650Stwelke 					 * multiple levels of defense:
7895650Stwelke 					 * - 2.0 HV provides random data pool
7905650Stwelke 					 * - FIPS algorithm tolerates failures
7915650Stwelke 					 * - Software failover
7925650Stwelke 					 * - Automatic configuration retries
7935650Stwelke 					 * - Hardware failover on some systems
7945650Stwelke 					 */
7955650Stwelke 					goto exitpoint;
7964625Sgm89044 				}
7974625Sgm89044 				break;
7984625Sgm89044 			case H_EWOULDBLOCK:
7995650Stwelke 				/* Data currently not available, try again */
8005650Stwelke 				if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
8015650Stwelke 					DBG1(n2rng, DHEALTH,
8025650Stwelke 					    "n2rng_getentropy: "
8035650Stwelke 					    "exceeded block count of %d",
8045650Stwelke 					    RNG_MAX_BLOCK_ATTEMPTS);
8055650Stwelke 					goto exitpoint;
8065650Stwelke 				} else {
8075650Stwelke 					cyclesleep(n2rng, tdelta);
8085650Stwelke 				}
8094625Sgm89044 				break;
8104625Sgm89044 			default:
8114625Sgm89044 				log_internal_errors(hverr, "hv_rng_data_read");
8124625Sgm89044 				goto exitpoint;
8134625Sgm89044 			}
8144625Sgm89044 		} /* while */
8154625Sgm89044 	} /* for */
8164625Sgm89044 
8174625Sgm89044 exitpoint:
8185650Stwelke 	return (rv);
8195650Stwelke }
8205650Stwelke 
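/*
 * n2rng_ctl_read()
 *
 * Read the RNG control registers, selecting the v1 or v2 hypervisor call
 * based on the negotiated API version.  For v2, the write status replaces
 * the return value on success; for v1, wdelta is not available and is
 * returned as zero.
 */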
uint64_t
n2rng_ctl_read(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa, uint64_t *state,
    uint64_t *tdelta, uint64_t *wdelta)
{
	uint64_t	rv;
	uint64_t	wstatus;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_read_v2(ctlregs_pa, (uint64_t)rngid, state,
		    tdelta, wdelta, &wstatus);
		if (rv == 0) {
			rv = wstatus;
		}
	} else {
		rv = hv_rng_ctl_read(ctlregs_pa, state, tdelta);
		*wdelta = 0;
	}

	return (rv);
}

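/*
 * n2rng_ctl_wait()
 *
 * Poll the v2 control-register read interface until a pending control
 * write has completed, retrying on EWOULDBLOCK and EBUSY up to the
 * configured attempt limits.
 */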
uint64_t
n2rng_ctl_wait(n2rng_t *n2rng, int rngid)
{
	uint64_t	state;
	uint64_t	tdelta;
	uint64_t	wdelta;
	uint64_t	wstatus;
	boolean_t	rnglooping = B_TRUE;
	uint64_t	rv;
	n2rng_setup_t	setupbuffer[2];
	n2rng_setup_t	*setupcontigp;
	uint64_t	setupphys;
	int		busycount = 0;
	int		blockcount = 0;

	/*
	 * Use setupbuffer[0] if it is contiguous, otherwise
	 * setupbuffer[1].
	 */
	setupcontigp = &setupbuffer[
	    CONTIGUOUS(&setupbuffer[0], n2rng_setup_t) ? 0 : 1];
	setupphys = va_to_pa(setupcontigp);

	while (rnglooping) {
		rv = hv_rng_ctl_read_v2(setupphys, (uint64_t)rngid, &state,
		    &tdelta, &wdelta, &wstatus);
		switch (rv) {
		case H_EOK:
			rv = wstatus;
			rnglooping = B_FALSE;
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				return (rv);
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_EBUSY:
			/* Control write still pending, try again */
			if (++busycount > RNG_MAX_BUSY_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_ctl_wait: "
				    "exceeded busy count of %d",
				    RNG_MAX_BUSY_ATTEMPTS);
				return (rv);
			} else {
				delay(RNG_RETRY_BUSY_DELAY);
			}
			break;
		default:
			log_internal_errors(rv, "n2rng_ctl_wait");
			rnglooping = B_FALSE;
		}
	} /* while (rnglooping) */

	return (rv);
}

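/*
 * n2rng_ctl_write()
 *
 * Write the RNG control registers using the v1 or v2 hypervisor call.
 * For v2, wait for the write to complete and report a default
 * accumulate-cycle delay in tdelta.
 */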
uint64_t
n2rng_ctl_write(n2rng_t *n2rng, int rngid, uint64_t ctlregs_pa,
    uint64_t newstate, uint64_t wtimeout, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_ctl_write_v2(ctlregs_pa, newstate, wtimeout,
		    (uint64_t)rngid);
		if (rv == H_EOK) {
			/* Wait for control registers to be written */
			rv = n2rng_ctl_wait(n2rng, rngid);
		}
		*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
	} else {
		rv = hv_rng_ctl_write(ctlregs_pa, newstate, wtimeout, tdelta);
	}

	return (rv);
}

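/*
 * n2rng_data_read_diag()
 *
 * Read diagnostic random data into the given physical address using the
 * v1 or v2 hypervisor call; a zero tdelta from the v2 call is replaced
 * with the default accumulate-cycle delay.
 */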
uint64_t
n2rng_data_read_diag(n2rng_t *n2rng, int rngid, uint64_t data_pa,
    size_t  datalen, uint64_t *tdelta)
{
	uint64_t	rv;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		rv = hv_rng_data_read_diag_v2(data_pa, datalen,
		    (uint64_t)rngid, tdelta);
		if (*tdelta == 0) {
			*tdelta = RNG_DEFAULT_ACCUMULATE_CYCLES;
		}
	} else {
		rv = hv_rng_data_read_diag(data_pa, datalen, tdelta);
	}

	return (rv);
}

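/*
 * n2rng_check_ctl_access()
 *
 * Determine whether this domain has diagnostic control of the RNG.  On
 * v2 this probes the control-read interface with an invalid ID; on v1 it
 * requests diagnostic control directly.
 */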
uint64_t
n2rng_check_ctl_access(n2rng_t *n2rng)
{
	uint64_t	rv;
	uint64_t	unused_64;

	/* Call correct hv function based on api version */
	if (n2rng->n_hvapi_major_version == 2) {
		/*
		 * Attempt to read control registers with invalid ID and data
		 * just to see if we get an access error
		 */
		rv = hv_rng_ctl_read_v2(NULL, N2RNG_INVALID_ID,
		    &unused_64, &unused_64, &unused_64, &unused_64);
	} else {
		rv = hv_rng_get_diag_control();
	}

	return (rv);
}

/*
 * n2rng_config_retry()
 *
 * Schedule a timed call to n2rng_config() if one is not already pending
 */
void
n2rng_config_retry(n2rng_t *n2rng, clock_t seconds)
{
	mutex_enter(&n2rng->n_lock);
	/* Check if a config retry is already pending */
	if (n2rng->n_timeout_id) {
		DBG1(n2rng, DCFG, "n2rng_config_retry: retry pending "
		    "id = %x", n2rng->n_timeout_id);
	} else {
		n2rng->n_timeout_id = timeout(n2rng_config_task,
		    (void *)n2rng, drv_usectohz(seconds * SECOND));
		DBG2(n2rng, DCFG, "n2rng_config_retry: retry scheduled in "
		    "%d seconds, id = %x", seconds, n2rng->n_timeout_id);
	}
	mutex_exit(&n2rng->n_lock);
}

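/*
 * sticks_per_usec()
 *
 * Estimate the frequency of the stick register in ticks per microsecond
 * by sampling gettick() and gethrtime() across a two-tick delay():
 * 1000 * delta_ticks / delta_nanoseconds.
 */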
static uint64_t
sticks_per_usec(void)
{
	uint64_t starttick = gettick();
	hrtime_t starttime = gethrtime();
	uint64_t endtick;
	hrtime_t endtime;

	delay(2);

	endtick = gettick();
	endtime = gethrtime();

	return ((1000 * (endtick - starttick)) / (endtime - starttime));
}

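/*
 * n2rng_init_ctl()
 *
 * Initialize the control-domain data structures: verify diagnostic
 * access, read configuration properties (number of RNGs, accumulate
 * cycles, health check period, FIPS mode), and seed a plausible
 * preferred configuration for each RNG.  In a guest domain this simply
 * returns DDI_SUCCESS without allocating anything.
 */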
static int
n2rng_init_ctl(n2rng_t *n2rng)
{
	int		rv;
	int		hverr;
	rng_entry_t	*rng;
	int		rngid;
	int		blockcount = 0;

	n2rng->n_ctl_data = NULL;

	/* Attempt to gain diagnostic control */
	do {
		hverr = n2rng_check_ctl_access(n2rng);
		rv = n2rng_herr2kerr(hverr);
		if ((hverr == H_EWOULDBLOCK) &&
		    (++blockcount > RNG_MAX_BUSY_ATTEMPTS)) {
10235650Stwelke 			DBG1(n2rng, DHEALTH, "n2rng_int_ctl: exceeded busy "
10245650Stwelke 			    "count of %d", RNG_MAX_BUSY_ATTEMPTS);
10255650Stwelke 			return (rv);
10265650Stwelke 		} else {
10275650Stwelke 			delay(RNG_RETRY_BUSY_DELAY);
10285650Stwelke 		}
10295650Stwelke 	} while (hverr == H_EWOULDBLOCK);
10305650Stwelke 
	/*
	 * If the attempt fails with EPERM, the driver is not running in
	 * the control domain.
	 */
	if (rv == EPERM) {
		DBG0(n2rng, DATTACH,
		    "n2rng_init_ctl: Running in guest domain");
		return (DDI_SUCCESS);
	}

	/* Allocate control structure only used in control domain */
	n2rng->n_ctl_data = kmem_alloc(sizeof (rng_ctl_data_t), KM_SLEEP);
	n2rng->n_ctl_data->n_num_rngs_online = 0;

	/*
	 * If running with an API version less than 2.0 default to one rng.
	 * Otherwise get number of rngs from device properties.
	 */
	if (n2rng->n_hvapi_major_version < 2) {
		n2rng->n_ctl_data->n_num_rngs = 1;
	} else {
		n2rng->n_ctl_data->n_num_rngs =
		    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
		    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
		    N2RNG_PROP_NUM_UNITS, 0);
		if (n2rng->n_ctl_data->n_num_rngs == 0) {
			cmn_err(CE_WARN, "n2rng: %s property not found",
			    N2RNG_PROP_NUM_UNITS);
			return (DDI_FAILURE);
		}
	}

	/* Allocate space for all rng entries */
	n2rng->n_ctl_data->n_rngs =
	    kmem_zalloc(n2rng->n_ctl_data->n_num_rngs *
	    sizeof (rng_entry_t), KM_SLEEP);

	/* Get accumulate cycles from .conf file. */
	n2rng->n_ctl_data->n_accumulate_cycles =
	    ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "acc_cycles",
	    RNG_DEFAULT_ACCUMULATE_CYCLES);

	/* Get health check frequency from .conf file */
	n2rng->n_ctl_data->n_hc_secs = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "hc_seconds",
	    RNG_DEFAULT_HC_SECS);

	/* Get the FIPS configuration: B_FALSE by default */
	n2rng->n_is_fips = ddi_getprop(DDI_DEV_T_ANY, n2rng->n_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    N2RNG_FIPS_STRING, B_FALSE);

	/* API versions prior to 2.0 do not support health checks */
	if ((n2rng->n_hvapi_major_version < 2) &&
	    (n2rng->n_ctl_data->n_hc_secs > 0)) {
10875650Stwelke 		cmn_err(CE_WARN, "n2rng: Hyperviser api "
10885650Stwelke 		    "version %d.%d does not support health checks",
10895650Stwelke 		    n2rng->n_hvapi_major_version,
10905650Stwelke 		    n2rng->n_hvapi_minor_version);
10915650Stwelke 		n2rng->n_ctl_data->n_hc_secs = 0;
10925650Stwelke 	}
10935650Stwelke 
1094*12929SMisaki.Miyashita@Oracle.COM 
1095*12929SMisaki.Miyashita@Oracle.COM 	if (n2rng->n_is_fips == B_TRUE) {
		/* When in FIPS mode, run the module integrity test */
		if (fips_check_module("drv/n2rng", (void *)_init) != 0) {
			cmn_err(CE_WARN, "n2rng: FIPS Software Integrity Test "
			    "failed\n");
			return (DDI_FAILURE);
		}
	}

	/* Calculate watchdog timeout value */
	if (n2rng->n_ctl_data->n_hc_secs <= 0) {
		n2rng->n_ctl_data->n_watchdog_cycles = 0;
	} else {
		n2rng->n_ctl_data->n_watchdog_cycles =
		    ((uint64_t)(RNG_EXTRA_WATCHDOG_SECS) +
		    n2rng->n_ctl_data->n_hc_secs) *
		    n2rng->n_sticks_per_usec * 1000000;
	}

	/*
	 * Set some plausible state into the preferred configuration.
	 * The intent is that the health check will immediately overwrite it.
	 */
	for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs; rngid++) {

		rng = &n2rng->n_ctl_data->n_rngs[rngid];

		rng->n_preferred_config.ctlwds[0].word = 0;
		rng->n_preferred_config.ctlwds[0].fields.rnc_anlg_sel =
		    N2RNG_NOANALOGOUT;
		rng->n_preferred_config.ctlwds[0].fields.rnc_cnt =
		    RNG_DEFAULT_ACCUMULATE_CYCLES;
		rng->n_preferred_config.ctlwds[0].fields.rnc_mode =
		    RNG_MODE_NORMAL;
		rng->n_preferred_config.ctlwds[1].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[2].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[3].word =
		    rng->n_preferred_config.ctlwds[0].word;
		rng->n_preferred_config.ctlwds[0].fields.rnc_vcoctl = 1;
		rng->n_preferred_config.ctlwds[0].fields.rnc_selbits = 1;
		rng->n_preferred_config.ctlwds[1].fields.rnc_vcoctl = 2;
		rng->n_preferred_config.ctlwds[1].fields.rnc_selbits = 2;
		rng->n_preferred_config.ctlwds[2].fields.rnc_vcoctl = 3;
		rng->n_preferred_config.ctlwds[2].fields.rnc_selbits = 4;
		rng->n_preferred_config.ctlwds[3].fields.rnc_vcoctl = 0;
		rng->n_preferred_config.ctlwds[3].fields.rnc_selbits = 7;
	}

	n2rng_setcontrol(n2rng);
	DBG2(n2rng, DATTACH,
	    "n2rng_init_ctl: Running in control domain with %d rng device%s",
	    n2rng->n_ctl_data->n_num_rngs,
	    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_sticks_per_usec = %ld, n_hc_secs = %d",
	    n2rng->n_sticks_per_usec,
	    n2rng->n_ctl_data->n_hc_secs);
	DBG2(n2rng, DCFG,
	    "n2rng_init_ctl: n_watchdog_cycles = %ld, "
	    "n_accumulate_cycles = %ld", n2rng->n_ctl_data->n_watchdog_cycles,
	    n2rng->n_ctl_data->n_accumulate_cycles);

	return (DDI_SUCCESS);
}

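/*
 * n2rng_uninit_ctl()
 *
 * Free the control-domain data structures allocated by n2rng_init_ctl().
 */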
static void
n2rng_uninit_ctl(n2rng_t *n2rng)
{
	if (n2rng->n_ctl_data) {
		if (n2rng->n_ctl_data->n_num_rngs) {
			kmem_free(n2rng->n_ctl_data->n_rngs,
			    n2rng->n_ctl_data->n_num_rngs *
			    sizeof (rng_entry_t));
			n2rng->n_ctl_data->n_rngs = NULL;
			n2rng->n_ctl_data->n_num_rngs = 0;
		}
		kmem_free(n2rng->n_ctl_data, sizeof (rng_ctl_data_t));
		n2rng->n_ctl_data = NULL;
	}
}

/*
 * n2rng_config_test()
 *
 * Attempt to read random data to see if the rng is configured.
 */
int
n2rng_config_test(n2rng_t *n2rng)
{
	int		rv = 0;
	uint64_t	hverr;
	uint64_t	randval = 0;
	uint64_t	randvalphys = va_to_pa(&randval);
	uint64_t	tdelta;
	int		failcount = 0;
	int		blockcount = 0;
	boolean_t	rnglooping = B_TRUE;

	while (rnglooping) {
		hverr = hv_rng_data_read(randvalphys, &tdelta);
		rv = n2rng_herr2kerr(hverr);
		switch (hverr) {
		case H_EOK:
			failcount = 0;
			rnglooping = B_FALSE;
			break;
		case H_EIO:
			/*
			 * A health check is in progress.
			 * Wait RNG_RETRY_HLCHK_USECS and fail
			 * after RNG_MAX_DATA_READ_ATTEMPTS
			 * failures.
			 */
			if (++failcount > RNG_MAX_DATA_READ_ATTEMPTS) {
				goto exitpoint;
			} else {
				delay(drv_usectohz(RNG_RETRY_HLCHK_USECS));
			}
			break;
		case H_EWOULDBLOCK:
			/* Data currently not available, try again */
			if (++blockcount > RNG_MAX_BLOCK_ATTEMPTS) {
				DBG1(n2rng, DHEALTH, "n2rng_config_test: "
				    "exceeded block count of %d",
				    RNG_MAX_BLOCK_ATTEMPTS);
				goto exitpoint;
			} else {
				cyclesleep(n2rng, tdelta);
			}
			break;
		case H_ENOACCESS:
			/* An rng error has occurred during health check */
			goto exitpoint;
		default:
			log_internal_errors(hverr, "hv_rng_data_read");
			goto exitpoint;
		}
	} /* while */

exitpoint:
	return (rv);
}

/*
 * n2rng_config()
 *
 * Run health check on the RNG hardware
 * Configure the RNG hardware
 * Register with crypto framework
 */
static int
n2rng_config(n2rng_t *n2rng)
{
	int		rv;
	rng_entry_t	*rng;
	int		rngid;

	/*
	 * Run health checks and configure rngs if running in control domain,
	 * otherwise just check if at least one rng is available.
	 */
	if (n2rng_iscontrol(n2rng)) {

		for (rngid = 0; rngid < n2rng->n_ctl_data->n_num_rngs;
		    rngid++) {

			rng = &n2rng->n_ctl_data->n_rngs[rngid];

			/* Only test rngs that have not already failed */
			if (rng->n_rng_state == CTL_STATE_ERROR) {
				continue;
			}

			if ((n2rng->n_binding == N2RNG_CPU_VF) &&
			    (n2rng->n_hvapi_major_version < 2)) {
				/*
				 * Since api versions prior to 2.0 do not
				 * support multiple rngs, bind to the current
				 * processor for the entire health check
				 * process.
				 */
				thread_affinity_set(curthread, CPU_CURRENT);
				DBG1(n2rng, DCFG, "n2rng_config: "
				    "Configuring single rng from cpu %d",
				    CPU->cpu_id);
				rv = n2rng_do_health_check(n2rng, rngid);
				thread_affinity_clear(curthread);
			} else {
				rv = n2rng_do_health_check(n2rng, rngid);
			}

			switch (rv) {
			case 0:
				/*
				 * Successful, increment online count if
				 * necessary
				 */
				DBG1(n2rng, DCFG, "n2rng_config: rng(%d) "
				    "passed health checks", rngid);
				if (rng->n_rng_state != CTL_STATE_CONFIGURED) {
					rng->n_rng_state =
					    CTL_STATE_CONFIGURED;
					n2rng->n_ctl_data->n_num_rngs_online++;
				}
				break;
			default:
				/*
				 * Health checks failed, decrement online
				 * count if necessary
				 */
				cmn_err(CE_WARN, "n2rng: rng(%d) "
				    "failed health checks", rngid);
				if (rng->n_rng_state == CTL_STATE_CONFIGURED) {
					n2rng->n_ctl_data->n_num_rngs_online--;
				}
				rng->n_rng_state = CTL_STATE_ERROR;
				break;
			}
		}
		DBG2(n2rng, DCFG, "n2rng_config: %d rng%s online",
		    n2rng->n_ctl_data->n_num_rngs_online,
		    (n2rng->n_ctl_data->n_num_rngs_online == 1) ? "" : "s");

		/* Check if all rngs have failed */
		if (n2rng->n_ctl_data->n_num_rngs_online == 0) {
			cmn_err(CE_WARN, "n2rng: %d RNG device%s failed",
			    n2rng->n_ctl_data->n_num_rngs,
			    (n2rng->n_ctl_data->n_num_rngs == 1) ? "" : "s");
			goto errorexit;
		} else {
			n2rng_setconfigured(n2rng);
		}
	} else {
		/* Running in guest domain, just check if rng is configured */
		rv = n2rng_config_test(n2rng);
		switch (rv) {
		case 0:
			n2rng_setconfigured(n2rng);
			break;
		case EIO:
			/* Don't set configured to force a retry */
			break;
		default:
			goto errorexit;
		}
	}

	/*
	 * Initialize FIPS state and register with KCF if we have at least one
	 * RNG configured.  Otherwise schedule a retry if all rngs have not
	 * failed.
	 */
	if (n2rng_isconfigured(n2rng)) {

		if (n2rng_init(n2rng) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "n2rng: unable to register with KCF");
			goto errorexit;
		}

		/*
		 * Schedule a retry if running in the control domain and a
		 * health check time has been specified.
		 */
		if (n2rng_iscontrol(n2rng) &&
		    (n2rng->n_ctl_data->n_hc_secs > 0)) {
			n2rng_config_retry(n2rng,
			    n2rng->n_ctl_data->n_hc_secs);
		}
	} else if (!n2rng_isfailed(n2rng)) {
		/* Schedule a retry if one is not already pending */
		n2rng_config_retry(n2rng, RNG_CFG_RETRY_SECS);
	}
	return (DDI_SUCCESS);

errorexit:
	/* Unregister from KCF if we are registered */
	(void) n2rng_unregister_provider(n2rng);
	n2rng_setfailed(n2rng);
	cmn_err(CE_WARN, "n2rng: hardware failure detected");
	return (DDI_FAILURE);
}

/*
 * n2rng_config_task()
 *
 * Call n2rng_config() from the task queue or after a timeout, ignore result.
 */
static void
n2rng_config_task(void *targ)
{
	n2rng_t *n2rng = (n2rng_t *)targ;

	mutex_enter(&n2rng->n_lock);
	n2rng->n_timeout_id = 0;
	mutex_exit(&n2rng->n_lock);
	(void) n2rng_config(n2rng);
}