xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_hio.c (revision 11304:3092d1e303d6)
16495Sspeer /*
26495Sspeer  * CDDL HEADER START
36495Sspeer  *
46495Sspeer  * The contents of this file are subject to the terms of the
56495Sspeer  * Common Development and Distribution License (the "License").
66495Sspeer  * You may not use this file except in compliance with the License.
76495Sspeer  *
86495Sspeer  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
96495Sspeer  * or http://www.opensolaris.org/os/licensing.
106495Sspeer  * See the License for the specific language governing permissions
116495Sspeer  * and limitations under the License.
126495Sspeer  *
136495Sspeer  * When distributing Covered Code, include this CDDL HEADER in each
146495Sspeer  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
156495Sspeer  * If applicable, add the following below this CDDL HEADER, with the
166495Sspeer  * fields enclosed by brackets "[]" replaced with your own identifying
176495Sspeer  * information: Portions Copyright [yyyy] [name of copyright owner]
186495Sspeer  *
196495Sspeer  * CDDL HEADER END
206495Sspeer  */
216495Sspeer 
226495Sspeer /*
238597SMichael.Speer@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
246495Sspeer  * Use is subject to license terms.
256495Sspeer  */
266495Sspeer 
276495Sspeer /*
286495Sspeer  * nxge_hio.c
296495Sspeer  *
306495Sspeer  * This file manages the virtualization resources for Neptune
316495Sspeer  * devices.  That is, it implements a hybrid I/O (HIO) approach in the
326495Sspeer  * Solaris kernel, whereby a guest domain on an LDOMs server may
336495Sspeer  * request & use hardware resources from the service domain.
346495Sspeer  *
356495Sspeer  */
366495Sspeer 
378275SEric Cheng #include <sys/mac_provider.h>
386495Sspeer #include <sys/nxge/nxge_impl.h>
396495Sspeer #include <sys/nxge/nxge_fzc.h>
406495Sspeer #include <sys/nxge/nxge_rxdma.h>
416495Sspeer #include <sys/nxge/nxge_txdma.h>
426495Sspeer #include <sys/nxge/nxge_hio.h>
436495Sspeer 
446495Sspeer /*
456495Sspeer  * External prototypes
466495Sspeer  */
476495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
486495Sspeer 
496495Sspeer /* The following functions may be found in nxge_main.c */
508275SEric Cheng extern int nxge_m_mmac_remove(void *arg, int slot);
518275SEric Cheng extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
528275SEric Cheng 	boolean_t usetbl);
536495Sspeer 
546495Sspeer /* The following functions may be found in nxge_[t|r]xdma.c */
556495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
566495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
576495Sspeer 
586495Sspeer /*
596495Sspeer  * Local prototypes
606495Sspeer  */
616495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
626495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
636495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group);
646495Sspeer 
656495Sspeer /*
666495Sspeer  * These functions are used by both service & guest domains to
676495Sspeer  * decide whether they're running in an LDOMs/XEN environment
686495Sspeer  * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
696495Sspeer  */
706495Sspeer 
716495Sspeer /*
726495Sspeer  * nxge_get_environs
736495Sspeer  *
746495Sspeer  *	Figure out if we are in a guest domain or not.
756495Sspeer  *
766495Sspeer  * Arguments:
776495Sspeer  * 	nxge
786495Sspeer  *
796495Sspeer  * Notes:
806495Sspeer  *
816495Sspeer  * Context:
826495Sspeer  *	Any domain
836495Sspeer  */
846495Sspeer void
856495Sspeer nxge_get_environs(
866495Sspeer 	nxge_t *nxge)
876495Sspeer {
886495Sspeer 	char *string;
896495Sspeer 
906495Sspeer 	/*
916495Sspeer 	 * In the beginning, assume that we are running sans LDOMs/XEN.
926495Sspeer 	 */
936495Sspeer 	nxge->environs = SOLARIS_DOMAIN;
946495Sspeer 
956495Sspeer 	/*
966495Sspeer 	 * Are we a hybrid I/O (HIO) guest domain driver?
976495Sspeer 	 */
986495Sspeer 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
996495Sspeer 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1006495Sspeer 	    "niutype", &string)) == DDI_PROP_SUCCESS) {
1016495Sspeer 		if (strcmp(string, "n2niu") == 0) {
1026495Sspeer 			nxge->environs = SOLARIS_GUEST_DOMAIN;
1036495Sspeer 			/* So we can allocate properly-aligned memory. */
1046495Sspeer 			nxge->niu_type = N2_NIU;
1056495Sspeer 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
1066495Sspeer 			    "Hybrid IO-capable guest domain"));
1076495Sspeer 		}
1086495Sspeer 		ddi_prop_free(string);
1096495Sspeer 	}
1106495Sspeer }
1116495Sspeer 
1126495Sspeer #if !defined(sun4v)
1136495Sspeer 
1146495Sspeer /*
1156495Sspeer  * nxge_hio_init
1166495Sspeer  *
1176495Sspeer  *	Initialize the HIO module of the NXGE driver.
1186495Sspeer  *
1196495Sspeer  * Arguments:
1206495Sspeer  * 	nxge
1216495Sspeer  *
1226495Sspeer  * Notes:
1236495Sspeer  *	This is the non-hybrid I/O version of this function.
1246495Sspeer  *
1256495Sspeer  * Context:
1266495Sspeer  *	Any domain
1276495Sspeer  */
1286495Sspeer int
1297587SMichael.Speer@Sun.COM nxge_hio_init(nxge_t *nxge)
1306495Sspeer {
1316495Sspeer 	nxge_hio_data_t *nhd;
1328275SEric Cheng 	int i;
1336495Sspeer 
1346495Sspeer 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
13510577SMichael.Speer@Sun.COM 	if (nhd == NULL) {
1366495Sspeer 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
1376495Sspeer 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
13810577SMichael.Speer@Sun.COM 		nhd->type = NXGE_HIO_TYPE_SERVICE;
1396495Sspeer 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
1406495Sspeer 	}
1416495Sspeer 
1428275SEric Cheng 	/*
1438275SEric Cheng 	 * Initialize share and ring group structures.
1448275SEric Cheng 	 */
1458275SEric Cheng 	for (i = 0; i < NXGE_MAX_TDCS; i++)
1468275SEric Cheng 		nxge->tdc_is_shared[i] = B_FALSE;
1478275SEric Cheng 
1488275SEric Cheng 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1498275SEric Cheng 		nxge->tx_hio_groups[i].ghandle = NULL;
1508275SEric Cheng 		nxge->tx_hio_groups[i].nxgep = nxge;
1518275SEric Cheng 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1528275SEric Cheng 		nxge->tx_hio_groups[i].gindex = 0;
1538275SEric Cheng 		nxge->tx_hio_groups[i].sindex = 0;
1548275SEric Cheng 	}
1558275SEric Cheng 
1568275SEric Cheng 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1578275SEric Cheng 		nxge->rx_hio_groups[i].ghandle = NULL;
1588275SEric Cheng 		nxge->rx_hio_groups[i].nxgep = nxge;
1598275SEric Cheng 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1608275SEric Cheng 		nxge->rx_hio_groups[i].gindex = 0;
1618275SEric Cheng 		nxge->rx_hio_groups[i].sindex = 0;
1628275SEric Cheng 		nxge->rx_hio_groups[i].started = B_FALSE;
1638597SMichael.Speer@Sun.COM 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1648275SEric Cheng 		nxge->rx_hio_groups[i].rdctbl = -1;
1658275SEric Cheng 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
1668275SEric Cheng 	}
1678275SEric Cheng 
1686495Sspeer 	nhd->hio.ldoms = B_FALSE;
1696495Sspeer 
1706495Sspeer 	return (NXGE_OK);
1716495Sspeer }
1726495Sspeer 
1736495Sspeer #endif
1746495Sspeer 
1756495Sspeer void
1767587SMichael.Speer@Sun.COM nxge_hio_uninit(nxge_t *nxge)
1776495Sspeer {
1786495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1796495Sspeer 
1806495Sspeer 	ASSERT(nxge->nxge_hw_p->ndevs == 0);
1816495Sspeer 
1827587SMichael.Speer@Sun.COM 	if (nhd != NULL) {
1837587SMichael.Speer@Sun.COM 		MUTEX_DESTROY(&nhd->lock);
1847587SMichael.Speer@Sun.COM 		KMEM_FREE(nhd, sizeof (*nhd));
1857587SMichael.Speer@Sun.COM 		nxge->nxge_hw_p->hio = 0;
1867587SMichael.Speer@Sun.COM 	}
1876495Sspeer }
1886495Sspeer 
1896495Sspeer /*
1906495Sspeer  * nxge_dci_map
1916495Sspeer  *
1926495Sspeer  *	Map a DMA channel index to a channel number.
1936495Sspeer  *
1946495Sspeer  * Arguments:
1956495Sspeer  * 	instance	The instance number of the driver.
1966495Sspeer  * 	type		The type of channel this is: Tx or Rx.
1976495Sspeer  * 	index		The index to convert to a channel number
1986495Sspeer  *
1996495Sspeer  * Notes:
2006495Sspeer  *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
2016495Sspeer  *
2026495Sspeer  * Context:
2036495Sspeer  *	Any domain
2046495Sspeer  */
2056495Sspeer int
2066495Sspeer nxge_dci_map(
2076495Sspeer 	nxge_t *nxge,
2086495Sspeer 	vpc_type_t type,
2096495Sspeer 	int index)
2106495Sspeer {
2116495Sspeer 	nxge_grp_set_t *set;
2126495Sspeer 	int dc;
2136495Sspeer 
2146495Sspeer 	switch (type) {
2156495Sspeer 	case VP_BOUND_TX:
2166495Sspeer 		set = &nxge->tx_set;
2176495Sspeer 		break;
2186495Sspeer 	case VP_BOUND_RX:
2196495Sspeer 		set = &nxge->rx_set;
2206495Sspeer 		break;
2216495Sspeer 	}
2226495Sspeer 
2236495Sspeer 	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
2246495Sspeer 		if ((1 << dc) & set->owned.map) {
2256495Sspeer 			if (index == 0)
2266495Sspeer 				return (dc);
2276495Sspeer 			else
2286495Sspeer 				index--;
2296495Sspeer 		}
2306495Sspeer 	}
2316495Sspeer 
2326495Sspeer 	return (-1);
2336495Sspeer }
2346495Sspeer 
2356495Sspeer /*
2366495Sspeer  * ---------------------------------------------------------------------
2376495Sspeer  * These are the general-purpose DMA channel group functions.  That is,
2386495Sspeer  * these functions are used to manage groups of TDCs or RDCs in an HIO
2396495Sspeer  * environment.
2406495Sspeer  *
2416495Sspeer  * But is also expected that in the future they will be able to manage
2426495Sspeer  * Crossbow groups.
2436495Sspeer  * ---------------------------------------------------------------------
2446495Sspeer  */
2456495Sspeer 
2466495Sspeer /*
2477766SMichael.Speer@Sun.COM  * nxge_grp_cleanup(p_nxge_t nxge)
2487766SMichael.Speer@Sun.COM  *
2497766SMichael.Speer@Sun.COM  *	Remove all outstanding groups.
2507766SMichael.Speer@Sun.COM  *
2517766SMichael.Speer@Sun.COM  * Arguments:
2527766SMichael.Speer@Sun.COM  *	nxge
2537766SMichael.Speer@Sun.COM  */
2547766SMichael.Speer@Sun.COM void
2557766SMichael.Speer@Sun.COM nxge_grp_cleanup(p_nxge_t nxge)
2567766SMichael.Speer@Sun.COM {
2577766SMichael.Speer@Sun.COM 	nxge_grp_set_t *set;
2587766SMichael.Speer@Sun.COM 	int i;
2597766SMichael.Speer@Sun.COM 
2607766SMichael.Speer@Sun.COM 	MUTEX_ENTER(&nxge->group_lock);
2617766SMichael.Speer@Sun.COM 
2627766SMichael.Speer@Sun.COM 	/*
2637766SMichael.Speer@Sun.COM 	 * Find RX groups that need to be cleaned up.
2647766SMichael.Speer@Sun.COM 	 */
2657766SMichael.Speer@Sun.COM 	set = &nxge->rx_set;
2667766SMichael.Speer@Sun.COM 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
2677766SMichael.Speer@Sun.COM 		if (set->group[i] != NULL) {
2687766SMichael.Speer@Sun.COM 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
2697766SMichael.Speer@Sun.COM 			set->group[i] = NULL;
2707766SMichael.Speer@Sun.COM 		}
2717766SMichael.Speer@Sun.COM 	}
2727766SMichael.Speer@Sun.COM 
2737766SMichael.Speer@Sun.COM 	/*
2747766SMichael.Speer@Sun.COM 	 * Find TX groups that need to be cleaned up.
2757766SMichael.Speer@Sun.COM 	 */
2767766SMichael.Speer@Sun.COM 	set = &nxge->tx_set;
2777766SMichael.Speer@Sun.COM 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
2787766SMichael.Speer@Sun.COM 		if (set->group[i] != NULL) {
2797766SMichael.Speer@Sun.COM 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
2807766SMichael.Speer@Sun.COM 			set->group[i] = NULL;
2817766SMichael.Speer@Sun.COM 		}
2827766SMichael.Speer@Sun.COM 	}
2837766SMichael.Speer@Sun.COM 	MUTEX_EXIT(&nxge->group_lock);
2847766SMichael.Speer@Sun.COM }
2857766SMichael.Speer@Sun.COM 
2867766SMichael.Speer@Sun.COM 
2877766SMichael.Speer@Sun.COM /*
2886495Sspeer  * nxge_grp_add
2896495Sspeer  *
2906495Sspeer  *	Add a group to an instance of NXGE.
2916495Sspeer  *
2926495Sspeer  * Arguments:
2936495Sspeer  * 	nxge
2946495Sspeer  * 	type	Tx or Rx
2956495Sspeer  *
2966495Sspeer  * Notes:
2976495Sspeer  *
2986495Sspeer  * Context:
2996495Sspeer  *	Any domain
3006495Sspeer  */
3017755SMisaki.Kataoka@Sun.COM nxge_grp_t *
3026495Sspeer nxge_grp_add(
3036495Sspeer 	nxge_t *nxge,
3046495Sspeer 	nxge_grp_type_t type)
3056495Sspeer {
3066495Sspeer 	nxge_grp_set_t *set;
3076495Sspeer 	nxge_grp_t *group;
3086495Sspeer 	int i;
3096495Sspeer 
3106495Sspeer 	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
3116495Sspeer 	group->nxge = nxge;
3126495Sspeer 
3136495Sspeer 	MUTEX_ENTER(&nxge->group_lock);
3146495Sspeer 	switch (type) {
3156495Sspeer 	case NXGE_TRANSMIT_GROUP:
3166495Sspeer 	case EXT_TRANSMIT_GROUP:
3176495Sspeer 		set = &nxge->tx_set;
3186495Sspeer 		break;
3196495Sspeer 	default:
3206495Sspeer 		set = &nxge->rx_set;
3216495Sspeer 		break;
3226495Sspeer 	}
3236495Sspeer 
3246495Sspeer 	group->type = type;
3256495Sspeer 	group->active = B_TRUE;
3266495Sspeer 	group->sequence = set->sequence++;
3276495Sspeer 
3286495Sspeer 	/* Find an empty slot for this logical group. */
3296495Sspeer 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
3306495Sspeer 		if (set->group[i] == 0) {
3316495Sspeer 			group->index = i;
3326495Sspeer 			set->group[i] = group;
3336495Sspeer 			NXGE_DC_SET(set->lg.map, i);
3346495Sspeer 			set->lg.count++;
3356495Sspeer 			break;
3366495Sspeer 		}
3376495Sspeer 	}
3386495Sspeer 	MUTEX_EXIT(&nxge->group_lock);
3396495Sspeer 
3406495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
3416495Sspeer 	    "nxge_grp_add: %cgroup = %d.%d",
3426495Sspeer 	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
3436495Sspeer 	    nxge->mac.portnum, group->sequence));
3446495Sspeer 
3457755SMisaki.Kataoka@Sun.COM 	return (group);
3466495Sspeer }
3476495Sspeer 
3486495Sspeer void
3496495Sspeer nxge_grp_remove(
3506495Sspeer 	nxge_t *nxge,
3517755SMisaki.Kataoka@Sun.COM 	nxge_grp_t *group)	/* The group to remove. */
3526495Sspeer {
3536495Sspeer 	nxge_grp_set_t *set;
3546495Sspeer 	vpc_type_t type;
3556495Sspeer 
356*11304SJanie.Lu@Sun.COM 	if (group == NULL)
357*11304SJanie.Lu@Sun.COM 		return;
358*11304SJanie.Lu@Sun.COM 
3596495Sspeer 	MUTEX_ENTER(&nxge->group_lock);
3606495Sspeer 	switch (group->type) {
3616495Sspeer 	case NXGE_TRANSMIT_GROUP:
3626495Sspeer 	case EXT_TRANSMIT_GROUP:
3636495Sspeer 		set = &nxge->tx_set;
3646495Sspeer 		break;
3656495Sspeer 	default:
3666495Sspeer 		set = &nxge->rx_set;
3676495Sspeer 		break;
3686495Sspeer 	}
3696495Sspeer 
3706495Sspeer 	if (set->group[group->index] != group) {
3716495Sspeer 		MUTEX_EXIT(&nxge->group_lock);
3726495Sspeer 		return;
3736495Sspeer 	}
3746495Sspeer 
3756495Sspeer 	set->group[group->index] = 0;
3766495Sspeer 	NXGE_DC_RESET(set->lg.map, group->index);
3776495Sspeer 	set->lg.count--;
3786495Sspeer 
3796495Sspeer 	/* While inside the mutex, deactivate <group>. */
3806495Sspeer 	group->active = B_FALSE;
3816495Sspeer 
3826495Sspeer 	MUTEX_EXIT(&nxge->group_lock);
3836495Sspeer 
3846495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
3856495Sspeer 	    "nxge_grp_remove(%c.%d.%d) called",
3866495Sspeer 	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
3876495Sspeer 	    nxge->mac.portnum, group->sequence));
3886495Sspeer 
3896495Sspeer 	/* Now, remove any DCs which are still active. */
3906495Sspeer 	switch (group->type) {
3916495Sspeer 	default:
3926495Sspeer 		type = VP_BOUND_TX;
3936495Sspeer 		break;
3946495Sspeer 	case NXGE_RECEIVE_GROUP:
3956495Sspeer 	case EXT_RECEIVE_GROUP:
3966495Sspeer 		type = VP_BOUND_RX;
3976495Sspeer 	}
3986495Sspeer 
3996495Sspeer 	while (group->dc) {
4006495Sspeer 		nxge_grp_dc_remove(nxge, type, group->dc->channel);
4016495Sspeer 	}
4026495Sspeer 
4036495Sspeer 	KMEM_FREE(group, sizeof (*group));
4046495Sspeer }
4056495Sspeer 
4066495Sspeer /*
4077950SMichael.Speer@Sun.COM  * nxge_grp_dc_add
4086495Sspeer  *
4096495Sspeer  *	Add a DMA channel to a VR/Group.
4106495Sspeer  *
4116495Sspeer  * Arguments:
4126495Sspeer  * 	nxge
4136495Sspeer  * 	channel	The channel to add.
4146495Sspeer  * Notes:
4156495Sspeer  *
4166495Sspeer  * Context:
4176495Sspeer  *	Any domain
4186495Sspeer  */
4196495Sspeer /* ARGSUSED */
4206495Sspeer int
4216495Sspeer nxge_grp_dc_add(
4226495Sspeer 	nxge_t *nxge,
4237755SMisaki.Kataoka@Sun.COM 	nxge_grp_t *group,	/* The group to add <channel> to. */
4246495Sspeer 	vpc_type_t type,	/* Rx or Tx */
4256495Sspeer 	int channel)		/* A physical/logical channel number */
4266495Sspeer {
4276495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
4286495Sspeer 	nxge_hio_dc_t *dc;
4296495Sspeer 	nxge_grp_set_t *set;
4306602Sspeer 	nxge_status_t status = NXGE_OK;
4316495Sspeer 
4326495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
4336495Sspeer 
4348275SEric Cheng 	if (group == 0)
4356495Sspeer 		return (0);
4366495Sspeer 
4376495Sspeer 	switch (type) {
4387950SMichael.Speer@Sun.COM 	case VP_BOUND_TX:
4396495Sspeer 		set = &nxge->tx_set;
4406495Sspeer 		if (channel > NXGE_MAX_TDCS) {
4416495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
4426495Sspeer 			    "nxge_grp_dc_add: TDC = %d", channel));
4436495Sspeer 			return (NXGE_ERROR);
4446495Sspeer 		}
4456495Sspeer 		break;
4466495Sspeer 	case VP_BOUND_RX:
4476495Sspeer 		set = &nxge->rx_set;
4486495Sspeer 		if (channel > NXGE_MAX_RDCS) {
4496495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
4506495Sspeer 			    "nxge_grp_dc_add: RDC = %d", channel));
4516495Sspeer 			return (NXGE_ERROR);
4526495Sspeer 		}
4536495Sspeer 		break;
4547950SMichael.Speer@Sun.COM 
4557950SMichael.Speer@Sun.COM 	default:
4567950SMichael.Speer@Sun.COM 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
4577950SMichael.Speer@Sun.COM 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
4586495Sspeer 	}
4596495Sspeer 
4606495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
4616495Sspeer 	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
4626495Sspeer 	    type == VP_BOUND_TX ? 't' : 'r',
4636495Sspeer 	    nxge->mac.portnum, group->sequence, group->count, channel));
4646495Sspeer 
4656495Sspeer 	MUTEX_ENTER(&nxge->group_lock);
4666495Sspeer 	if (group->active != B_TRUE) {
4676495Sspeer 		/* We may be in the process of removing this group. */
4686495Sspeer 		MUTEX_EXIT(&nxge->group_lock);
4696495Sspeer 		return (NXGE_ERROR);
4706495Sspeer 	}
4716495Sspeer 	MUTEX_EXIT(&nxge->group_lock);
4726495Sspeer 
4736495Sspeer 	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
4746495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
4756495Sspeer 		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
4766495Sspeer 		return (NXGE_ERROR);
4776495Sspeer 	}
4786495Sspeer 
4796495Sspeer 	MUTEX_ENTER(&nhd->lock);
4806495Sspeer 
4816495Sspeer 	if (dc->group) {
4826495Sspeer 		MUTEX_EXIT(&nhd->lock);
4836495Sspeer 		/* This channel is already in use! */
4846495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
4856495Sspeer 		    "nxge_grp_dc_add(%d): channel already in group", channel));
4866495Sspeer 		return (NXGE_ERROR);
4876495Sspeer 	}
4886495Sspeer 
4896495Sspeer 	dc->next = 0;
4906495Sspeer 	dc->page = channel;
4916495Sspeer 	dc->channel = (nxge_channel_t)channel;
4926495Sspeer 
4936495Sspeer 	dc->type = type;
4946495Sspeer 	if (type == VP_BOUND_RX) {
4956495Sspeer 		dc->init = nxge_init_rxdma_channel;
4966495Sspeer 		dc->uninit = nxge_uninit_rxdma_channel;
4976495Sspeer 	} else {
4986495Sspeer 		dc->init = nxge_init_txdma_channel;
4996495Sspeer 		dc->uninit = nxge_uninit_txdma_channel;
5006495Sspeer 	}
5016495Sspeer 
5027755SMisaki.Kataoka@Sun.COM 	dc->group = group;
5036495Sspeer 
5046495Sspeer 	if (isLDOMguest(nxge))
5056495Sspeer 		(void) nxge_hio_ldsv_add(nxge, dc);
5066495Sspeer 
5076495Sspeer 	NXGE_DC_SET(set->owned.map, channel);
5086495Sspeer 	set->owned.count++;
5096495Sspeer 
5106495Sspeer 	MUTEX_EXIT(&nhd->lock);
5116495Sspeer 
5126602Sspeer 	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
5136602Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5146602Sspeer 		    "nxge_grp_dc_add(%d): channel init failed", channel));
5157950SMichael.Speer@Sun.COM 		MUTEX_ENTER(&nhd->lock);
5167950SMichael.Speer@Sun.COM 		(void) memset(dc, 0, sizeof (*dc));
5177950SMichael.Speer@Sun.COM 		NXGE_DC_RESET(set->owned.map, channel);
5187950SMichael.Speer@Sun.COM 		set->owned.count--;
5197950SMichael.Speer@Sun.COM 		MUTEX_EXIT(&nhd->lock);
5206603Sspeer 		return (NXGE_ERROR);
5216602Sspeer 	}
5226602Sspeer 
5236495Sspeer 	nxge_grp_dc_append(nxge, group, dc);
5246495Sspeer 
5257812SMichael.Speer@Sun.COM 	if (type == VP_BOUND_TX) {
5267812SMichael.Speer@Sun.COM 		MUTEX_ENTER(&nhd->lock);
5277812SMichael.Speer@Sun.COM 		nxge->tdc_is_shared[channel] = B_FALSE;
5287812SMichael.Speer@Sun.COM 		MUTEX_EXIT(&nhd->lock);
5297812SMichael.Speer@Sun.COM 	}
5307812SMichael.Speer@Sun.COM 
5316495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
5326495Sspeer 
5336602Sspeer 	return ((int)status);
5346495Sspeer }
5356495Sspeer 
5366495Sspeer void
5376495Sspeer nxge_grp_dc_remove(
5386495Sspeer 	nxge_t *nxge,
5396495Sspeer 	vpc_type_t type,
5406495Sspeer 	int channel)
5416495Sspeer {
5426495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
5436495Sspeer 	nxge_hio_dc_t *dc;
5446495Sspeer 	nxge_grp_set_t *set;
5456495Sspeer 	nxge_grp_t *group;
5466495Sspeer 
5476495Sspeer 	dc_uninit_t uninit;
5486495Sspeer 
5496495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
5506495Sspeer 
5517950SMichael.Speer@Sun.COM 	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
5527950SMichael.Speer@Sun.COM 		goto nxge_grp_dc_remove_exit;
5537950SMichael.Speer@Sun.COM 
5547950SMichael.Speer@Sun.COM 	if ((dc->group == NULL) && (dc->next == 0) &&
5557950SMichael.Speer@Sun.COM 	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
5567950SMichael.Speer@Sun.COM 		goto nxge_grp_dc_remove_exit;
5576495Sspeer 	}
5587950SMichael.Speer@Sun.COM 
5596495Sspeer 	group = (nxge_grp_t *)dc->group;
5606495Sspeer 
5616495Sspeer 	if (isLDOMguest(nxge)) {
5626495Sspeer 		(void) nxge_hio_intr_remove(nxge, type, channel);
5636495Sspeer 	}
5646495Sspeer 
5656495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
5666495Sspeer 	    "DC remove: group = %d.%d.%d, %cdc %d",
5676495Sspeer 	    nxge->mac.portnum, group->sequence, group->count,
5686495Sspeer 	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));
5696495Sspeer 
5706495Sspeer 	MUTEX_ENTER(&nhd->lock);
5716495Sspeer 
5726602Sspeer 	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
5736602Sspeer 
5746495Sspeer 	/* Remove the DC from its group. */
5756495Sspeer 	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
5766495Sspeer 		MUTEX_EXIT(&nhd->lock);
5776495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5787950SMichael.Speer@Sun.COM 		    "nxge_grp_dc_remove(%d) failed", channel));
5797950SMichael.Speer@Sun.COM 		goto nxge_grp_dc_remove_exit;
5806495Sspeer 	}
5816495Sspeer 
5826495Sspeer 	uninit = dc->uninit;
5836495Sspeer 	channel = dc->channel;
5846495Sspeer 
5856495Sspeer 	NXGE_DC_RESET(set->owned.map, channel);
5866495Sspeer 	set->owned.count--;
5876495Sspeer 
5886495Sspeer 	(void) memset(dc, 0, sizeof (*dc));
5896495Sspeer 
5906495Sspeer 	MUTEX_EXIT(&nhd->lock);
5916495Sspeer 
5926495Sspeer 	(*uninit)(nxge, channel);
5936495Sspeer 
5947950SMichael.Speer@Sun.COM nxge_grp_dc_remove_exit:
5956495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
5966495Sspeer }
5976495Sspeer 
5986495Sspeer nxge_hio_dc_t *
5996495Sspeer nxge_grp_dc_find(
6006495Sspeer 	nxge_t *nxge,
6016495Sspeer 	vpc_type_t type,	/* Rx or Tx */
6026495Sspeer 	int channel)
6036495Sspeer {
6046495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
6056495Sspeer 	nxge_hio_dc_t *current;
6066495Sspeer 
6076495Sspeer 	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
6086495Sspeer 
6096495Sspeer 	if (!isLDOMguest(nxge)) {
6106495Sspeer 		return (&current[channel]);
6116495Sspeer 	} else {
6126495Sspeer 		/* We're in a guest domain. */
6136495Sspeer 		int i, limit = (type == VP_BOUND_TX) ?
6146495Sspeer 		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;
6156495Sspeer 
6166495Sspeer 		MUTEX_ENTER(&nhd->lock);
6176495Sspeer 		for (i = 0; i < limit; i++, current++) {
6186495Sspeer 			if (current->channel == channel) {
6196495Sspeer 				if (current->vr && current->vr->nxge ==
6206495Sspeer 				    (uintptr_t)nxge) {
6216495Sspeer 					MUTEX_EXIT(&nhd->lock);
6226495Sspeer 					return (current);
6236495Sspeer 				}
6246495Sspeer 			}
6256495Sspeer 		}
6266495Sspeer 		MUTEX_EXIT(&nhd->lock);
6276495Sspeer 	}
6286495Sspeer 
6296495Sspeer 	return (0);
6306495Sspeer }
6316495Sspeer 
6326495Sspeer /*
6336495Sspeer  * nxge_grp_dc_append
6346495Sspeer  *
6356495Sspeer  *	Append a DMA channel to a group.
6366495Sspeer  *
6376495Sspeer  * Arguments:
6386495Sspeer  * 	nxge
6396495Sspeer  * 	group	The group to append to
6406495Sspeer  * 	dc	The DMA channel to append
6416495Sspeer  *
6426495Sspeer  * Notes:
6436495Sspeer  *
6446495Sspeer  * Context:
6456495Sspeer  *	Any domain
6466495Sspeer  */
6476495Sspeer static
6486495Sspeer void
6496495Sspeer nxge_grp_dc_append(
6506495Sspeer 	nxge_t *nxge,
6516495Sspeer 	nxge_grp_t *group,
6526495Sspeer 	nxge_hio_dc_t *dc)
6536495Sspeer {
6546495Sspeer 	MUTEX_ENTER(&nxge->group_lock);
6556495Sspeer 
6566495Sspeer 	if (group->dc == 0) {
6576495Sspeer 		group->dc = dc;
6586495Sspeer 	} else {
6596495Sspeer 		nxge_hio_dc_t *current = group->dc;
6606495Sspeer 		do {
6616495Sspeer 			if (current->next == 0) {
6626495Sspeer 				current->next = dc;
6636495Sspeer 				break;
6646495Sspeer 			}
6656495Sspeer 			current = current->next;
6666495Sspeer 		} while (current);
6676495Sspeer 	}
6686495Sspeer 
6696495Sspeer 	NXGE_DC_SET(group->map, dc->channel);
6706495Sspeer 
6716495Sspeer 	nxge_grp_dc_map(group);
6726602Sspeer 	group->count++;
6736495Sspeer 
6746495Sspeer 	MUTEX_EXIT(&nxge->group_lock);
6756495Sspeer }
6766495Sspeer 
6776495Sspeer /*
6786495Sspeer  * nxge_grp_dc_unlink
6796495Sspeer  *
6806495Sspeer  *	Unlink a DMA channel from its linked list (group).
6816495Sspeer  *
6826495Sspeer  * Arguments:
6836495Sspeer  * 	nxge
6846495Sspeer  * 	group	The group (linked list) to unlink from
6856495Sspeer  * 	channel	The number of the channel to unlink
6866495Sspeer  *
6876495Sspeer  * Notes:
6886495Sspeer  *
6896495Sspeer  * Context:
6906495Sspeer  *	Any domain
6916495Sspeer  */
6926495Sspeer nxge_hio_dc_t *
6938275SEric Cheng nxge_grp_dc_unlink(
6948275SEric Cheng 	nxge_t *nxge,
6958275SEric Cheng 	nxge_grp_t *group,
6968275SEric Cheng 	int channel)
6976495Sspeer {
6986495Sspeer 	nxge_hio_dc_t *current, *previous;
6996495Sspeer 
7006495Sspeer 	MUTEX_ENTER(&nxge->group_lock);
7016495Sspeer 
7027812SMichael.Speer@Sun.COM 	if (group == NULL) {
7037812SMichael.Speer@Sun.COM 		MUTEX_EXIT(&nxge->group_lock);
7047812SMichael.Speer@Sun.COM 		return (0);
7057812SMichael.Speer@Sun.COM 	}
7067812SMichael.Speer@Sun.COM 
7076495Sspeer 	if ((current = group->dc) == 0) {
7086495Sspeer 		MUTEX_EXIT(&nxge->group_lock);
7096495Sspeer 		return (0);
7106495Sspeer 	}
7116495Sspeer 
7126495Sspeer 	previous = 0;
7136495Sspeer 	do {
7146495Sspeer 		if (current->channel == channel) {
7156495Sspeer 			if (previous)
7166495Sspeer 				previous->next = current->next;
7176495Sspeer 			else
7186495Sspeer 				group->dc = current->next;
7196495Sspeer 			break;
7206495Sspeer 		}
7216495Sspeer 		previous = current;
7226495Sspeer 		current = current->next;
7236495Sspeer 	} while (current);
7246495Sspeer 
7256495Sspeer 	if (current == 0) {
7266495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
7276495Sspeer 		    "DC unlink: DC %d not found", channel));
7286495Sspeer 	} else {
7296495Sspeer 		current->next = 0;
7306495Sspeer 		current->group = 0;
7316495Sspeer 
7328275SEric Cheng 		NXGE_DC_RESET(group->map, channel);
7336495Sspeer 		group->count--;
7346495Sspeer 	}
7356495Sspeer 
7366495Sspeer 	nxge_grp_dc_map(group);
7376495Sspeer 
7386495Sspeer 	MUTEX_EXIT(&nxge->group_lock);
7396495Sspeer 
7406495Sspeer 	return (current);
7416495Sspeer }
7426495Sspeer 
7436495Sspeer /*
7446495Sspeer  * nxge_grp_dc_map
7456495Sspeer  *
7466495Sspeer  *	Map a linked list to an array of channel numbers.
7476495Sspeer  *
7486495Sspeer  * Arguments:
7496495Sspeer  * 	nxge
7506495Sspeer  * 	group	The group to remap.
7516495Sspeer  *
7526495Sspeer  * Notes:
7536495Sspeer  *	It is expected that the caller will hold the correct mutex.
7546495Sspeer  *
7556495Sspeer  * Context:
7566495Sspeer  *	Service domain
7576495Sspeer  */
7586495Sspeer void
7596495Sspeer nxge_grp_dc_map(
7606495Sspeer 	nxge_grp_t *group)
7616495Sspeer {
7626495Sspeer 	nxge_channel_t *legend;
7636495Sspeer 	nxge_hio_dc_t *dc;
7646495Sspeer 
7656495Sspeer 	(void) memset(group->legend, 0, sizeof (group->legend));
7666495Sspeer 
7676495Sspeer 	legend = group->legend;
7686495Sspeer 	dc = group->dc;
7696495Sspeer 	while (dc) {
7706495Sspeer 		*legend = dc->channel;
7716495Sspeer 		legend++;
7726495Sspeer 		dc = dc->next;
7736495Sspeer 	}
7746495Sspeer }
7756495Sspeer 
7766495Sspeer /*
7776495Sspeer  * ---------------------------------------------------------------------
7786495Sspeer  * These are HIO debugging functions.
7796495Sspeer  * ---------------------------------------------------------------------
7806495Sspeer  */
7816495Sspeer 
7826495Sspeer /*
7836495Sspeer  * nxge_delay
7846495Sspeer  *
7856495Sspeer  *	Delay <seconds> number of seconds.
7866495Sspeer  *
7876495Sspeer  * Arguments:
7886495Sspeer  * 	seconds	The number of seconds to delay
7916495Sspeer  *
7926495Sspeer  * Notes:
7936495Sspeer  *	This is a developer-only function.
7946495Sspeer  *
7956495Sspeer  * Context:
7966495Sspeer  *	Any domain
7976495Sspeer  */
7986495Sspeer void
7996495Sspeer nxge_delay(
8006495Sspeer 	int seconds)
8016495Sspeer {
8026495Sspeer 	delay(drv_usectohz(seconds * 1000000));
8036495Sspeer }
8046495Sspeer 
8056495Sspeer static dmc_reg_name_t rx_names[] = {
8066495Sspeer 	{ "RXDMA_CFIG1",	0 },
8076495Sspeer 	{ "RXDMA_CFIG2",	8 },
8086495Sspeer 	{ "RBR_CFIG_A",		0x10 },
8096495Sspeer 	{ "RBR_CFIG_B",		0x18 },
8106495Sspeer 	{ "RBR_KICK",		0x20 },
8116495Sspeer 	{ "RBR_STAT",		0x28 },
8126495Sspeer 	{ "RBR_HDH",		0x30 },
8136495Sspeer 	{ "RBR_HDL",		0x38 },
8146495Sspeer 	{ "RCRCFIG_A",		0x40 },
8156495Sspeer 	{ "RCRCFIG_B",		0x48 },
8166495Sspeer 	{ "RCRSTAT_A",		0x50 },
8176495Sspeer 	{ "RCRSTAT_B",		0x58 },
8186495Sspeer 	{ "RCRSTAT_C",		0x60 },
8196495Sspeer 	{ "RX_DMA_ENT_MSK",	0x68 },
8206495Sspeer 	{ "RX_DMA_CTL_STAT",	0x70 },
8216495Sspeer 	{ "RCR_FLSH",		0x78 },
8226495Sspeer 	{ "RXMISC",		0x90 },
8236495Sspeer 	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
8246495Sspeer 	{ 0, -1 }
8256495Sspeer };
8266495Sspeer 
8276495Sspeer static dmc_reg_name_t tx_names[] = {
8286495Sspeer 	{ "Tx_RNG_CFIG",	0 },
8296495Sspeer 	{ "Tx_RNG_HDL",		0x10 },
8306495Sspeer 	{ "Tx_RNG_KICK",	0x18 },
8316495Sspeer 	{ "Tx_ENT_MASK",	0x20 },
8326495Sspeer 	{ "Tx_CS",		0x28 },
8336495Sspeer 	{ "TxDMA_MBH",		0x30 },
8346495Sspeer 	{ "TxDMA_MBL",		0x38 },
8356495Sspeer 	{ "TxDMA_PRE_ST",	0x40 },
8366495Sspeer 	{ "Tx_RNG_ERR_LOGH",	0x48 },
8376495Sspeer 	{ "Tx_RNG_ERR_LOGL",	0x50 },
8386495Sspeer 	{ "TDMC_INTR_DBG",	0x60 },
8396495Sspeer 	{ "Tx_CS_DBG",		0x68 },
8406495Sspeer 	{ 0, -1 }
8416495Sspeer };
8426495Sspeer 
8436495Sspeer /*
8446495Sspeer  * nxge_xx2str
8456495Sspeer  *
8466495Sspeer  *	Translate a register address into a string.
8476495Sspeer  *
8486495Sspeer  * Arguments:
8496495Sspeer  * 	offset	The address of the register to translate.
8506495Sspeer  *
8516495Sspeer  * Notes:
8526495Sspeer  *	These are developer-only functions.
8536495Sspeer  *
8546495Sspeer  * Context:
8556495Sspeer  *	Any domain
8566495Sspeer  */
8576495Sspeer const char *
8586495Sspeer nxge_rx2str(
8596495Sspeer 	int offset)
8606495Sspeer {
8616495Sspeer 	dmc_reg_name_t *reg = &rx_names[0];
8626495Sspeer 
8636495Sspeer 	offset &= DMA_CSR_MASK;
8646495Sspeer 
8656495Sspeer 	while (reg->name) {
8666495Sspeer 		if (offset == reg->offset)
8676495Sspeer 			return (reg->name);
8686495Sspeer 		reg++;
8696495Sspeer 	}
8706495Sspeer 
8716495Sspeer 	return (0);
8726495Sspeer }
8736495Sspeer 
8746495Sspeer const char *
8756495Sspeer nxge_tx2str(
8766495Sspeer 	int offset)
8776495Sspeer {
8786495Sspeer 	dmc_reg_name_t *reg = &tx_names[0];
8796495Sspeer 
8806495Sspeer 	offset &= DMA_CSR_MASK;
8816495Sspeer 
8826495Sspeer 	while (reg->name) {
8836495Sspeer 		if (offset == reg->offset)
8846495Sspeer 			return (reg->name);
8856495Sspeer 		reg++;
8866495Sspeer 	}
8876495Sspeer 
8886495Sspeer 	return (0);
8896495Sspeer }
8906495Sspeer 
8916495Sspeer /*
8926495Sspeer  * nxge_ddi_perror
8936495Sspeer  *
8946495Sspeer  *	Map a DDI error number to a string.
8956495Sspeer  *
8966495Sspeer  * Arguments:
8976495Sspeer  * 	ddi_error	The DDI error number to map.
8986495Sspeer  *
8996495Sspeer  * Notes:
9006495Sspeer  *
9016495Sspeer  * Context:
9026495Sspeer  *	Any domain
9036495Sspeer  */
9046495Sspeer const char *
9056495Sspeer nxge_ddi_perror(
9066495Sspeer 	int ddi_error)
9076495Sspeer {
9086495Sspeer 	switch (ddi_error) {
9096495Sspeer 	case DDI_SUCCESS:
9106495Sspeer 		return ("DDI_SUCCESS");
9116495Sspeer 	case DDI_FAILURE:
9126495Sspeer 		return ("DDI_FAILURE");
9136495Sspeer 	case DDI_NOT_WELL_FORMED:
9146495Sspeer 		return ("DDI_NOT_WELL_FORMED");
9156495Sspeer 	case DDI_EAGAIN:
9166495Sspeer 		return ("DDI_EAGAIN");
9176495Sspeer 	case DDI_EINVAL:
9186495Sspeer 		return ("DDI_EINVAL");
9196495Sspeer 	case DDI_ENOTSUP:
9206495Sspeer 		return ("DDI_ENOTSUP");
9216495Sspeer 	case DDI_EPENDING:
9226495Sspeer 		return ("DDI_EPENDING");
9236495Sspeer 	case DDI_ENOMEM:
9246495Sspeer 		return ("DDI_ENOMEM");
9256495Sspeer 	case DDI_EBUSY:
9266495Sspeer 		return ("DDI_EBUSY");
9276495Sspeer 	case DDI_ETRANSPORT:
9286495Sspeer 		return ("DDI_ETRANSPORT");
9296495Sspeer 	case DDI_ECONTEXT:
9306495Sspeer 		return ("DDI_ECONTEXT");
9316495Sspeer 	default:
9326495Sspeer 		return ("Unknown error");
9336495Sspeer 	}
9346495Sspeer }
9356495Sspeer 
9366495Sspeer /*
9376495Sspeer  * ---------------------------------------------------------------------
9386495Sspeer  * These are Sun4v HIO function definitions
9396495Sspeer  * ---------------------------------------------------------------------
9406495Sspeer  */
9416495Sspeer 
9426495Sspeer #if defined(sun4v)
9436495Sspeer 
9446495Sspeer /*
9456495Sspeer  * Local prototypes
9466495Sspeer  */
9477755SMisaki.Kataoka@Sun.COM static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
9487755SMisaki.Kataoka@Sun.COM static void nxge_hio_unshare(nxge_hio_vr_t *);
9496495Sspeer 
9508275SEric Cheng static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
9517755SMisaki.Kataoka@Sun.COM static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
9526495Sspeer 
9538275SEric Cheng static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
9548275SEric Cheng static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
9558275SEric Cheng static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
9566495Sspeer static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
9576495Sspeer     mac_ring_type_t, int);
9586495Sspeer 
9596495Sspeer /*
9606495Sspeer  * nxge_hio_init
9616495Sspeer  *
9626495Sspeer  *	Initialize the HIO module of the NXGE driver.
9636495Sspeer  *
9646495Sspeer  * Arguments:
9656495Sspeer  * 	nxge
9666495Sspeer  *
9676495Sspeer  * Notes:
9686495Sspeer  *
9696495Sspeer  * Context:
9706495Sspeer  *	Any domain
9716495Sspeer  */
9726495Sspeer int
97310577SMichael.Speer@Sun.COM nxge_hio_init(nxge_t *nxge)
9746495Sspeer {
9756495Sspeer 	nxge_hio_data_t *nhd;
9766495Sspeer 	int i, region;
9776495Sspeer 
9786495Sspeer 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
9796495Sspeer 	if (nhd == 0) {
9806495Sspeer 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
9816495Sspeer 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
98210577SMichael.Speer@Sun.COM 		if (isLDOMguest(nxge))
98310577SMichael.Speer@Sun.COM 			nhd->type = NXGE_HIO_TYPE_GUEST;
98410577SMichael.Speer@Sun.COM 		else
98510577SMichael.Speer@Sun.COM 			nhd->type = NXGE_HIO_TYPE_SERVICE;
9866495Sspeer 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
9876495Sspeer 	}
9886495Sspeer 
9896713Sspeer 	if ((nxge->environs == SOLARIS_DOMAIN) &&
9906713Sspeer 	    (nxge->niu_type == N2_NIU)) {
9916495Sspeer 		if (nxge->niu_hsvc_available == B_TRUE) {
9926495Sspeer 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
993*11304SJanie.Lu@Sun.COM 			/*
994*11304SJanie.Lu@Sun.COM 			 * Versions supported now are:
995*11304SJanie.Lu@Sun.COM 			 *  - major number >= 1 (NIU_MAJOR_VER).
996*11304SJanie.Lu@Sun.COM 			 */
997*11304SJanie.Lu@Sun.COM 			if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
998*11304SJanie.Lu@Sun.COM 			    (niu_hsvc->hsvc_major == 1 &&
999*11304SJanie.Lu@Sun.COM 			    niu_hsvc->hsvc_minor == 1)) {
10006495Sspeer 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
1001*11304SJanie.Lu@Sun.COM 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1002*11304SJanie.Lu@Sun.COM 				    "nxge_hio_init: hypervisor services "
1003*11304SJanie.Lu@Sun.COM 				    "version %d.%d",
1004*11304SJanie.Lu@Sun.COM 				    niu_hsvc->hsvc_major,
1005*11304SJanie.Lu@Sun.COM 				    niu_hsvc->hsvc_minor));
1006*11304SJanie.Lu@Sun.COM 			}
10076495Sspeer 		}
10086495Sspeer 	}
10096495Sspeer 
10108275SEric Cheng 	/*
10118275SEric Cheng 	 * Initialize share and ring group structures.
10128275SEric Cheng 	 */
10138275SEric Cheng 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
10148275SEric Cheng 		nxge->tx_hio_groups[i].ghandle = NULL;
10158275SEric Cheng 		nxge->tx_hio_groups[i].nxgep = nxge;
10168275SEric Cheng 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
10178275SEric Cheng 		nxge->tx_hio_groups[i].gindex = 0;
10188275SEric Cheng 		nxge->tx_hio_groups[i].sindex = 0;
10198275SEric Cheng 	}
10208275SEric Cheng 
10218275SEric Cheng 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
10228275SEric Cheng 		nxge->rx_hio_groups[i].ghandle = NULL;
10238275SEric Cheng 		nxge->rx_hio_groups[i].nxgep = nxge;
10248275SEric Cheng 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
10258275SEric Cheng 		nxge->rx_hio_groups[i].gindex = 0;
10268275SEric Cheng 		nxge->rx_hio_groups[i].sindex = 0;
10278275SEric Cheng 		nxge->rx_hio_groups[i].started = B_FALSE;
10288597SMichael.Speer@Sun.COM 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
10298275SEric Cheng 		nxge->rx_hio_groups[i].rdctbl = -1;
10308275SEric Cheng 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
10318275SEric Cheng 	}
10328275SEric Cheng 
10336495Sspeer 	if (!isLDOMs(nxge)) {
10346495Sspeer 		nhd->hio.ldoms = B_FALSE;
10356495Sspeer 		return (NXGE_OK);
10366495Sspeer 	}
10376495Sspeer 
10386495Sspeer 	nhd->hio.ldoms = B_TRUE;
10396495Sspeer 
10406495Sspeer 	/*
10416495Sspeer 	 * Fill in what we can.
10426495Sspeer 	 */
10436495Sspeer 	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
10446495Sspeer 		nhd->vr[region].region = region;
10456495Sspeer 	}
10467755SMisaki.Kataoka@Sun.COM 	nhd->vrs = NXGE_VR_SR_MAX - 2;
10476495Sspeer 
10486495Sspeer 	/*
10498275SEric Cheng 	 * Initialize the share stuctures.
10506495Sspeer 	 */
10517812SMichael.Speer@Sun.COM 	for (i = 0; i < NXGE_MAX_TDCS; i++)
10527812SMichael.Speer@Sun.COM 		nxge->tdc_is_shared[i] = B_FALSE;
10537812SMichael.Speer@Sun.COM 
10546495Sspeer 	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
10556495Sspeer 		nxge->shares[i].nxgep = nxge;
10566495Sspeer 		nxge->shares[i].index = 0;
10578275SEric Cheng 		nxge->shares[i].vrp = NULL;
10586495Sspeer 		nxge->shares[i].tmap = 0;
10596495Sspeer 		nxge->shares[i].rmap = 0;
10606495Sspeer 		nxge->shares[i].rxgroup = 0;
10616495Sspeer 		nxge->shares[i].active = B_FALSE;
10626495Sspeer 	}
10636495Sspeer 
10646495Sspeer 	/* Fill in the HV HIO function pointers. */
10656495Sspeer 	nxge_hio_hv_init(nxge);
10666495Sspeer 
10676495Sspeer 	if (isLDOMservice(nxge)) {
10686495Sspeer 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
10696495Sspeer 		    "Hybrid IO-capable service domain"));
10706495Sspeer 		return (NXGE_OK);
10716495Sspeer 	}
10726495Sspeer 
10736495Sspeer 	return (0);
10746495Sspeer }
10758275SEric Cheng #endif /* defined(sun4v) */
10768275SEric Cheng 
/*
 * nxge_hio_group_mac_add
 *
 *	Program a unicast MAC address into the RDC table backing the
 *	given RX ring group.
 *
 * Arguments:
 *	nxge
 *	g	The ring group receiving the address.
 *	macaddr	The unicast address to add.
 *
 * Notes:
 *	genlock is deliberately dropped across nxge_m_mmac_add_g(),
 *	which takes it itself; only the group bookkeeping is done
 *	under the lock here.
 *
 * Context:
 *	Service domain
 */
static int
nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
    const uint8_t *macaddr)
{
	int rv;
	nxge_rdc_grp_t *group;

	mutex_enter(nxge->genlock);

	/*
	 * Initialize the NXGE RDC table data structure.
	 */
	group = &nxge->pt_config.rdc_grps[g->rdctbl];
	if (!group->flag) {
		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
		group->flag = B_TRUE;	/* This group has been configured. */
	}

	mutex_exit(nxge->genlock);

	/*
	 * Add the MAC address.
	 */
	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
	    g->rdctbl, B_TRUE)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	g->n_mac_addrs++;
	mutex_exit(nxge->genlock);
	return (0);
}
11116495Sspeer 
11126495Sspeer static int
11138597SMichael.Speer@Sun.COM nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
11148597SMichael.Speer@Sun.COM {
11158597SMichael.Speer@Sun.COM 	p_nxge_t		nxgep = (p_nxge_t)arg;
11168597SMichael.Speer@Sun.COM 	struct ether_addr	addrp;
11178597SMichael.Speer@Sun.COM 
11188597SMichael.Speer@Sun.COM 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
11198597SMichael.Speer@Sun.COM 	if (nxge_set_mac_addr(nxgep, &addrp)) {
11208597SMichael.Speer@Sun.COM 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
11218597SMichael.Speer@Sun.COM 		    "<== nxge_m_unicst: set unitcast failed"));
11228597SMichael.Speer@Sun.COM 		return (EINVAL);
11238597SMichael.Speer@Sun.COM 	}
11248597SMichael.Speer@Sun.COM 
11258597SMichael.Speer@Sun.COM 	nxgep->primary = B_TRUE;
11268597SMichael.Speer@Sun.COM 
11278597SMichael.Speer@Sun.COM 	return (0);
11288597SMichael.Speer@Sun.COM }
11298597SMichael.Speer@Sun.COM 
11308597SMichael.Speer@Sun.COM /*ARGSUSED*/
11318597SMichael.Speer@Sun.COM static int
11328597SMichael.Speer@Sun.COM nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
11338597SMichael.Speer@Sun.COM {
11348597SMichael.Speer@Sun.COM 	nxgep->primary = B_FALSE;
11358597SMichael.Speer@Sun.COM 	return (0);
11368597SMichael.Speer@Sun.COM }
11378597SMichael.Speer@Sun.COM 
11388597SMichael.Speer@Sun.COM static int
11396495Sspeer nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
11406495Sspeer {
114110309SSriharsha.Basavapatna@Sun.COM 	nxge_ring_group_t	*group = (nxge_ring_group_t *)arg;
114210309SSriharsha.Basavapatna@Sun.COM 	p_nxge_t		nxge = group->nxgep;
114310309SSriharsha.Basavapatna@Sun.COM 	int			rv;
114410309SSriharsha.Basavapatna@Sun.COM 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
11456495Sspeer 
11468275SEric Cheng 	ASSERT(group->type == MAC_RING_TYPE_RX);
114710309SSriharsha.Basavapatna@Sun.COM 	ASSERT(group->nxgep != NULL);
114810309SSriharsha.Basavapatna@Sun.COM 
114910309SSriharsha.Basavapatna@Sun.COM 	if (isLDOMguest(group->nxgep))
115010309SSriharsha.Basavapatna@Sun.COM 		return (0);
11518275SEric Cheng 
11528275SEric Cheng 	mutex_enter(nxge->genlock);
11536495Sspeer 
11548597SMichael.Speer@Sun.COM 	if (!nxge->primary && group->port_default_grp) {
11558597SMichael.Speer@Sun.COM 		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
11568597SMichael.Speer@Sun.COM 		mutex_exit(nxge->genlock);
11578597SMichael.Speer@Sun.COM 		return (rv);
11588597SMichael.Speer@Sun.COM 	}
11598597SMichael.Speer@Sun.COM 
11606495Sspeer 	/*
11618275SEric Cheng 	 * If the group is associated with a VR, then only one
11628275SEric Cheng 	 * address may be assigned to the group.
11636495Sspeer 	 */
11648275SEric Cheng 	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
11658275SEric Cheng 	if ((vr != NULL) && (group->n_mac_addrs)) {
11668275SEric Cheng 		mutex_exit(nxge->genlock);
11678275SEric Cheng 		return (ENOSPC);
11688275SEric Cheng 	}
11698275SEric Cheng 
11708275SEric Cheng 	mutex_exit(nxge->genlock);
11718275SEric Cheng 
11728275SEric Cheng 	/*
11738275SEric Cheng 	 * Program the mac address for the group.
11748275SEric Cheng 	 */
117510309SSriharsha.Basavapatna@Sun.COM 	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
11766495Sspeer 		return (rv);
11776495Sspeer 	}
11786495Sspeer 
11796495Sspeer 	return (0);
11806495Sspeer }
11816495Sspeer 
11828275SEric Cheng static int
11838275SEric Cheng find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
11848275SEric Cheng {
11858275SEric Cheng 	int i;
11868275SEric Cheng 	for (i = 0; i <= mmac_info->num_mmac; i++) {
11878275SEric Cheng 		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
11888275SEric Cheng 		    ETHERADDRL) == 0) {
11898275SEric Cheng 			return (i);
11908275SEric Cheng 		}
11918275SEric Cheng 	}
11928275SEric Cheng 	return (-1);
11938275SEric Cheng }
11948275SEric Cheng 
/* ARGSUSED */
/*
 * nxge_hio_rem_mac
 *
 *	MAC layer entry point (mgi_remmac) to remove a unicast address
 *	from an RX ring group.
 *
 * Notes:
 *	If the address is not in the alternate pool but is the port's
 *	current primary address on the default group, the primary flag
 *	is cleared instead of removing a pool slot.
 *
 * Context:
 *	Service domain
 */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	struct ether_addr addrp;
	p_nxge_t nxge = group->nxgep;
	nxge_mmac_t *mmac_info;
	int rv, slot;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	/* Guest domains do not manage the MAC tables. */
	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	mmac_info = &nxge->nxge_mmac_info;
	slot = find_mac_slot(mmac_info, mac_addr);
	if (slot < 0) {
		/*
		 * Not an alternate address.  It may be the primary
		 * address of the port default group.
		 */
		if (group->port_default_grp && nxge->primary) {
			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
				rv = nxge_hio_clear_unicst(nxge, mac_addr);
				mutex_exit(nxge->genlock);
				return (rv);
			} else {
				mutex_exit(nxge->genlock);
				return (EINVAL);
			}
		} else {
			mutex_exit(nxge->genlock);
			return (EINVAL);
		}
	}

	mutex_exit(nxge->genlock);

	/*
	 * Remove the mac address for the group
	 */
	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	group->n_mac_addrs--;
	mutex_exit(nxge->genlock);

	return (0);
}
12476495Sspeer 
/*
 * nxge_hio_group_start
 *
 *	MAC layer entry point (mgi_start) for an RX ring group: bind
 *	the group to an RDC table and initialize that table.
 *
 * Returns:
 *	0 on success, ENXIO if the MAC is not started, or the negative
 *	value from nxge_fzc_rdc_tbl_bind() on bind failure.
 *
 * Context:
 *	Service or guest domain (guests only mark the group started).
 */
static int
nxge_hio_group_start(mac_group_driver_t gdriver)
{
	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
	nxge_rdc_grp_t		*rdc_grp_p;
	int			rdctbl;
	int			dev_gindex;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
		return (ENXIO);

	mutex_enter(group->nxgep->genlock);
	/* Guests have no RDC tables to bind; just mark the group. */
	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_start_exit;

	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
	    group->gindex;
	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];

	/*
	 * Get an rdc table for this group.
	 * Group ID is given by the caller, and that's the group it needs
	 * to bind to.  The default group is already bound when the driver
	 * was attached.
	 *
	 * For Group 0, it's RDC table was allocated at attach time
	 * no need to allocate a new table.
	 */
	if (group->gindex != 0) {
		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
		    dev_gindex, B_TRUE);
		if (rdctbl < 0) {
			mutex_exit(group->nxgep->genlock);
			return (rdctbl);
		}
	} else {
		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
	}

	group->rdctbl = rdctbl;

	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);

nxge_hio_group_start_exit:
	group->started = B_TRUE;
	mutex_exit(group->nxgep->genlock);
	return (0);
}
13008275SEric Cheng 
/*
 * nxge_hio_group_stop
 *
 *	MAC layer entry point (mgi_stop) for an RX ring group: mark it
 *	stopped and release its RDC table binding.
 *
 * Context:
 *	Service or guest domain (guests only clear the started flag).
 */
static void
nxge_hio_group_stop(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(group->nxgep->genlock);
	group->started = B_FALSE;

	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_stop_exit;

	/*
	 * Unbind the RDC table previously bound for this group.
	 *
	 * Since RDC table for group 0 was allocated at attach
	 * time, no need to unbind the table here.
	 */
	if (group->gindex != 0)
		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);

nxge_hio_group_stop_exit:
	mutex_exit(group->nxgep->genlock);
}
13268275SEric Cheng 
/* ARGSUSED */
/*
 * nxge_hio_group_get
 *
 *	MAC layer callback: describe ring group 'groupid' of the given
 *	type by filling in 'infop' (driver handle, start/stop and
 *	addmac/remmac entry points, ring count).
 *
 * Notes:
 *	Only MAC_RING_TYPE_RX and MAC_RING_TYPE_TX are ever passed in,
 *	hence no default case.  For TX, groupid is shifted by one
 *	because the MAC layer does not know the default group 0.
 *
 * Context:
 *	Service or guest domain
 */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
	mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	nxge_ring_group_t	*group;
	int			dev_gindex;

	switch (type) {
	case MAC_RING_TYPE_RX:
		group = &nxgep->rx_hio_groups[groupid];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid;
		group->sindex = 0;	/* not yet bound to a share */

		if (!isLDOMguest(nxgep)) {
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
			    groupid;

			/* Group 0 maps onto the port default RDC group. */
			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
			    dev_gindex)
				group->port_default_grp = B_TRUE;

			infop->mgi_count =
			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
		} else {
			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		}

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = nxge_hio_group_start;
		infop->mgi_stop = nxge_hio_group_stop;
		infop->mgi_addmac = nxge_hio_add_mac;
		infop->mgi_remmac = nxge_hio_rem_mac;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * 'groupid' for TX should be incremented by one since
		 * the default group (groupid 0) is not known by the MAC layer
		 */
		group = &nxgep->tx_hio_groups[groupid + 1];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid + 1;
		group->sindex = 0;	/* not yet bound to a share */

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;	/* not needed */
		infop->mgi_remmac = NULL;	/* not needed */
		/* no rings associated with group initially */
		infop->mgi_count = 0;
		break;
	}
}
13876495Sspeer 
13888275SEric Cheng #if defined(sun4v)
13898275SEric Cheng 
/*
 * nxge_hio_share_assign
 *
 *	Ask the hypervisor to bind a VR to the guest identified by
 *	'cookie', then assign each of the VR's shared TDCs and RDCs
 *	into the VR's channel slots.
 *
 * Arguments:
 *	nxge
 *	cookie	The HV cookie identifying the guest/VR binding.
 *	tmap	Out: bitmap of TDC slots chosen by the HV.
 *	rmap	Out: bitmap of RDC slots chosen by the HV.
 *	vr	The virtualization region being assigned.
 *
 * Notes:
 *	The HV entry point depends on the negotiated major version:
 *	v1 uses vr.assign(), v2+ uses vr.cfgh_assign() with the
 *	config handle.
 *
 *	NOTE(review): the outer for-loops over NXGE_MAX_TDCS/RDCS are
 *	apparently redundant -- the inner while loop exhausts the dc
 *	list on the first iteration.  Also, the channel-assign failure
 *	paths return -EIO without calling nxge_hio_unshare(), unlike
 *	the VR-assign failure path; confirm whether cleanup is done by
 *	the caller.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;
	uint64_t major;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	major = nxge->niu_hsvc.hsvc_major;
	switch (major) {
	case NIU_MAJOR_VER: /* 1 */
		if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
			NXGE_ERROR_MSG((nxge, HIO_CTL,
			    "nxge_hio_share_assign: major %d "
			    "vr->assign() returned %d", major, hv_rv));
			nxge_hio_unshare(vr);
			return (-EIO);
		}

		break;

	case NIU_MAJOR_VER_2: /* 2 */
	default:
		if ((hv_rv = (*fp->cfgh_assign)
		    (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
			NXGE_ERROR_MSG((nxge, HIO_CTL,
			    "nxge_hio_share_assign: major %d "
			    "vr->assign() returned %d", major, hv_rv));
			nxge_hio_unshare(vr);
			return (-EIO);
		}

		break;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_hio_share_assign: major %d "
	    "vr->assign() success", major));

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}
14966495Sspeer 
14978275SEric Cheng void
14986495Sspeer nxge_hio_share_unassign(
14996495Sspeer 	nxge_hio_vr_t *vr)
15006495Sspeer {
15016495Sspeer 	nxge_t *nxge = (nxge_t *)vr->nxge;
15026495Sspeer 	nxge_hio_data_t *nhd;
15036495Sspeer 	nxge_hio_dc_t *dc;
15046495Sspeer 	nxhv_vr_fp_t *fp;
15056495Sspeer 	uint64_t hv_rv;
15066495Sspeer 
15076495Sspeer 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
15086495Sspeer 
15096495Sspeer 	dc = vr->tx_group.dc;
15106495Sspeer 	while (dc) {
15116495Sspeer 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
15126495Sspeer 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
15136495Sspeer 		if (hv_rv != 0) {
15146495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
15157950SMichael.Speer@Sun.COM 			    "nxge_hio_share_unassign: "
15166495Sspeer 			    "tx->unassign(%x, %d) failed: %ld",
15176495Sspeer 			    vr->cookie, dc->page, hv_rv));
15186495Sspeer 		}
15196495Sspeer 		dc = dc->next;
15206495Sspeer 	}
15216495Sspeer 
15226495Sspeer 	dc = vr->rx_group.dc;
15236495Sspeer 	while (dc) {
15246495Sspeer 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
15256495Sspeer 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
15266495Sspeer 		if (hv_rv != 0) {
15276495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
15287950SMichael.Speer@Sun.COM 			    "nxge_hio_share_unassign: "
15296495Sspeer 			    "rx->unassign(%x, %d) failed: %ld",
15306495Sspeer 			    vr->cookie, dc->page, hv_rv));
15316495Sspeer 		}
15326495Sspeer 		dc = dc->next;
15336495Sspeer 	}
15346495Sspeer 
15356495Sspeer 	fp = &nhd->hio.vr;
15366495Sspeer 	if (fp->unassign) {
15376495Sspeer 		hv_rv = (*fp->unassign)(vr->cookie);
15386495Sspeer 		if (hv_rv != 0) {
15397950SMichael.Speer@Sun.COM 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
15407950SMichael.Speer@Sun.COM 			    "nxge_hio_share_unassign: "
15416495Sspeer 			    "vr->assign(%x) failed: %ld",
15426495Sspeer 			    vr->cookie, hv_rv));
15436495Sspeer 		}
15446495Sspeer 	}
15456495Sspeer }
15466495Sspeer 
/*
 * nxge_hio_share_alloc
 *
 *	MAC layer entry point: allocate a share (backed by a VR) and
 *	return a handle to it.  The share is not active until it is
 *	later bound (ms_sbind) and populated (ms_sadd).
 *
 * Returns:
 *	0 on success, EIO if the HV assign functions are missing,
 *	EAGAIN if no VR is available.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
{
	p_nxge_t		nxge = (p_nxge_t)arg;
	nxge_share_handle_t	*shp;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((vr = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);

	shp = &nxge->shares[vr->region];
	shp->nxgep = nxge;
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
	shp->active = B_FALSE;		/* not bound yet */

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}
15826495Sspeer 
15838275SEric Cheng 
15846495Sspeer void
15856495Sspeer nxge_hio_share_free(mac_share_handle_t shandle)
15866495Sspeer {
15878275SEric Cheng 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
15888275SEric Cheng 	nxge_hio_vr_t		*vr;
15898275SEric Cheng 
15908275SEric Cheng 	/*
15918275SEric Cheng 	 * Clear internal handle state.
15928275SEric Cheng 	 */
15938275SEric Cheng 	vr = shp->vrp;
15948275SEric Cheng 	shp->vrp = (void *)NULL;
15958275SEric Cheng 	shp->index = 0;
15968275SEric Cheng 	shp->tmap = 0;
15978275SEric Cheng 	shp->rmap = 0;
15988275SEric Cheng 	shp->rxgroup = 0;
15998275SEric Cheng 	shp->active = B_FALSE;
16008275SEric Cheng 
16018275SEric Cheng 	/*
16028275SEric Cheng 	 * Free VR resource.
16038275SEric Cheng 	 */
16048275SEric Cheng 	nxge_hio_unshare(vr);
16058275SEric Cheng }
16068275SEric Cheng 
16078275SEric Cheng 
16088275SEric Cheng void
16098275SEric Cheng nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
16108275SEric Cheng     mac_ring_handle_t *rings, uint_t *n_rings)
16118275SEric Cheng {
16128275SEric Cheng 	nxge_t			*nxge;
16138275SEric Cheng 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
16148275SEric Cheng 	nxge_ring_handle_t	*rh;
16158275SEric Cheng 	uint32_t		offset;
16168275SEric Cheng 
16178275SEric Cheng 	nxge = shp->nxgep;
16188275SEric Cheng 
16198275SEric Cheng 	switch (type) {
16208275SEric Cheng 	case MAC_RING_TYPE_RX:
16218275SEric Cheng 		rh = nxge->rx_ring_handles;
16228275SEric Cheng 		offset = nxge->pt_config.hw_config.start_rdc;
16238275SEric Cheng 		break;
16248275SEric Cheng 
16258275SEric Cheng 	case MAC_RING_TYPE_TX:
16268275SEric Cheng 		rh = nxge->tx_ring_handles;
16278275SEric Cheng 		offset = nxge->pt_config.hw_config.tdc.start;
16288275SEric Cheng 		break;
16298275SEric Cheng 	}
16308275SEric Cheng 
16318275SEric Cheng 	/*
16328275SEric Cheng 	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
16338275SEric Cheng 	 * but the HV has statically assigned the channels like so:
16348275SEric Cheng 	 * VR0: RDC0 & RDC1
16358275SEric Cheng 	 * VR1: RDC2 & RDC3, etc.
16368275SEric Cheng 	 * The TDCs are assigned in exactly the same way.
16378275SEric Cheng 	 */
16388275SEric Cheng 	if (rings != NULL) {
16398275SEric Cheng 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
16408275SEric Cheng 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
16418275SEric Cheng 	}
16428275SEric Cheng 	if (n_rings != NULL) {
16438275SEric Cheng 		*n_rings = 2;
16448275SEric Cheng 	}
16458275SEric Cheng }
16468275SEric Cheng 
/*
 * nxge_hio_share_add_group
 *
 *	Bind a ring group to a share, after validating that the group's
 *	rings match the channels the HV statically assigned to the VR.
 *
 * Arguments:
 * 	shandle	The share.
 * 	ghandle	The ring group (TX or RX) to bind.
 *
 * Returns:
 *	0 on success, EALREADY if the group is already bound to a share,
 *	EINVAL if the group's rings do not match the share's channels.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_add_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_t			*nxge;
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
	nxge_grp_t		*group;
	int			i;

	if (rg->sindex != 0) {
		/* the group is already bound to a share */
		return (EALREADY);
	}

	/*
	 * If we are adding a group 0 to a share, this
	 * is not correct.
	 */
	ASSERT(rg->gindex != 0);

	nxge = rg->nxgep;
	vr = shp->vrp;

	switch (rg->type) {
	case MAC_RING_TYPE_RX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 RDCs.  Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: RDC0 & RDC1
		 * VR1: RDC2 & RDC3, etc.
		 */
		group = nxge->rx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		/*
		 * Every channel in the group's map must be one of the
		 * two channels (2*index, 2*index+1) owned by this share.
		 */
		for (i = 0; i < NXGE_MAX_RDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		/* Record the binding in both the group and the VR. */
		rg->sindex = vr->region;
		vr->rdc_tbl = rg->rdctbl;
		shp->rxgroup = vr->rdc_tbl;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 TDCs.  Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: TDC0 & TDC1
		 * VR1: TDC2 & TDC3, etc.
		 */
		group = nxge->tx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		/* Same validation as the RX case, against the TDC map. */
		for (i = 0; i < NXGE_MAX_TDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		/* TDC table id is derived from the default TX group id. */
		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rg->gindex;
		rg->sindex = vr->region;
		break;
	}
	return (0);
}
17438275SEric Cheng 
17448275SEric Cheng int
17458275SEric Cheng nxge_hio_share_rem_group(mac_share_handle_t shandle,
17468275SEric Cheng     mac_group_driver_t ghandle)
17478275SEric Cheng {
17488275SEric Cheng 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
17498275SEric Cheng 	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
17508275SEric Cheng 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
17518275SEric Cheng 	int			rv = 0;
17528275SEric Cheng 
17538275SEric Cheng 	vr = shp->vrp;
17548275SEric Cheng 
17558275SEric Cheng 	switch (group->type) {
17568275SEric Cheng 	case MAC_RING_TYPE_RX:
17578275SEric Cheng 		group->sindex = 0;
17588275SEric Cheng 		vr->rdc_tbl = 0;
17598275SEric Cheng 		shp->rxgroup = 0;
17608275SEric Cheng 		break;
17618275SEric Cheng 
17628275SEric Cheng 	case MAC_RING_TYPE_TX:
17638275SEric Cheng 		group->sindex = 0;
17648275SEric Cheng 		vr->tdc_tbl = 0;
17658275SEric Cheng 		break;
17668275SEric Cheng 	}
17678275SEric Cheng 
17688275SEric Cheng 	return (rv);
17698275SEric Cheng }
17708275SEric Cheng 
/*
 * nxge_hio_share_bind
 *
 *	Bind a share to the guest: add the VR's TX and RX DMA resources,
 *	then ask the hypervisor to assign the VR.  On any failure the
 *	resources added so far are rolled back.
 *
 * Arguments:
 * 	shandle	The share to bind.
 * 	cookie	HV cookie identifying the guest end of the VR.
 * 	rcookie	Out: cfg_hdl in the high 32 bits, HV cookie in the low 32.
 *
 * Returns:
 *	0 on success, otherwise the error from nxge_hio_addres() or
 *	nxge_hio_share_assign().
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
    uint64_t *rcookie)
{
	nxge_t			*nxge;
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t		*vr;
	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
	int			rv;

	nxge = shp->nxgep;
	vr = (nxge_hio_vr_t *)shp->vrp;

	/*
	 * Add resources to the share.
	 * For each DMA channel associated with the VR, bind its resources
	 * to the VR.
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
	if (rv != 0) {
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
	if (rv != 0) {
		/* Roll back the TX channels added above. */
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		return (rv);
	}

	/*
	 * Ask the Hypervisor to set up the VR and allocate slots for
	 * each rings associated with the VR.
	 */
	hv_tmap = hv_rmap = 0;
	if ((rv = nxge_hio_share_assign(nxge, cookie,
	    &hv_tmap, &hv_rmap, vr))) {
		/* Roll back both resource sets on HV failure. */
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		return (rv);
	}

	/* Record the HV-confirmed maps; the share is now live. */
	shp->active = B_TRUE;
	shp->tmap = hv_tmap;
	shp->rmap = hv_rmap;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	return (0);
}
18238275SEric Cheng 
/*
 * nxge_hio_share_unbind
 *
 *	Undo nxge_hio_share_bind(): unassign the VR from the hypervisor,
 *	then release the TX and RX DMA resources recorded in the share.
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_share_unbind(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
}
18416495Sspeer 
18426495Sspeer 
18436495Sspeer /*
18446495Sspeer  * nxge_hio_vr_share
18456495Sspeer  *
18466495Sspeer  *	Find an unused Virtualization Region (VR).
18476495Sspeer  *
18486495Sspeer  * Arguments:
18496495Sspeer  * 	nxge
18506495Sspeer  *
18516495Sspeer  * Notes:
18526495Sspeer  *
18536495Sspeer  * Context:
18546495Sspeer  *	Service domain
18556495Sspeer  */
18567755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *
18576495Sspeer nxge_hio_vr_share(
18586495Sspeer 	nxge_t *nxge)
18596495Sspeer {
18606495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
18616495Sspeer 	nxge_hio_vr_t *vr;
18626495Sspeer 
18636495Sspeer 	int first, limit, region;
18646495Sspeer 
18656495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
18666495Sspeer 
18676495Sspeer 	MUTEX_ENTER(&nhd->lock);
18686495Sspeer 
18697755SMisaki.Kataoka@Sun.COM 	if (nhd->vrs == 0) {
18706495Sspeer 		MUTEX_EXIT(&nhd->lock);
18716495Sspeer 		return (0);
18726495Sspeer 	}
18736495Sspeer 
18746495Sspeer 	/* Find an empty virtual region (VR). */
18756495Sspeer 	if (nxge->function_num == 0) {
18766495Sspeer 		// FUNC0_VIR0 'belongs' to NIU port 0.
18776495Sspeer 		first = FUNC0_VIR1;
18786495Sspeer 		limit = FUNC2_VIR0;
18796495Sspeer 	} else if (nxge->function_num == 1) {
18806495Sspeer 		// FUNC2_VIR0 'belongs' to NIU port 1.
18816495Sspeer 		first = FUNC2_VIR1;
18826495Sspeer 		limit = FUNC_VIR_MAX;
18836495Sspeer 	} else {
18846495Sspeer 		cmn_err(CE_WARN,
18856495Sspeer 		    "Shares not supported on function(%d) at this time.\n",
18866495Sspeer 		    nxge->function_num);
18876495Sspeer 	}
18886495Sspeer 
18896495Sspeer 	for (region = first; region < limit; region++) {
18906495Sspeer 		if (nhd->vr[region].nxge == 0)
18916495Sspeer 			break;
18926495Sspeer 	}
18936495Sspeer 
18946495Sspeer 	if (region == limit) {
18956495Sspeer 		MUTEX_EXIT(&nhd->lock);
18966495Sspeer 		return (0);
18976495Sspeer 	}
18986495Sspeer 
18996495Sspeer 	vr = &nhd->vr[region];
19006495Sspeer 	vr->nxge = (uintptr_t)nxge;
19016495Sspeer 	vr->region = (uintptr_t)region;
19026495Sspeer 
19037755SMisaki.Kataoka@Sun.COM 	nhd->vrs--;
19046495Sspeer 
19056495Sspeer 	MUTEX_EXIT(&nhd->lock);
19066495Sspeer 
19076495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
19086495Sspeer 
19097755SMisaki.Kataoka@Sun.COM 	return (vr);
19106495Sspeer }
19116495Sspeer 
/*
 * nxge_hio_unshare
 *
 *	Return a Virtualization Region (VR) to the free pool: zero the
 *	VR (preserving only its region id) and bump the free count.
 *
 * Arguments:
 * 	vr	The VR to release, as returned by nxge_hio_vr_share().
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_unshare(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		/*
		 * NOTE(review): nxge is NULL here, so the message macro
		 * is itself handed a NULL nxge pointer.
		 */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */

	/*
	 * XXX: This is done by ms_sremove?
	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
	 */

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	/* Wipe the VR but keep its (fixed) region id. */
	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}
19556495Sspeer 
/*
 * nxge_hio_addres
 *
 *	Add the DMA channels of the port group backing <vr> to the VR,
 *	sharing each channel via nxge_hio_dc_share().
 *
 * Arguments:
 * 	vr	The Virtualization Region.
 * 	type	Tx or Rx.
 * 	map	In/out: bitmap of channels successfully shared; the
 * 		caller passes it in zeroed and uses it for rollback.
 *
 * Returns:
 *	0 if every channel in the group was shared; EINVAL for a bad
 *	VR or empty group; EIO (or the first share error) on partial
 *	or total failure.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
{
	nxge_t		*nxge = (nxge_t *)vr->nxge;
	nxge_grp_t	*group;
	int		groupid;
	int		i, rv = 0;
	int		max_dcs;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	/*
	 * For each ring associated with the group, add the resources
	 * to the group and bind.
	 */
	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
	if (type == MAC_RING_TYPE_TX) {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->tdc_tbl -
		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
		group = nxge->tx_set.group[groupid];
	} else {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->rdc_tbl -
		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
		group = nxge->rx_set.group[groupid];
	}

	if (group->map == 0) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated "
		    "with this VR"));
		return (EINVAL);
	}

	for (i = 0; i < max_dcs; i++) {
		if (group->map & (1 << i)) {
			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
				if (*map == 0) /* Couldn't get even one DC. */
					return (-rv);
				else
					break;
			}
			/* Record each success so the caller can roll back. */
			*map |= (1 << i);
		}
	}

	/* Partial success (rv != 0 after at least one share) is an error. */
	if ((*map == 0) || (rv != 0)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "<== nxge_hio_addres: rv(%x)", rv));
		return (EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
	return (0);
}
20146495Sspeer 
/*
 * nxge_hio_remres
 *
 *	Remove DMA resources from a VR: walk the VR's TX or RX group
 *	and unshare each channel, clearing its bit in <res_map>.  Any
 *	bits left in <res_map> afterwards indicate channels that were
 *	bound but not present in the group (logged as an error).
 *
 * Arguments:
 * 	vr	The Virtualization Region.
 * 	type	Tx or Rx.
 * 	res_map	Bitmap of the channels expected to be unshared.
 *
 * Context:
 *	Service domain
 */
/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	/*
	 * For each ring bound to the group, remove the DMA resources
	 * from the group and unbind.
	 */
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		/* dc_unshare removes <dc> from the group's list. */
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}
20516495Sspeer 
20526495Sspeer /*
20536495Sspeer  * nxge_hio_tdc_share
20546495Sspeer  *
20556495Sspeer  *	Share an unused TDC channel.
20566495Sspeer  *
20576495Sspeer  * Arguments:
20586495Sspeer  * 	nxge
20596495Sspeer  *
20606495Sspeer  * Notes:
20616495Sspeer  *
20626495Sspeer  * A.7.3 Reconfigure Tx DMA channel
20636495Sspeer  *	Disable TxDMA			A.9.6.10
20646495Sspeer  *     [Rebind TxDMA channel to Port	A.9.6.7]
20656495Sspeer  *
20666495Sspeer  * We don't have to Rebind the TDC to the port - it always already bound.
20676495Sspeer  *
20686495Sspeer  *	Soft Reset TxDMA		A.9.6.2
20696495Sspeer  *
20706495Sspeer  * This procedure will be executed by nxge_init_txdma_channel() in the
20716495Sspeer  * guest domain:
20726495Sspeer  *
20736495Sspeer  *	Re-initialize TxDMA		A.9.6.8
20746495Sspeer  *	Reconfigure TxDMA
20756495Sspeer  *	Enable TxDMA			A.9.6.9
20766495Sspeer  *
20776495Sspeer  * Context:
20786495Sspeer  *	Service domain
20796495Sspeer  */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];
	ASSERT(ring != NULL);

	/* Signal the send path that this ring is going offline. */
	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 * (Polls every 1 ms for the send path to acknowledge
		 * the OFFLINED state.)
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			/* Timed out: put the ring back online and fail. */
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		/* Ring is idle; mark it offlined directly. */
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (-EINVAL);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}
21596495Sspeer 
21606495Sspeer /*
21616495Sspeer  * nxge_hio_rdc_share
21626495Sspeer  *
21636495Sspeer  *	Share an unused RDC channel.
21646495Sspeer  *
21656495Sspeer  * Arguments:
21666495Sspeer  * 	nxge
21676495Sspeer  *
21686495Sspeer  * Notes:
21696495Sspeer  *
21706495Sspeer  * This is the latest version of the procedure to
21716495Sspeer  * Reconfigure an Rx DMA channel:
21726495Sspeer  *
21736495Sspeer  * A.6.3 Reconfigure Rx DMA channel
21746495Sspeer  *	Stop RxMAC		A.9.2.6
21756495Sspeer  *	Drain IPP Port		A.9.3.6
21766495Sspeer  *	Stop and reset RxDMA	A.9.5.3
21776495Sspeer  *
21786495Sspeer  * This procedure will be executed by nxge_init_rxdma_channel() in the
21796495Sspeer  * guest domain:
21806495Sspeer  *
21816495Sspeer  *	Initialize RxDMA	A.9.5.4
21826495Sspeer  *	Reconfigure RxDMA
21836495Sspeer  *	Enable RxDMA		A.9.5.5
21846495Sspeer  *
21856495Sspeer  * We will do this here, since the RDC is a canalis non grata:
21866495Sspeer  *	Enable RxMAC		A.9.2.10
21876495Sspeer  *
21886495Sspeer  * Context:
21896495Sspeer  *	Service domain
21906495Sspeer  */
21916495Sspeer int
21926495Sspeer nxge_hio_rdc_share(
21936495Sspeer 	nxge_t *nxge,
21946495Sspeer 	nxge_hio_vr_t *vr,
21956495Sspeer 	int channel)
21966495Sspeer {
21976495Sspeer 	nxge_grp_set_t *set = &nxge->rx_set;
21986495Sspeer 	nxge_rdc_grp_t *rdc_grp;
21996495Sspeer 
22006495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
22016495Sspeer 
22026495Sspeer 	/* Disable interrupts. */
22036495Sspeer 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
22047950SMichael.Speer@Sun.COM 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
22056495Sspeer 		    "Failed to remove interrupt for RxDMA channel %d",
22066495Sspeer 		    channel));
22076495Sspeer 		return (NXGE_ERROR);
22086495Sspeer 	}
22096495Sspeer 
22106495Sspeer 	/* Stop RxMAC = A.9.2.6 */
22116495Sspeer 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
22126495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
22136495Sspeer 		    "Failed to disable RxMAC"));
22146495Sspeer 	}
22156495Sspeer 
22166495Sspeer 	/* Drain IPP Port = A.9.3.6 */
22176495Sspeer 	(void) nxge_ipp_drain(nxge);
22186495Sspeer 
22196495Sspeer 	/* Stop and reset RxDMA = A.9.5.3 */
22206495Sspeer 	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
22216495Sspeer 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
22226495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
22236495Sspeer 		    "Failed to disable RxDMA channel %d", channel));
22246495Sspeer 	}
22256495Sspeer 
22266495Sspeer 	/* The SD is sharing this channel. */
22276495Sspeer 	NXGE_DC_SET(set->shared.map, channel);
22286495Sspeer 	set->shared.count++;
22296495Sspeer 
22306602Sspeer 	// Assert RST: RXDMA_CFIG1[30] = 1
22316602Sspeer 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
22326602Sspeer 
22336495Sspeer 	/*
22346495Sspeer 	 * The guest domain will reconfigure the RDC later.
22356495Sspeer 	 *
22366495Sspeer 	 * But in the meantime, we must re-enable the Rx MAC so
22376495Sspeer 	 * that we can start receiving packets again on the
22386495Sspeer 	 * remaining RDCs:
22396495Sspeer 	 *
22406495Sspeer 	 * Enable RxMAC = A.9.2.10
22416495Sspeer 	 */
22426495Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
22436495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
22447950SMichael.Speer@Sun.COM 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
22456495Sspeer 	}
22466495Sspeer 
22476495Sspeer 	/*
22486495Sspeer 	 * Initialize the DC-specific FZC control registers.
22496495Sspeer 	 * -----------------------------------------------------
22506495Sspeer 	 */
22516495Sspeer 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
22526495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
22537950SMichael.Speer@Sun.COM 		    "nxge_hio_rdc_share: RZC RDC failed: %ld", channel));
22546495Sspeer 		return (-EIO);
22556495Sspeer 	}
22566495Sspeer 
22576495Sspeer 	/*
22589047SMichael.Speer@Sun.COM 	 * Update the RDC group.
22596495Sspeer 	 */
22606495Sspeer 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
22616495Sspeer 	NXGE_DC_SET(rdc_grp->map, channel);
22626495Sspeer 
22636495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
22646495Sspeer 
22656495Sspeer 	return (0);
22666495Sspeer }
22676495Sspeer 
22686495Sspeer /*
22696495Sspeer  * nxge_hio_dc_share
22706495Sspeer  *
22716495Sspeer  *	Share a DMA channel with a guest domain.
22726495Sspeer  *
22736495Sspeer  * Arguments:
22746495Sspeer  * 	nxge
22756495Sspeer  * 	vr	The VR that <channel> will belong to.
22766495Sspeer  * 	type	Tx or Rx.
22778275SEric Cheng  * 	channel	Channel to share
22786495Sspeer  *
22796495Sspeer  * Notes:
22806495Sspeer  *
22816495Sspeer  * Context:
22826495Sspeer  *	Service domain
22836495Sspeer  */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_t *group;
	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));


	/* -------------------------------------------------- */
	/* Offline the channel in the service domain first. */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

	if (slot < 0) {
		/* Sharing failed: give the channel back to the SD. */
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;
	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence	= nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	/*
	 * NOTE(review): this uses NXGE_ERROR_MSG with the HIO_CTL debug
	 * category rather than NXGE_ERR_CTL — presumably intentional to
	 * make the assignment visible; confirm against the macro's usage.
	 */
	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}
23566495Sspeer 
23576495Sspeer /*
23586495Sspeer  * nxge_hio_tdc_unshare
23596495Sspeer  *
23606495Sspeer  *	Unshare a TDC.
23616495Sspeer  *
23626495Sspeer  * Arguments:
23636495Sspeer  * 	nxge
23646495Sspeer  * 	channel	The channel to unshare (add again).
23656495Sspeer  *
23666495Sspeer  * Notes:
23676495Sspeer  *
23686495Sspeer  * Context:
23696495Sspeer  *	Service domain
23706495Sspeer  */
23716495Sspeer void
23726495Sspeer nxge_hio_tdc_unshare(
23736495Sspeer 	nxge_t *nxge,
23748275SEric Cheng 	int dev_grpid,
23756495Sspeer 	int channel)
23766495Sspeer {
23776495Sspeer 	nxge_grp_set_t *set = &nxge->tx_set;
23788275SEric Cheng 	nxge_grp_t *group;
23798275SEric Cheng 	int grpid;
23806495Sspeer 
23816495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
23826495Sspeer 
23836495Sspeer 	NXGE_DC_RESET(set->shared.map, channel);
23846495Sspeer 	set->shared.count--;
23856495Sspeer 
23868275SEric Cheng 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
23878275SEric Cheng 	group = set->group[grpid];
23888275SEric Cheng 
23897755SMisaki.Kataoka@Sun.COM 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
23906495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
23916495Sspeer 		    "Failed to initialize TxDMA channel %d", channel));
23926495Sspeer 		return;
23936495Sspeer 	}
23946495Sspeer 
23956495Sspeer 	/* Re-add this interrupt. */
23966495Sspeer 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
23976495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
23986495Sspeer 		    "Failed to add interrupt for TxDMA channel %d", channel));
23996495Sspeer 	}
24006495Sspeer 
24016495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
24026495Sspeer }
24036495Sspeer 
24046495Sspeer /*
24056495Sspeer  * nxge_hio_rdc_unshare
24066495Sspeer  *
24076495Sspeer  *	Unshare an RDC: add it to the SD's RDC groups (tables).
24086495Sspeer  *
24096495Sspeer  * Arguments:
24106495Sspeer  * 	nxge
24116495Sspeer  * 	channel	The channel to unshare (add again).
24126495Sspeer  *
24136495Sspeer  * Notes:
24146495Sspeer  *
24156495Sspeer  * Context:
24166495Sspeer  *	Service domain
24176495Sspeer  */
24186495Sspeer void
24196495Sspeer nxge_hio_rdc_unshare(
24206495Sspeer 	nxge_t *nxge,
24218275SEric Cheng 	int dev_grpid,
24226495Sspeer 	int channel)
24236495Sspeer {
24248275SEric Cheng 	nxge_grp_set_t		*set = &nxge->rx_set;
24258275SEric Cheng 	nxge_grp_t		*group;
24268275SEric Cheng 	int			grpid;
24276495Sspeer 
24286495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
24296495Sspeer 
24306495Sspeer 	/* Stop RxMAC = A.9.2.6 */
24316495Sspeer 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
24326495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
24336495Sspeer 		    "Failed to disable RxMAC"));
24346495Sspeer 	}
24356495Sspeer 
24366495Sspeer 	/* Drain IPP Port = A.9.3.6 */
24376495Sspeer 	(void) nxge_ipp_drain(nxge);
24386495Sspeer 
24396495Sspeer 	/* Stop and reset RxDMA = A.9.5.3 */
24406495Sspeer 	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
24416495Sspeer 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
24426495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
24436495Sspeer 		    "Failed to disable RxDMA channel %d", channel));
24446495Sspeer 	}
24456495Sspeer 
24466495Sspeer 	NXGE_DC_RESET(set->shared.map, channel);
24476495Sspeer 	set->shared.count--;
24486495Sspeer 
24498275SEric Cheng 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
24508275SEric Cheng 	group = set->group[grpid];
24518275SEric Cheng 
24526495Sspeer 	/*
24536495Sspeer 	 * Assert RST: RXDMA_CFIG1[30] = 1
24546495Sspeer 	 *
24556495Sspeer 	 * Initialize RxDMA	A.9.5.4
24566495Sspeer 	 * Reconfigure RxDMA
24576495Sspeer 	 * Enable RxDMA		A.9.5.5
24586495Sspeer 	 */
24597755SMisaki.Kataoka@Sun.COM 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
24606495Sspeer 		/* Be sure to re-enable the RX MAC. */
24616495Sspeer 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
24626495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
24638275SEric Cheng 			    "nxge_hio_rdc_share: Rx MAC still disabled"));
24646495Sspeer 		}
24656495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
24666495Sspeer 		    "Failed to initialize RxDMA channel %d", channel));
24676495Sspeer 		return;
24686495Sspeer 	}
24696495Sspeer 
24706495Sspeer 	/*
24716495Sspeer 	 * Enable RxMAC = A.9.2.10
24726495Sspeer 	 */
24736495Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
24746495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
24758275SEric Cheng 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
24766495Sspeer 		return;
24776495Sspeer 	}
24786495Sspeer 
24796495Sspeer 	/* Re-add this interrupt. */
24806495Sspeer 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
24816495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
24827950SMichael.Speer@Sun.COM 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
24836495Sspeer 		    "RxDMA CHANNEL %d", channel));
24846495Sspeer 	}
24856495Sspeer 
24866495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
24876495Sspeer }
24886495Sspeer 
24896495Sspeer /*
24906495Sspeer  * nxge_hio_dc_unshare
24916495Sspeer  *
24926495Sspeer  *	Unshare (reuse) a DMA channel.
24936495Sspeer  *
24946495Sspeer  * Arguments:
24956495Sspeer  * 	nxge
24966495Sspeer  * 	vr	The VR that <channel> belongs to.
24976495Sspeer  * 	type	Tx or Rx.
24986495Sspeer  * 	channel	The DMA channel to reuse.
24996495Sspeer  *
25006495Sspeer  * Notes:
25016495Sspeer  *
25026495Sspeer  * Context:
25036495Sspeer  *	Service domain
25046495Sspeer  */
25056495Sspeer void
25066495Sspeer nxge_hio_dc_unshare(
25076495Sspeer 	nxge_t *nxge,
25086495Sspeer 	nxge_hio_vr_t *vr,
25096495Sspeer 	mac_ring_type_t type,
25106495Sspeer 	int channel)
25116495Sspeer {
25126495Sspeer 	nxge_grp_t *group;
25136495Sspeer 	nxge_hio_dc_t *dc;
25146495Sspeer 
25156495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
25166495Sspeer 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
25176495Sspeer 
25186495Sspeer 	/* Unlink the channel from its group. */
25196495Sspeer 	/* -------------------------------------------------- */
25206495Sspeer 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
25216602Sspeer 	NXGE_DC_RESET(group->map, channel);
25226495Sspeer 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
25236495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
25247950SMichael.Speer@Sun.COM 		    "nxge_hio_dc_unshare(%d) failed", channel));
25256495Sspeer 		return;
25266495Sspeer 	}
25276495Sspeer 
25286495Sspeer 	dc->vr = 0;
25296495Sspeer 	dc->cookie = 0;
25306495Sspeer 
25316495Sspeer 	if (type == MAC_RING_TYPE_RX) {
25328275SEric Cheng 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
25336495Sspeer 	} else {
25348275SEric Cheng 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
25356495Sspeer 	}
25366495Sspeer 
25376495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
25386495Sspeer }
25396495Sspeer 
25408400SNicolas.Droux@Sun.COM 
25418400SNicolas.Droux@Sun.COM /*
25428400SNicolas.Droux@Sun.COM  * nxge_hio_rxdma_bind_intr():
25438400SNicolas.Droux@Sun.COM  *
25448400SNicolas.Droux@Sun.COM  *	For the guest domain driver, need to bind the interrupt group
25458400SNicolas.Droux@Sun.COM  *	and state to the rx_rcr_ring_t.
25468400SNicolas.Droux@Sun.COM  */
25478400SNicolas.Droux@Sun.COM 
25488400SNicolas.Droux@Sun.COM int
25498400SNicolas.Droux@Sun.COM nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
25508400SNicolas.Droux@Sun.COM {
25518400SNicolas.Droux@Sun.COM 	nxge_hio_dc_t	*dc;
25528400SNicolas.Droux@Sun.COM 	nxge_ldgv_t	*control;
25538400SNicolas.Droux@Sun.COM 	nxge_ldg_t	*group;
25548400SNicolas.Droux@Sun.COM 	nxge_ldv_t	*device;
25558400SNicolas.Droux@Sun.COM 
25568400SNicolas.Droux@Sun.COM 	/*
25578400SNicolas.Droux@Sun.COM 	 * Find the DMA channel.
25588400SNicolas.Droux@Sun.COM 	 */
25598400SNicolas.Droux@Sun.COM 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
25608400SNicolas.Droux@Sun.COM 		return (NXGE_ERROR);
25618400SNicolas.Droux@Sun.COM 	}
25628400SNicolas.Droux@Sun.COM 
25638400SNicolas.Droux@Sun.COM 	/*
25648400SNicolas.Droux@Sun.COM 	 * Get the control structure.
25658400SNicolas.Droux@Sun.COM 	 */
25668400SNicolas.Droux@Sun.COM 	control = nxge->ldgvp;
25678400SNicolas.Droux@Sun.COM 	if (control == NULL) {
25688400SNicolas.Droux@Sun.COM 		return (NXGE_ERROR);
25698400SNicolas.Droux@Sun.COM 	}
25708400SNicolas.Droux@Sun.COM 
25718400SNicolas.Droux@Sun.COM 	group = &control->ldgp[dc->ldg.vector];
25728400SNicolas.Droux@Sun.COM 	device = &control->ldvp[dc->ldg.ldsv];
25738400SNicolas.Droux@Sun.COM 
25748400SNicolas.Droux@Sun.COM 	MUTEX_ENTER(&ring->lock);
25758400SNicolas.Droux@Sun.COM 	ring->ldgp = group;
25768400SNicolas.Droux@Sun.COM 	ring->ldvp = device;
25778400SNicolas.Droux@Sun.COM 	MUTEX_EXIT(&ring->lock);
25788400SNicolas.Droux@Sun.COM 
25798400SNicolas.Droux@Sun.COM 	return (NXGE_OK);
25808400SNicolas.Droux@Sun.COM }
25816495Sspeer #endif	/* if defined(sun4v) */
2582