/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define NXGE_HIO_SHARE_MIN_CHANNELS 2
#define NXGE_HIO_SHARE_MAX_CHANNELS 2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 * Figure out if we are in a guest domain or not.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
void
nxge_get_environs(
    nxge_t *nxge)
{
    char *string;

    /*
     * In the beginning, assume that we are running sans LDOMs/XEN.
     */
    nxge->environs = SOLARIS_DOMAIN;

    /*
     * Are we a hybrid I/O (HIO) guest domain driver?
     */
    if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "niutype", &string)) == DDI_PROP_SUCCESS) {
        if (strcmp(string, "n2niu") == 0) {
            nxge->environs = SOLARIS_GUEST_DOMAIN;
            /* So we can allocate properly-aligned memory. */
            nxge->niu_type = N2_NIU;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "Hybrid IO-capable guest domain"));
        }
        ddi_prop_free(string);
    }
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *     This is the non-hybrid I/O version of this function.
 *
 * Context:
 *     Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
    nxge_hio_data_t *nhd;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    nhd->hio.ldoms = B_FALSE;

    return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    ASSERT(nxge->nxge_hw_p->ndevs == 0);

    if (nhd != NULL) {
        MUTEX_DESTROY(&nhd->lock);
        KMEM_FREE(nhd, sizeof (*nhd));
        nxge->nxge_hw_p->hio = 0;
    }
}

/*
 * nxge_dci_map
 *
 * Map a DMA channel index to a channel number.
 *
 * Arguments:
 *     instance    The instance number of the driver.
 *     type        The type of channel this is: Tx or Rx.
 *     index       The index to convert to a channel number
 *
 * Notes:
 *     This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *     Any domain
 */
int
nxge_dci_map(
    nxge_t *nxge,
    vpc_type_t type,
    int index)
{
    nxge_grp_set_t *set;
    int dc;

    switch (type) {
    case VP_BOUND_TX:
        set = &nxge->tx_set;
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        break;
    }

    for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
        if ((1 << dc) & set->owned.map) {
            if (index == 0)
                return (dc);
            else
                index--;
        }
    }

    return (-1);
}

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions. That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 *     Remove all outstanding groups.
 *
 * Arguments:
 *     nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
    nxge_grp_set_t *set;
    int i;

    MUTEX_ENTER(&nxge->group_lock);

    /*
     * Find RX groups that need to be cleaned up.
     */
    set = &nxge->rx_set;
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] != NULL) {
            KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
            set->group[i] = NULL;
        }
    }

    /*
     * Find TX groups that need to be cleaned up.
     */
    set = &nxge->tx_set;
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] != NULL) {
            KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
            set->group[i] = NULL;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);
}


/*
 * nxge_grp_add
 *
 * Add a group to an instance of NXGE.
 *
 * Arguments:
 *     nxge
 *     type    Tx or Rx
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
nxge_grp_t *
nxge_grp_add(
    nxge_t *nxge,
    nxge_grp_type_t type)
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    int i;

    group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
    group->nxge = nxge;

    MUTEX_ENTER(&nxge->group_lock);
    switch (type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    group->type = type;
    group->active = B_TRUE;
    group->sequence = set->sequence++;

    /* Find an empty slot for this logical group. */
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] == 0) {
            group->index = i;
            set->group[i] = group;
            NXGE_DC_SET(set->lg.map, i);
            set->lg.count++;
            break;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_add: %cgroup = %d.%d",
        type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    return (group);
}

void
nxge_grp_remove(
    nxge_t *nxge,
    nxge_grp_t *group)    /* The group to remove. */
{
    nxge_grp_set_t *set;
    vpc_type_t type;

    MUTEX_ENTER(&nxge->group_lock);
    switch (group->type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    if (set->group[group->index] != group) {
        MUTEX_EXIT(&nxge->group_lock);
        return;
    }

    set->group[group->index] = 0;
    NXGE_DC_RESET(set->lg.map, group->index);
    set->lg.count--;

    /* While inside the mutex, deactivate <group>. */
    group->active = B_FALSE;

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_remove(%c.%d.%d) called",
        group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    /* Now, remove any DCs which are still active. */
    switch (group->type) {
    default:
        type = VP_BOUND_TX;
        break;
    case NXGE_RECEIVE_GROUP:
    case EXT_RECEIVE_GROUP:
        type = VP_BOUND_RX;
    }

    while (group->dc) {
        nxge_grp_dc_remove(nxge, type, group->dc->channel);
    }

    KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 * Add a DMA channel to a VR/Group.
 *
 * Arguments:
 *     nxge
 *     group      The group to add <channel> to.
 *     type       Rx or Tx
 *     channel    The channel to add.
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
    nxge_t *nxge,
    nxge_grp_t *group,    /* The group to add <channel> to. */
    vpc_type_t type,      /* Rx or Tx */
    int channel)          /* A physical/logical channel number */
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

    if (group == NULL)
        return (0);

    switch (type) {
    case VP_BOUND_TX:
        set = &nxge->tx_set;
        if (channel > NXGE_MAX_TDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: TDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        if (channel > NXGE_MAX_RDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: RDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;

    default:
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add: unknown type channel(%d)", channel));
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
        type == VP_BOUND_TX ? 't' : 'r',
        nxge->mac.portnum, group->sequence, group->count, channel));

    MUTEX_ENTER(&nxge->group_lock);
    if (group->active != B_TRUE) {
        /* We may be in the process of removing this group. */
        MUTEX_EXIT(&nxge->group_lock);
        return (NXGE_ERROR);
    }
    MUTEX_EXIT(&nxge->group_lock);

    if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): DC FIND failed", channel));
        return (NXGE_ERROR);
    }

    MUTEX_ENTER(&nhd->lock);

    if (dc->group) {
        MUTEX_EXIT(&nhd->lock);
        /* This channel is already in use! */
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel already in group", channel));
        return (NXGE_ERROR);
    }

    dc->next = 0;
    dc->page = channel;
    dc->channel = (nxge_channel_t)channel;

    dc->type = type;
    if (type == VP_BOUND_RX) {
        dc->init = nxge_init_rxdma_channel;
        dc->uninit = nxge_uninit_rxdma_channel;
    } else {
        dc->init = nxge_init_txdma_channel;
        dc->uninit = nxge_uninit_txdma_channel;
    }

    dc->group = group;

    if (isLDOMguest(nxge))
        (void) nxge_hio_ldsv_add(nxge, dc);

    NXGE_DC_SET(set->owned.map, channel);
    set->owned.count++;

    MUTEX_EXIT(&nhd->lock);

    if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel init failed", channel));
        MUTEX_ENTER(&nhd->lock);
        (void) memset(dc, 0, sizeof (*dc));
        NXGE_DC_RESET(set->owned.map, channel);
        set->owned.count--;
        MUTEX_EXIT(&nhd->lock);
        return (NXGE_ERROR);
    }

    nxge_grp_dc_append(nxge, group, dc);

    if (type == VP_BOUND_TX) {
        MUTEX_ENTER(&nhd->lock);
        nxge->tdc_is_shared[channel] = B_FALSE;
        MUTEX_EXIT(&nhd->lock);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

    return ((int)status);
}

void
nxge_grp_dc_remove(
    nxge_t *nxge,
    vpc_type_t type,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;

    dc_uninit_t uninit;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

    if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
        goto nxge_grp_dc_remove_exit;

    if ((dc->group == NULL) && (dc->next == 0) &&
        (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
        goto nxge_grp_dc_remove_exit;
    }

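    /*
     * The channel is still live: note which group owns it, unlink it
     * from that group under nhd->lock, and only then call its uninit
     * routine with the lock dropped.
     */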
    group = (nxge_grp_t *)dc->group;

    if (isLDOMguest(nxge)) {
        (void) nxge_hio_intr_remove(nxge, type, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "DC remove: group = %d.%d.%d, %cdc %d",
        nxge->mac.portnum, group->sequence, group->count,
        type == VP_BOUND_TX ? 't' : 'r', dc->channel));

    MUTEX_ENTER(&nhd->lock);

    set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
    if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
        NXGE_DC_RESET(group->map, channel);
    }

    /* Remove the DC from its group. */
    if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
        MUTEX_EXIT(&nhd->lock);
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_remove(%d) failed", channel));
        goto nxge_grp_dc_remove_exit;
    }

    uninit = dc->uninit;
    channel = dc->channel;

    NXGE_DC_RESET(set->owned.map, channel);
    set->owned.count--;

    (void) memset(dc, 0, sizeof (*dc));

    MUTEX_EXIT(&nhd->lock);

    (*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
    nxge_t *nxge,
    vpc_type_t type,    /* Rx or Tx */
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *current;

    current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

    if (!isLDOMguest(nxge)) {
        return (&current[channel]);
    } else {
        /* We're in a guest domain. */
        int i, limit = (type == VP_BOUND_TX) ?
            NXGE_MAX_TDCS : NXGE_MAX_RDCS;

        MUTEX_ENTER(&nhd->lock);
        for (i = 0; i < limit; i++, current++) {
            if (current->channel == channel) {
                if (current->vr && current->vr->nxge ==
                    (uintptr_t)nxge) {
                    MUTEX_EXIT(&nhd->lock);
                    return (current);
                }
            }
        }
        MUTEX_EXIT(&nhd->lock);
    }

    return (0);
}

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
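 * The channel is linked onto the tail of the group's singly-linked DC
 * list, and the group's channel map and legend are refreshed, all while
 * nxge->group_lock is held.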
 *
 * Arguments:
 *     nxge
 *     group    The group to append to
 *     dc       The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
static
void
nxge_grp_dc_append(
    nxge_t *nxge,
    nxge_grp_t *group,
    nxge_hio_dc_t *dc)
{
    MUTEX_ENTER(&nxge->group_lock);

    if (group->dc == 0) {
        group->dc = dc;
    } else {
        nxge_hio_dc_t *current = group->dc;
        do {
            if (current->next == 0) {
                current->next = dc;
                break;
            }
            current = current->next;
        } while (current);
    }

    NXGE_DC_SET(group->map, dc->channel);

    nxge_grp_dc_map(group);
    group->count++;

    MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 *     nxge
 *     group    The group (linked list) to unlink from
 *     dc       The DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(nxge_t *nxge, nxge_grp_t *group, int channel)
{
    nxge_hio_dc_t *current, *previous;

    MUTEX_ENTER(&nxge->group_lock);

    if (group == NULL) {
        MUTEX_EXIT(&nxge->group_lock);
        return (0);
    }

    if ((current = group->dc) == 0) {
        MUTEX_EXIT(&nxge->group_lock);
        return (0);
    }

    previous = 0;
    do {
        if (current->channel == channel) {
            if (previous)
                previous->next = current->next;
            else
                group->dc = current->next;
            break;
        }
        previous = current;
        current = current->next;
    } while (current);

    if (current == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "DC unlink: DC %d not found", channel));
    } else {
        current->next = 0;
        current->group = 0;

        group->count--;
    }

    nxge_grp_dc_map(group);

    MUTEX_EXIT(&nxge->group_lock);

    return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 *     nxge
 *     group    The group to remap.
 *
 * Notes:
 *     It is expected that the caller will hold the correct mutex.
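 *
 *     As an example (illustrative only), a group whose DC list holds
 *     channels 4, 5 and 7 ends up with legend[] = { 4, 5, 7, 0, ... }:
 *     a dense array of channel numbers, zero-filled beforehand by the
 *     memset() at the top of the function.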
 *
 * Context:
 *     Service domain
 */
void
nxge_grp_dc_map(
    nxge_grp_t *group)
{
    nxge_channel_t *legend;
    nxge_hio_dc_t *dc;

    (void) memset(group->legend, 0, sizeof (group->legend));

    legend = group->legend;
    dc = group->dc;
    while (dc) {
        *legend = dc->channel;
        legend++;
        dc = dc->next;
    }
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 *     seconds    The number of seconds to delay.
 *
 * Notes:
 *     This is a developer-only function.
 *
 * Context:
 *     Any domain
 */
void
nxge_delay(
    int seconds)
{
    delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
    { "RXDMA_CFIG1", 0 },
    { "RXDMA_CFIG2", 8 },
    { "RBR_CFIG_A", 0x10 },
    { "RBR_CFIG_B", 0x18 },
    { "RBR_KICK", 0x20 },
    { "RBR_STAT", 0x28 },
    { "RBR_HDH", 0x30 },
    { "RBR_HDL", 0x38 },
    { "RCRCFIG_A", 0x40 },
    { "RCRCFIG_B", 0x48 },
    { "RCRSTAT_A", 0x50 },
    { "RCRSTAT_B", 0x58 },
    { "RCRSTAT_C", 0x60 },
    { "RX_DMA_ENT_MSK", 0x68 },
    { "RX_DMA_CTL_STAT", 0x70 },
    { "RCR_FLSH", 0x78 },
    { "RXMISC", 0x90 },
    { "RX_DMA_CTL_STAT_DBG", 0x98 },
    { 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
    { "Tx_RNG_CFIG", 0 },
    { "Tx_RNG_HDL", 0x10 },
    { "Tx_RNG_KICK", 0x18 },
    { "Tx_ENT_MASK", 0x20 },
    { "Tx_CS", 0x28 },
    { "TxDMA_MBH", 0x30 },
    { "TxDMA_MBL", 0x38 },
    { "TxDMA_PRE_ST", 0x40 },
    { "Tx_RNG_ERR_LOGH", 0x48 },
    { "Tx_RNG_ERR_LOGL", 0x50 },
    { "TDMC_INTR_DBG", 0x60 },
    { "Tx_CS_DBG", 0x68 },
    { 0, -1 }
};

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 *     offset    The address of the register to translate.
 *
 * Notes:
 *     These are developer-only functions.
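 *
 *     As an illustration, nxge_rx2str(0x70) would return
 *     "RX_DMA_CTL_STAT" (assuming DMA_CSR_MASK leaves the 0x70 offset
 *     unchanged), since the masked offset is simply looked up in
 *     rx_names[] below.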
 *
 * Context:
 *     Any domain
 */
const char *
nxge_rx2str(
    int offset)
{
    dmc_reg_name_t *reg = &rx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

const char *
nxge_tx2str(
    int offset)
{
    dmc_reg_name_t *reg = &tx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

/*
 * nxge_ddi_perror
 *
 * Map a DDI error number to a string.
 *
 * Arguments:
 *     ddi_error    The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
const char *
nxge_ddi_perror(
    int ddi_error)
{
    switch (ddi_error) {
    case DDI_SUCCESS:
        return ("DDI_SUCCESS");
    case DDI_FAILURE:
        return ("DDI_FAILURE");
    case DDI_NOT_WELL_FORMED:
        return ("DDI_NOT_WELL_FORMED");
    case DDI_EAGAIN:
        return ("DDI_EAGAIN");
    case DDI_EINVAL:
        return ("DDI_EINVAL");
    case DDI_ENOTSUP:
        return ("DDI_ENOTSUP");
    case DDI_EPENDING:
        return ("DDI_EPENDING");
    case DDI_ENOMEM:
        return ("DDI_ENOMEM");
    case DDI_EBUSY:
        return ("DDI_EBUSY");
    case DDI_ETRANSPORT:
        return ("DDI_ETRANSPORT");
    case DDI_ECONTEXT:
        return ("DDI_ECONTEXT");
    default:
        return ("Unknown error");
    }
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
int
nxge_hio_init(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd;
    int i, region;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    if ((nxge->environs == SOLARIS_DOMAIN) &&
        (nxge->niu_type == N2_NIU)) {
        if (nxge->niu_hsvc_available == B_TRUE) {
            hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
            if (niu_hsvc->hsvc_major == 1 &&
                niu_hsvc->hsvc_minor == 1)
                nxge->environs = SOLARIS_SERVICE_DOMAIN;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "nxge_hio_init: hypervisor services "
                "version %d.%d",
                niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
        }
    }

    if (!isLDOMs(nxge)) {
        nhd->hio.ldoms = B_FALSE;
        return (NXGE_OK);
    }

    nhd->hio.ldoms = B_TRUE;

    /*
     * Fill in what we can.
     */
    for (region = 0; region < NXGE_VR_SR_MAX; region++) {
        nhd->vr[region].region = region;
    }
    nhd->vrs = NXGE_VR_SR_MAX - 2;

    /*
     * Initialize tdc share state, shares and ring group structures.
     */
    for (i = 0; i < NXGE_MAX_TDCS; i++)
        nxge->tdc_is_shared[i] = B_FALSE;

    for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
        nxge->rx_hio_groups[i].ghandle = NULL;
        nxge->rx_hio_groups[i].nxgep = nxge;
        nxge->rx_hio_groups[i].gindex = 0;
        nxge->rx_hio_groups[i].sindex = 0;
    }

    for (i = 0; i < NXGE_VR_SR_MAX; i++) {
        nxge->shares[i].nxgep = nxge;
        nxge->shares[i].index = 0;
        nxge->shares[i].vrp = (void *)NULL;
        nxge->shares[i].tmap = 0;
        nxge->shares[i].rmap = 0;
        nxge->shares[i].rxgroup = 0;
        nxge->shares[i].active = B_FALSE;
    }

    /* Fill in the HV HIO function pointers. */
    nxge_hio_hv_init(nxge);

    if (isLDOMservice(nxge)) {
        NXGE_DEBUG_MSG((nxge, HIO_CTL,
            "Hybrid IO-capable service domain"));
        return (NXGE_OK);
    } else {
        /*
         * isLDOMguest(nxge) == B_TRUE
         */
        nx_vio_fp_t *vio;
        nhd->type = NXGE_HIO_TYPE_GUEST;

        vio = &nhd->hio.vio;
        vio->__register = (vio_net_resource_reg_t)
            modgetsymvalue("vio_net_resource_reg", 0);
        vio->unregister = (vio_net_resource_unreg_t)
            modgetsymvalue("vio_net_resource_unreg", 0);

        if (vio->__register == 0 || vio->unregister == 0) {
            NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
            return (NXGE_ERROR);
        }
    }

    return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int rv, sindex;
    nxge_hio_vr_t *vr;    /* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Program the mac address for the group/share.
     */
    if ((rv = nxge_hio_hostinfo_init(nxge, vr,
        (ether_addr_t *)mac_addr)) != 0) {
        return (rv);
    }

    return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int sindex;
    nxge_hio_vr_t *vr;    /* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Remove the mac address for the group/share.
     */
    nxge_hio_hostinfo_uninit(nxge, vr);

    return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
    p_nxge_t nxgep = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;

    switch (type) {
    case MAC_RING_TYPE_RX:
        rxgroup = &nxgep->rx_hio_groups[group];
        rxgroup->gindex = group;

        infop->mrg_driver = (mac_group_driver_t)rxgroup;
        infop->mrg_start = NULL;
        infop->mrg_stop = NULL;
        infop->mrg_addmac = nxge_hio_add_mac;
        infop->mrg_remmac = nxge_hio_rem_mac;
        infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
        break;

    case MAC_RING_TYPE_TX:
        break;
    }
}

int
nxge_hio_share_assign(
    nxge_t *nxge,
    uint64_t cookie,
    res_map_t *tmap,
    res_map_t *rmap,
    nxge_hio_vr_t *vr)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    uint64_t slot, hv_rv;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    int i;

    /*
     * Ask the Hypervisor to set up the VR for us
     */
    fp = &nhd->hio.vr;
    if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
        NXGE_ERROR_MSG((nxge, HIO_CTL,
            "nxge_hio_share_assign: "
            "vr->assign() returned %d", hv_rv));
        nxge_hio_unshare(vr);
        return (-EIO);
    }

    /*
     * For each shared TDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->tx_group.dc;
    for (i = 0; i < NXGE_MAX_TDCS; i++) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        while (dc) {
            hv_rv = (*tx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nxge_hio_share_assign: "
                    "tx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*tmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    /*
     * For each shared RDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->rx_group.dc;
    for (i = 0; i < NXGE_MAX_RDCS; i++) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        while (dc) {
            hv_rv = (*rx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nxge_hio_share_assign: "
                    "rx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*rmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    return (0);
}

int
nxge_hio_share_unassign(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    uint64_t hv_rv;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    dc = vr->tx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        hv_rv = (*tx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_hio_share_unassign: "
                "tx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    dc = vr->rx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        hv_rv = (*rx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_hio_share_unassign: "
                "rx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    fp = &nhd->hio.vr;
    if (fp->unassign) {
        hv_rv = (*fp->unassign)(vr->cookie);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_hio_share_unassign: "
                "vr->unassign(%x) failed: %ld",
                vr->cookie, hv_rv));
        }
    }

    return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
    mac_share_handle_t *shandle)
{
    p_nxge_t nxge = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;
    nxge_share_handle_t *shp;

    nxge_hio_vr_t *vr;    /* The Virtualization Region */
    uint64_t rmap, tmap;
    int rv;

    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

    if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
        nhd->hio.rx.assign == 0) {
        NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
        return (EIO);
    }

    /*
     * Get a VR.
     */
    if ((vr = nxge_hio_vr_share(nxge)) == 0)
        return (EAGAIN);

    /*
     * Get an RDC group for us to use.
     */
    if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
        nxge_hio_unshare(vr);
        return (EBUSY);
    }

    /*
     * Add resources to the share.
     */
    tmap = 0;
    rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_unshare(vr);
        return (rv);
    }

    rmap = 0;
    rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(vr);
        return (rv);
    }

    if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
        nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
        nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(vr);
        return (rv);
    }

    rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
    rxgroup->gindex = vr->rdc_tbl;
    rxgroup->sindex = vr->region;

    shp = &nxge->shares[vr->region];
    shp->index = vr->region;
    shp->vrp = (void *)vr;
    shp->tmap = tmap;
    shp->rmap = rmap;
    shp->rxgroup = vr->rdc_tbl;
    shp->active = B_TRUE;

    /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
    *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

    *shandle = (mac_share_handle_t)shp;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
    return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    /*
     * First, unassign the VR (take it back),
     * so we can enable interrupts again.
     */
    (void) nxge_hio_share_unassign(shp->vrp);

    /*
     * Free Ring Resources for TX and RX
     */
    nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
    nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

    /*
     * Free VR resource.
     */
    nxge_hio_unshare(shp->vrp);

    /*
     * Clear internal handle state.
     */
    shp->index = 0;
    shp->vrp = (void *)NULL;
    shp->tmap = 0;
    shp->rmap = 0;
    shp->rxgroup = 0;
    shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    switch (type) {
    case MAC_RING_TYPE_RX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->rmap;
        *gnum = shp->rxgroup;
        break;

    case MAC_RING_TYPE_TX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->tmap;
        *gnum = 0;
        break;
    }
}

/*
 * nxge_hio_vr_share
 *
 * Find an unused Virtualization Region (VR).
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Service domain
 */
nxge_hio_vr_t *
nxge_hio_vr_share(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_vr_t *vr;

    int first, limit, region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

    MUTEX_ENTER(&nhd->lock);

    if (nhd->vrs == 0) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    /* Find an empty virtual region (VR). */
    if (nxge->function_num == 0) {
        // FUNC0_VIR0 'belongs' to NIU port 0.
        first = FUNC0_VIR1;
        limit = FUNC2_VIR0;
    } else if (nxge->function_num == 1) {
        // FUNC2_VIR0 'belongs' to NIU port 1.
        first = FUNC2_VIR1;
        limit = FUNC_VIR_MAX;
    } else {
        cmn_err(CE_WARN,
            "Shares not supported on function(%d) at this time.\n",
            nxge->function_num);
        /* No VR range is defined for this function; bail out. */
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    for (region = first; region < limit; region++) {
        if (nhd->vr[region].nxge == 0)
            break;
    }

    if (region == limit) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    vr = &nhd->vr[region];
    vr->nxge = (uintptr_t)nxge;
    vr->region = (uintptr_t)region;

    nhd->vrs--;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

    return (vr);
}

void
nxge_hio_unshare(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;

    vr_region_t region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
            "vr->nxge is NULL"));
        return;
    }

    /*
     * This function is no longer called, but I will keep it
     * here in case we want to revisit this topic in the future.
     *
     * nxge_hio_hostinfo_uninit(nxge, vr);
     */
    (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    MUTEX_ENTER(&nhd->lock);

    region = vr->region;
    (void) memset(vr, 0, sizeof (*vr));
    vr->region = region;

    nhd->vrs++;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    int count)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    int i;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

    if (!nxge)
        return (EINVAL);

    for (i = 0; i < count; i++) {
        int rv;
        if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
            if (i == 0) /* Couldn't get even one DC. */
                return (-rv);
            else
                break;
        }
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

    return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    res_map_t res_map)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_grp_t *group;

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
            "vr->nxge is NULL"));
        return;
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
    while (group->dc) {
        nxge_hio_dc_t *dc = group->dc;
        NXGE_DC_RESET(res_map, dc->page);
        nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
    }

    if (res_map) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
            "res_map %lx", res_map));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *     Disable TxDMA                    A.9.6.10
 *     [Rebind TxDMA channel to Port    A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *     Soft Reset TxDMA                 A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *     Re-initialize TxDMA              A.9.6.8
 *     Reconfigure TxDMA
 *     Enable TxDMA                     A.9.6.9
 *
 * Context:
 *     Service domain
 */
int
nxge_hio_tdc_share(
    nxge_t *nxge,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_grp_set_t *set = &nxge->tx_set;
    tx_ring_t *ring;
    int count;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

    /*
     * Wait until this channel is idle.
     */
    ring = nxge->tx_rings->rings[channel];

    (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
    if (ring->tx_ring_busy) {
        /*
         * Wait for 30 seconds.
         */
        for (count = 30 * 1000; count; count--) {
            if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
                break;
            }

            drv_usecwait(1000);
        }

        if (count == 0) {
            (void) atomic_swap_32(&ring->tx_ring_offline,
                NXGE_TX_RING_ONLINE);
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_hio_tdc_share: "
                "Tx ring %d was always BUSY", channel));
            return (-EIO);
        }
    } else {
        (void) atomic_swap_32(&ring->tx_ring_offline,
            NXGE_TX_RING_OFFLINED);
    }

    MUTEX_ENTER(&nhd->lock);
    nxge->tdc_is_shared[channel] = B_TRUE;
    MUTEX_EXIT(&nhd->lock);


    if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
            "Failed to remove interrupt for TxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Disable TxDMA A.9.6.10 */
    (void) nxge_txdma_channel_disable(nxge, channel);

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    /* Soft Reset TxDMA A.9.6.2 */
    nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

    /*
     * Initialize the DC-specific FZC control registers.
     * -----------------------------------------------------
     */
    if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
        return (-EIO);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

    return (0);
}

/*
 * nxge_hio_rdc_share
 *
 * Share an unused RDC channel.
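 * (This is the receive-side counterpart of nxge_hio_tdc_share() above;
 * besides handing the channel over, it reworks the affected RDC tables
 * and re-enables the Rx MAC, as outlined in the Notes below.)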
16676495Sspeer * 16686495Sspeer * Arguments: 16696495Sspeer * nxge 16706495Sspeer * 16716495Sspeer * Notes: 16726495Sspeer * 16736495Sspeer * This is the latest version of the procedure to 16746495Sspeer * Reconfigure an Rx DMA channel: 16756495Sspeer * 16766495Sspeer * A.6.3 Reconfigure Rx DMA channel 16776495Sspeer * Stop RxMAC A.9.2.6 16786495Sspeer * Drain IPP Port A.9.3.6 16796495Sspeer * Stop and reset RxDMA A.9.5.3 16806495Sspeer * 16816495Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 16826495Sspeer * guest domain: 16836495Sspeer * 16846495Sspeer * Initialize RxDMA A.9.5.4 16856495Sspeer * Reconfigure RxDMA 16866495Sspeer * Enable RxDMA A.9.5.5 16876495Sspeer * 16886495Sspeer * We will do this here, since the RDC is a canalis non grata: 16896495Sspeer * Enable RxMAC A.9.2.10 16906495Sspeer * 16916495Sspeer * Context: 16926495Sspeer * Service domain 16936495Sspeer */ 16946495Sspeer int 16956495Sspeer nxge_hio_rdc_share( 16966495Sspeer nxge_t *nxge, 16976495Sspeer nxge_hio_vr_t *vr, 16986495Sspeer int channel) 16996495Sspeer { 17006495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 17016495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 17026495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 17036495Sspeer nxge_rdc_grp_t *rdc_grp; 17046495Sspeer 17056495Sspeer int current, last; 17066495Sspeer 17076495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 17086495Sspeer 17096495Sspeer /* Disable interrupts. */ 17106495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1711*7950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 17126495Sspeer "Failed to remove interrupt for RxDMA channel %d", 17136495Sspeer channel)); 17146495Sspeer return (NXGE_ERROR); 17156495Sspeer } 17166495Sspeer 17176495Sspeer /* Stop RxMAC = A.9.2.6 */ 17186495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 17196495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 17206495Sspeer "Failed to disable RxMAC")); 17216495Sspeer } 17226495Sspeer 17236495Sspeer /* Drain IPP Port = A.9.3.6 */ 17246495Sspeer (void) nxge_ipp_drain(nxge); 17256495Sspeer 17266495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 17276495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 17286495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 17296495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 17306495Sspeer "Failed to disable RxDMA channel %d", channel)); 17316495Sspeer } 17326495Sspeer 17336495Sspeer /* The SD is sharing this channel. */ 17346495Sspeer NXGE_DC_SET(set->shared.map, channel); 17356495Sspeer set->shared.count++; 17366495Sspeer 17376602Sspeer // Assert RST: RXDMA_CFIG1[30] = 1 17386602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 17396602Sspeer 17406495Sspeer /* 17416495Sspeer * We have to reconfigure the RDC table(s) 17426495Sspeer * to which this channel belongs. 
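 * Each RDC table owned by this nxge instance reloads its map from
 * set->owned.map and decrements its max_rdcs count before being
 * reprogrammed via nxge_init_fzc_rdc_tbl().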
17436495Sspeer */
17446495Sspeer current = hardware->def_mac_rxdma_grpid;
17456495Sspeer last = current + hardware->max_rdc_grpids;
17466495Sspeer for (; current < last; current++) {
17476495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
17486495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[current];
17496495Sspeer rdc_grp->map = set->owned.map;
17506495Sspeer rdc_grp->max_rdcs--;
17516495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current);
17526495Sspeer }
17536495Sspeer }
17546495Sspeer 
17556495Sspeer /*
17566495Sspeer * The guest domain will reconfigure the RDC later.
17576495Sspeer *
17586495Sspeer * But in the meantime, we must re-enable the Rx MAC so
17596495Sspeer * that we can start receiving packets again on the
17606495Sspeer * remaining RDCs:
17616495Sspeer *
17626495Sspeer * Enable RxMAC = A.9.2.10
17636495Sspeer */
17646495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
17656495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1766*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: Rx MAC still disabled"));
17676495Sspeer }
17686495Sspeer 
17696495Sspeer /*
17706495Sspeer * Initialize the DC-specific FZC control registers.
17716495Sspeer * -----------------------------------------------------
17726495Sspeer */
17736495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
17746495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1775*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
17766495Sspeer return (-EIO);
17776495Sspeer }
17786495Sspeer 
17796495Sspeer /*
17806495Sspeer * We have to initialize the guest's RDC table, too.
17816495Sspeer * -----------------------------------------------------
17826495Sspeer */
17836495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
17846495Sspeer if (rdc_grp->max_rdcs == 0) {
17856495Sspeer rdc_grp->start_rdc = (uint8_t)channel;
17866495Sspeer rdc_grp->def_rdc = (uint8_t)channel;
17876495Sspeer rdc_grp->max_rdcs = 1;
17886495Sspeer } else {
17896495Sspeer rdc_grp->max_rdcs++;
17906495Sspeer }
17916495Sspeer NXGE_DC_SET(rdc_grp->map, channel);
17926495Sspeer 
17936495Sspeer if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
17946495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1795*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
17966495Sspeer return (-EIO);
17976495Sspeer }
17986495Sspeer 
17996495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
18006495Sspeer 
18016495Sspeer return (0);
18026495Sspeer }
18036495Sspeer 
18046495Sspeer /*
18056495Sspeer * nxge_hio_dc_share
18066495Sspeer *
18076495Sspeer * Share a DMA channel with a guest domain.
18086495Sspeer *
18096495Sspeer * Arguments:
18106495Sspeer * nxge
18116495Sspeer * vr The VR that <channel> will belong to.
18126495Sspeer * type Tx or Rx.
18136495Sspeer * res_map The resource map used by the caller, which we will
18146495Sspeer * update if successful.
18156495Sspeer *
18166495Sspeer * Notes:
18176495Sspeer *
18186495Sspeer * Context:
18196495Sspeer * Service domain
18206495Sspeer */
18216495Sspeer int
18226495Sspeer nxge_hio_dc_share(
18236495Sspeer nxge_t *nxge,
18246495Sspeer nxge_hio_vr_t *vr,
18256495Sspeer mac_ring_type_t type)
18266495Sspeer {
18276495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
18286495Sspeer nxge_hw_pt_cfg_t *hardware;
18296495Sspeer nxge_hio_dc_t *dc;
18306495Sspeer int channel, limit;
18316495Sspeer 
18326495Sspeer nxge_grp_set_t *set;
18336495Sspeer nxge_grp_t *group;
18346495Sspeer 
18356495Sspeer int slot;
18366495Sspeer 
18376495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc)",
18386495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r'));
18396495Sspeer 
18406495Sspeer /*
18416495Sspeer * In version 1.0, we may only give a VR 2 RDCs or TDCs.
18426495Sspeer * Not only that, but the HV has statically assigned the
18436495Sspeer * channels like so:
18446495Sspeer * VR0: RDC0 & RDC1
18456495Sspeer * VR1: RDC2 & RDC3, etc.
18466495Sspeer * The TDCs are assigned in exactly the same way.
18476495Sspeer *
18486495Sspeer * So, for example
18496495Sspeer * hardware->start_rdc + vr->region * 2;
18506495Sspeer * VR1: hardware->start_rdc + 1 * 2;
18516495Sspeer * VR3: hardware->start_rdc + 3 * 2;
18526495Sspeer * If start_rdc is 0, we end up with 2 or 6.
18536495Sspeer * If start_rdc is 8, we end up with 10 or 14.
18546495Sspeer */
18556495Sspeer 
18566495Sspeer set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set);
18576495Sspeer hardware = &nxge->pt_config.hw_config;
18586495Sspeer 
18596495Sspeer // This code is still NIU-specific (assuming only 2 ports)
18606495Sspeer channel = hardware->start_rdc + (vr->region % 4) * 2;
18616495Sspeer limit = channel + 2;
18626495Sspeer 
18636495Sspeer MUTEX_ENTER(&nhd->lock);
18646495Sspeer for (; channel < limit; channel++) {
18656495Sspeer if ((1 << channel) & set->owned.map) {
18666495Sspeer break;
18676495Sspeer }
18686495Sspeer }
18696495Sspeer 
18706495Sspeer if (channel == limit) {
18716495Sspeer MUTEX_EXIT(&nhd->lock);
18726495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1873*7950SMichael.Speer@Sun.COM "nxge_hio_dc_share: there are no channels to share"));
18746495Sspeer return (-EIO);
18756495Sspeer }
18766495Sspeer 
18776495Sspeer MUTEX_EXIT(&nhd->lock);
18786495Sspeer 
18796495Sspeer /* -------------------------------------------------- */
18806495Sspeer slot = (type == MAC_RING_TYPE_TX) ?
18816495Sspeer nxge_hio_tdc_share(nxge, channel) :
18826495Sspeer nxge_hio_rdc_share(nxge, vr, channel);
18836495Sspeer 
18846495Sspeer if (slot < 0) {
18856495Sspeer if (type == MAC_RING_TYPE_RX) {
18866495Sspeer nxge_hio_rdc_unshare(nxge, channel);
18876495Sspeer } else {
18886495Sspeer nxge_hio_tdc_unshare(nxge, channel);
18896495Sspeer }
18906495Sspeer return (slot);
18916495Sspeer }
18926495Sspeer 
18936495Sspeer MUTEX_ENTER(&nhd->lock);
18946495Sspeer 
18956495Sspeer /*
18966495Sspeer * Tag this channel.
18976495Sspeer * --------------------------------------------------
18986495Sspeer */
18996495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
19006495Sspeer 
19016495Sspeer dc->vr = vr;
19026495Sspeer dc->channel = (nxge_channel_t)channel;
19036495Sspeer 
19046495Sspeer MUTEX_EXIT(&nhd->lock);
19056495Sspeer 
19066495Sspeer /*
19076495Sspeer * vr->[t|r]x_group is used by the service domain to
19086495Sspeer * keep track of its shared DMA channels.
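 * (nxge_grp_dc_append() below links this channel's nxge_hio_dc_t
 * onto that group.)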
19096495Sspeer */ 19106495Sspeer MUTEX_ENTER(&nxge->group_lock); 19116495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 19126495Sspeer 19137755SMisaki.Kataoka@Sun.COM dc->group = group; 19146495Sspeer 19156495Sspeer /* Initialize <group>, if necessary */ 19166495Sspeer if (group->count == 0) { 19176495Sspeer group->nxge = nxge; 19186495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 19196495Sspeer VP_BOUND_TX : VP_BOUND_RX; 19206495Sspeer group->sequence = nhd->sequence++; 19216495Sspeer group->active = B_TRUE; 19226495Sspeer } 19236495Sspeer 19246495Sspeer MUTEX_EXIT(&nxge->group_lock); 19256495Sspeer 19266495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 19276495Sspeer "DC share: %cDC %d was assigned to slot %d", 19286495Sspeer type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 19296495Sspeer 19306495Sspeer nxge_grp_dc_append(nxge, group, dc); 19316495Sspeer 19326495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 19336495Sspeer 19346495Sspeer return (0); 19356495Sspeer } 19366495Sspeer 19376495Sspeer /* 19386495Sspeer * nxge_hio_tdc_unshare 19396495Sspeer * 19406495Sspeer * Unshare a TDC. 19416495Sspeer * 19426495Sspeer * Arguments: 19436495Sspeer * nxge 19446495Sspeer * channel The channel to unshare (add again). 19456495Sspeer * 19466495Sspeer * Notes: 19476495Sspeer * 19486495Sspeer * Context: 19496495Sspeer * Service domain 19506495Sspeer */ 19516495Sspeer void 19526495Sspeer nxge_hio_tdc_unshare( 19536495Sspeer nxge_t *nxge, 19546495Sspeer int channel) 19556495Sspeer { 19566495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 19577755SMisaki.Kataoka@Sun.COM nxge_grp_t *group = set->group[0]; 19586495Sspeer 19596495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 19606495Sspeer 19616495Sspeer NXGE_DC_RESET(set->shared.map, channel); 19626495Sspeer set->shared.count--; 19636495Sspeer 19647755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 19656495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19666495Sspeer "Failed to initialize TxDMA channel %d", channel)); 19676495Sspeer return; 19686495Sspeer } 19696495Sspeer 19706495Sspeer /* Re-add this interrupt. */ 19716495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 19726495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19736495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 19746495Sspeer } 19756495Sspeer 19766495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 19776495Sspeer } 19786495Sspeer 19796495Sspeer /* 19806495Sspeer * nxge_hio_rdc_unshare 19816495Sspeer * 19826495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 19836495Sspeer * 19846495Sspeer * Arguments: 19856495Sspeer * nxge 19866495Sspeer * channel The channel to unshare (add again). 
19876495Sspeer * 19886495Sspeer * Notes: 19896495Sspeer * 19906495Sspeer * Context: 19916495Sspeer * Service domain 19926495Sspeer */ 19936495Sspeer void 19946495Sspeer nxge_hio_rdc_unshare( 19956495Sspeer nxge_t *nxge, 19966495Sspeer int channel) 19976495Sspeer { 19986495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19996495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 20006495Sspeer 20016495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 20027755SMisaki.Kataoka@Sun.COM nxge_grp_t *group = set->group[0]; 20036495Sspeer int current, last; 20046495Sspeer 20056495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 20066495Sspeer 20076495Sspeer /* Stop RxMAC = A.9.2.6 */ 20086495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 20096495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 20106495Sspeer "Failed to disable RxMAC")); 20116495Sspeer } 20126495Sspeer 20136495Sspeer /* Drain IPP Port = A.9.3.6 */ 20146495Sspeer (void) nxge_ipp_drain(nxge); 20156495Sspeer 20166495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 20176495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 20186495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 20196495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 20206495Sspeer "Failed to disable RxDMA channel %d", channel)); 20216495Sspeer } 20226495Sspeer 20236495Sspeer NXGE_DC_RESET(set->shared.map, channel); 20246495Sspeer set->shared.count--; 20256495Sspeer 20266495Sspeer /* 20276495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 20286495Sspeer * 20296495Sspeer * Initialize RxDMA A.9.5.4 20306495Sspeer * Reconfigure RxDMA 20316495Sspeer * Enable RxDMA A.9.5.5 20326495Sspeer */ 20337755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 20346495Sspeer /* Be sure to re-enable the RX MAC. */ 20356495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 20366495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2037*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Rx MAC still disabled")); 20386495Sspeer } 20396495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 20406495Sspeer "Failed to initialize RxDMA channel %d", channel)); 20416495Sspeer return; 20426495Sspeer } 20436495Sspeer 20446495Sspeer /* 20456495Sspeer * We have to reconfigure the RDC table(s) 20466495Sspeer * to which this channel once again belongs. 20476495Sspeer */ 20486495Sspeer current = hardware->def_mac_rxdma_grpid; 20496495Sspeer last = current + hardware->max_rdc_grpids; 20506495Sspeer for (; current < last; current++) { 20516495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 20526495Sspeer nxge_rdc_grp_t *group; 20536495Sspeer group = &nxge->pt_config.rdc_grps[current]; 20546495Sspeer group->map = set->owned.map; 20556495Sspeer group->max_rdcs++; 20566495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current); 20576495Sspeer } 20586495Sspeer } 20596495Sspeer 20606495Sspeer /* 20616495Sspeer * Enable RxMAC = A.9.2.10 20626495Sspeer */ 20636495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 20646495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2065*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Rx MAC still disabled")); 20666495Sspeer return; 20676495Sspeer } 20686495Sspeer 20696495Sspeer /* Re-add this interrupt. 
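 * It was removed in nxge_hio_rdc_share() when this channel was
 * handed over to the guest domain.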
*/ 20706495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 20716495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2072*7950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Failed to add interrupt for " 20736495Sspeer "RxDMA CHANNEL %d", channel)); 20746495Sspeer } 20756495Sspeer 20766495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 20776495Sspeer } 20786495Sspeer 20796495Sspeer /* 20806495Sspeer * nxge_hio_dc_unshare 20816495Sspeer * 20826495Sspeer * Unshare (reuse) a DMA channel. 20836495Sspeer * 20846495Sspeer * Arguments: 20856495Sspeer * nxge 20866495Sspeer * vr The VR that <channel> belongs to. 20876495Sspeer * type Tx or Rx. 20886495Sspeer * channel The DMA channel to reuse. 20896495Sspeer * 20906495Sspeer * Notes: 20916495Sspeer * 20926495Sspeer * Context: 20936495Sspeer * Service domain 20946495Sspeer */ 20956495Sspeer void 20966495Sspeer nxge_hio_dc_unshare( 20976495Sspeer nxge_t *nxge, 20986495Sspeer nxge_hio_vr_t *vr, 20996495Sspeer mac_ring_type_t type, 21006495Sspeer int channel) 21016495Sspeer { 21026495Sspeer nxge_grp_t *group; 21036495Sspeer nxge_hio_dc_t *dc; 21046495Sspeer 21056495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 21066495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 21076495Sspeer 21086495Sspeer /* Unlink the channel from its group. */ 21096495Sspeer /* -------------------------------------------------- */ 21106495Sspeer group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 21116602Sspeer NXGE_DC_RESET(group->map, channel); 21126495Sspeer if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 21136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2114*7950SMichael.Speer@Sun.COM "nxge_hio_dc_unshare(%d) failed", channel)); 21156495Sspeer return; 21166495Sspeer } 21176495Sspeer 21186495Sspeer dc->vr = 0; 21196495Sspeer dc->cookie = 0; 21206495Sspeer 21216495Sspeer if (type == MAC_RING_TYPE_RX) { 21226495Sspeer nxge_hio_rdc_unshare(nxge, channel); 21236495Sspeer } else { 21246495Sspeer nxge_hio_tdc_unshare(nxge, channel); 21256495Sspeer } 21266495Sspeer 21276495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 21286495Sspeer } 21296495Sspeer 21306495Sspeer #endif /* if defined(sun4v) */ 2131
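
/*
 * Illustrative sketch only -- not part of the driver build, and kept
 * under #if 0 so it is never compiled.  It shows, under stated
 * assumptions, how the share primitives above might be driven for one
 * virtualization region by a service-domain caller.  The function name
 * nxge_hio_share_example() is hypothetical; the real entry points are
 * nxge_hio_addres() and nxge_hio_remres() earlier in this file.
 */
#if 0
static int
nxge_hio_share_example(nxge_t *nxge, nxge_hio_vr_t *vr)
{
	/*
	 * Give the VR one Tx and one Rx DMA channel.  nxge_hio_dc_share()
	 * picks a free channel from the pair statically assigned to this
	 * region and returns 0 on success, or a negative value on failure.
	 */
	if (nxge_hio_dc_share(nxge, vr, MAC_RING_TYPE_TX) < 0)
		return (-EIO);

	if (nxge_hio_dc_share(nxge, vr, MAC_RING_TYPE_RX) < 0) {
		/*
		 * A real caller would undo the Tx share here, e.g. by
		 * calling nxge_hio_remres() with the VR's Tx resource map.
		 */
		return (-EIO);
	}

	return (0);
}
#endif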