16495Sspeer /* 26495Sspeer * CDDL HEADER START 36495Sspeer * 46495Sspeer * The contents of this file are subject to the terms of the 56495Sspeer * Common Development and Distribution License (the "License"). 66495Sspeer * You may not use this file except in compliance with the License. 76495Sspeer * 86495Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 96495Sspeer * or http://www.opensolaris.org/os/licensing. 106495Sspeer * See the License for the specific language governing permissions 116495Sspeer * and limitations under the License. 126495Sspeer * 136495Sspeer * When distributing Covered Code, include this CDDL HEADER in each 146495Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 156495Sspeer * If applicable, add the following below this CDDL HEADER, with the 166495Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 176495Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 186495Sspeer * 196495Sspeer * CDDL HEADER END 206495Sspeer */ 216495Sspeer 226495Sspeer /* 236495Sspeer * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 246495Sspeer * Use is subject to license terms. 256495Sspeer */ 266495Sspeer 276495Sspeer /* 286495Sspeer * nxge_hio.c 296495Sspeer * 306495Sspeer * This file manages the virtualization resources for Neptune 316495Sspeer * devices. That is, it implements a hybrid I/O (HIO) approach in the 326495Sspeer * Solaris kernel, whereby a guest domain on an LDOMs server may 336495Sspeer * request & use hardware resources from the service domain. 
346495Sspeer * 356495Sspeer */ 366495Sspeer 376495Sspeer #include <sys/nxge/nxge_impl.h> 386495Sspeer #include <sys/nxge/nxge_fzc.h> 396495Sspeer #include <sys/nxge/nxge_rxdma.h> 406495Sspeer #include <sys/nxge/nxge_txdma.h> 416495Sspeer #include <sys/nxge/nxge_hio.h> 426495Sspeer 436495Sspeer #define NXGE_HIO_SHARE_MIN_CHANNELS 2 446495Sspeer #define NXGE_HIO_SHARE_MAX_CHANNELS 2 456495Sspeer 466495Sspeer /* 476495Sspeer * External prototypes 486495Sspeer */ 496495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 506495Sspeer 516495Sspeer /* The following function may be found in nxge_main.c */ 526495Sspeer extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 536495Sspeer 546495Sspeer /* The following function may be found in nxge_[t|r]xdma.c */ 556495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 566495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 576495Sspeer 586495Sspeer /* 596495Sspeer * Local prototypes 606495Sspeer */ 616495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 626495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 636495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group); 646495Sspeer 656495Sspeer /* 666495Sspeer * These functions are used by both service & guest domains to 676495Sspeer * decide whether they're running in an LDOMs/XEN environment 686495Sspeer * or not. If so, then the Hybrid I/O (HIO) module is initialized. 696495Sspeer */ 706495Sspeer 716495Sspeer /* 726495Sspeer * nxge_get_environs 736495Sspeer * 746495Sspeer * Figure out if we are in a guest domain or not. 
756495Sspeer * 766495Sspeer * Arguments: 776495Sspeer * nxge 786495Sspeer * 796495Sspeer * Notes: 806495Sspeer * 816495Sspeer * Context: 826495Sspeer * Any domain 836495Sspeer */ 846495Sspeer void 856495Sspeer nxge_get_environs( 866495Sspeer nxge_t *nxge) 876495Sspeer { 886495Sspeer char *string; 896495Sspeer 906495Sspeer /* 916495Sspeer * In the beginning, assume that we are running sans LDOMs/XEN. 926495Sspeer */ 936495Sspeer nxge->environs = SOLARIS_DOMAIN; 946495Sspeer 956495Sspeer /* 966495Sspeer * Are we a hybrid I/O (HIO) guest domain driver? 976495Sspeer */ 986495Sspeer if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 996495Sspeer DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 1006495Sspeer "niutype", &string)) == DDI_PROP_SUCCESS) { 1016495Sspeer if (strcmp(string, "n2niu") == 0) { 1026495Sspeer nxge->environs = SOLARIS_GUEST_DOMAIN; 1036495Sspeer /* So we can allocate properly-aligned memory. */ 1046495Sspeer nxge->niu_type = N2_NIU; 1056495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 1066495Sspeer "Hybrid IO-capable guest domain")); 1076495Sspeer } 1086495Sspeer ddi_prop_free(string); 1096495Sspeer } 1106495Sspeer } 1116495Sspeer 1126495Sspeer #if !defined(sun4v) 1136495Sspeer 1146495Sspeer /* 1156495Sspeer * nxge_hio_init 1166495Sspeer * 1176495Sspeer * Initialize the HIO module of the NXGE driver. 1186495Sspeer * 1196495Sspeer * Arguments: 1206495Sspeer * nxge 1216495Sspeer * 1226495Sspeer * Notes: 1236495Sspeer * This is the non-hybrid I/O version of this function. 
1246495Sspeer * 1256495Sspeer * Context: 1266495Sspeer * Any domain 1276495Sspeer */ 1286495Sspeer int 1297587SMichael.Speer@Sun.COM nxge_hio_init(nxge_t *nxge) 1306495Sspeer { 1316495Sspeer nxge_hio_data_t *nhd; 1326495Sspeer 1336495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1346495Sspeer if (nhd == 0) { 1356495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 1366495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 1376495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 1386495Sspeer } 1396495Sspeer 1406495Sspeer nhd->hio.ldoms = B_FALSE; 1416495Sspeer 1426495Sspeer return (NXGE_OK); 1436495Sspeer } 1446495Sspeer 1456495Sspeer #endif 1466495Sspeer 1476495Sspeer void 1487587SMichael.Speer@Sun.COM nxge_hio_uninit(nxge_t *nxge) 1496495Sspeer { 1506495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1516495Sspeer 1526495Sspeer ASSERT(nxge->nxge_hw_p->ndevs == 0); 1536495Sspeer 1547587SMichael.Speer@Sun.COM if (nhd != NULL) { 1557587SMichael.Speer@Sun.COM MUTEX_DESTROY(&nhd->lock); 1567587SMichael.Speer@Sun.COM KMEM_FREE(nhd, sizeof (*nhd)); 1577587SMichael.Speer@Sun.COM nxge->nxge_hw_p->hio = 0; 1587587SMichael.Speer@Sun.COM } 1596495Sspeer } 1606495Sspeer 1616495Sspeer /* 1626495Sspeer * nxge_dci_map 1636495Sspeer * 1646495Sspeer * Map a DMA channel index to a channel number. 1656495Sspeer * 1666495Sspeer * Arguments: 1676495Sspeer * instance The instance number of the driver. 1686495Sspeer * type The type of channel this is: Tx or Rx. 
1696495Sspeer * index The index to convert to a channel number 1706495Sspeer * 1716495Sspeer * Notes: 1726495Sspeer * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 1736495Sspeer * 1746495Sspeer * Context: 1756495Sspeer * Any domain 1766495Sspeer */ 1776495Sspeer int 1786495Sspeer nxge_dci_map( 1796495Sspeer nxge_t *nxge, 1806495Sspeer vpc_type_t type, 1816495Sspeer int index) 1826495Sspeer { 1836495Sspeer nxge_grp_set_t *set; 1846495Sspeer int dc; 1856495Sspeer 1866495Sspeer switch (type) { 1876495Sspeer case VP_BOUND_TX: 1886495Sspeer set = &nxge->tx_set; 1896495Sspeer break; 1906495Sspeer case VP_BOUND_RX: 1916495Sspeer set = &nxge->rx_set; 1926495Sspeer break; 1936495Sspeer } 1946495Sspeer 1956495Sspeer for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 1966495Sspeer if ((1 << dc) & set->owned.map) { 1976495Sspeer if (index == 0) 1986495Sspeer return (dc); 1996495Sspeer else 2006495Sspeer index--; 2016495Sspeer } 2026495Sspeer } 2036495Sspeer 2046495Sspeer return (-1); 2056495Sspeer } 2066495Sspeer 2076495Sspeer /* 2086495Sspeer * --------------------------------------------------------------------- 2096495Sspeer * These are the general-purpose DMA channel group functions. That is, 2106495Sspeer * these functions are used to manage groups of TDCs or RDCs in an HIO 2116495Sspeer * environment. 2126495Sspeer * 2136495Sspeer * But is also expected that in the future they will be able to manage 2146495Sspeer * Crossbow groups. 2156495Sspeer * --------------------------------------------------------------------- 2166495Sspeer */ 2176495Sspeer 2186495Sspeer /* 2197766SMichael.Speer@Sun.COM * nxge_grp_cleanup(p_nxge_t nxge) 2207766SMichael.Speer@Sun.COM * 2217766SMichael.Speer@Sun.COM * Remove all outstanding groups. 
2227766SMichael.Speer@Sun.COM * 2237766SMichael.Speer@Sun.COM * Arguments: 2247766SMichael.Speer@Sun.COM * nxge 2257766SMichael.Speer@Sun.COM */ 2267766SMichael.Speer@Sun.COM void 2277766SMichael.Speer@Sun.COM nxge_grp_cleanup(p_nxge_t nxge) 2287766SMichael.Speer@Sun.COM { 2297766SMichael.Speer@Sun.COM nxge_grp_set_t *set; 2307766SMichael.Speer@Sun.COM int i; 2317766SMichael.Speer@Sun.COM 2327766SMichael.Speer@Sun.COM MUTEX_ENTER(&nxge->group_lock); 2337766SMichael.Speer@Sun.COM 2347766SMichael.Speer@Sun.COM /* 2357766SMichael.Speer@Sun.COM * Find RX groups that need to be cleaned up. 2367766SMichael.Speer@Sun.COM */ 2377766SMichael.Speer@Sun.COM set = &nxge->rx_set; 2387766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2397766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2407766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2417766SMichael.Speer@Sun.COM set->group[i] = NULL; 2427766SMichael.Speer@Sun.COM } 2437766SMichael.Speer@Sun.COM } 2447766SMichael.Speer@Sun.COM 2457766SMichael.Speer@Sun.COM /* 2467766SMichael.Speer@Sun.COM * Find TX groups that need to be cleaned up. 2477766SMichael.Speer@Sun.COM */ 2487766SMichael.Speer@Sun.COM set = &nxge->tx_set; 2497766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2507766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2517766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2527766SMichael.Speer@Sun.COM set->group[i] = NULL; 2537766SMichael.Speer@Sun.COM } 2547766SMichael.Speer@Sun.COM } 2557766SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 2567766SMichael.Speer@Sun.COM } 2577766SMichael.Speer@Sun.COM 2587766SMichael.Speer@Sun.COM 2597766SMichael.Speer@Sun.COM /* 2606495Sspeer * nxge_grp_add 2616495Sspeer * 2626495Sspeer * Add a group to an instance of NXGE. 
2636495Sspeer * 2646495Sspeer * Arguments: 2656495Sspeer * nxge 2666495Sspeer * type Tx or Rx 2676495Sspeer * 2686495Sspeer * Notes: 2696495Sspeer * 2706495Sspeer * Context: 2716495Sspeer * Any domain 2726495Sspeer */ 2737755SMisaki.Kataoka@Sun.COM nxge_grp_t * 2746495Sspeer nxge_grp_add( 2756495Sspeer nxge_t *nxge, 2766495Sspeer nxge_grp_type_t type) 2776495Sspeer { 2786495Sspeer nxge_grp_set_t *set; 2796495Sspeer nxge_grp_t *group; 2806495Sspeer int i; 2816495Sspeer 2826495Sspeer group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 2836495Sspeer group->nxge = nxge; 2846495Sspeer 2856495Sspeer MUTEX_ENTER(&nxge->group_lock); 2866495Sspeer switch (type) { 2876495Sspeer case NXGE_TRANSMIT_GROUP: 2886495Sspeer case EXT_TRANSMIT_GROUP: 2896495Sspeer set = &nxge->tx_set; 2906495Sspeer break; 2916495Sspeer default: 2926495Sspeer set = &nxge->rx_set; 2936495Sspeer break; 2946495Sspeer } 2956495Sspeer 2966495Sspeer group->type = type; 2976495Sspeer group->active = B_TRUE; 2986495Sspeer group->sequence = set->sequence++; 2996495Sspeer 3006495Sspeer /* Find an empty slot for this logical group. */ 3016495Sspeer for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 3026495Sspeer if (set->group[i] == 0) { 3036495Sspeer group->index = i; 3046495Sspeer set->group[i] = group; 3056495Sspeer NXGE_DC_SET(set->lg.map, i); 3066495Sspeer set->lg.count++; 3076495Sspeer break; 3086495Sspeer } 3096495Sspeer } 3106495Sspeer MUTEX_EXIT(&nxge->group_lock); 3116495Sspeer 3126495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3136495Sspeer "nxge_grp_add: %cgroup = %d.%d", 3146495Sspeer type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3156495Sspeer nxge->mac.portnum, group->sequence)); 3166495Sspeer 3177755SMisaki.Kataoka@Sun.COM return (group); 3186495Sspeer } 3196495Sspeer 3206495Sspeer void 3216495Sspeer nxge_grp_remove( 3226495Sspeer nxge_t *nxge, 3237755SMisaki.Kataoka@Sun.COM nxge_grp_t *group) /* The group to remove. 
*/ 3246495Sspeer { 3256495Sspeer nxge_grp_set_t *set; 3266495Sspeer vpc_type_t type; 3276495Sspeer 3286495Sspeer MUTEX_ENTER(&nxge->group_lock); 3296495Sspeer switch (group->type) { 3306495Sspeer case NXGE_TRANSMIT_GROUP: 3316495Sspeer case EXT_TRANSMIT_GROUP: 3326495Sspeer set = &nxge->tx_set; 3336495Sspeer break; 3346495Sspeer default: 3356495Sspeer set = &nxge->rx_set; 3366495Sspeer break; 3376495Sspeer } 3386495Sspeer 3396495Sspeer if (set->group[group->index] != group) { 3406495Sspeer MUTEX_EXIT(&nxge->group_lock); 3416495Sspeer return; 3426495Sspeer } 3436495Sspeer 3446495Sspeer set->group[group->index] = 0; 3456495Sspeer NXGE_DC_RESET(set->lg.map, group->index); 3466495Sspeer set->lg.count--; 3476495Sspeer 3486495Sspeer /* While inside the mutex, deactivate <group>. */ 3496495Sspeer group->active = B_FALSE; 3506495Sspeer 3516495Sspeer MUTEX_EXIT(&nxge->group_lock); 3526495Sspeer 3536495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3546495Sspeer "nxge_grp_remove(%c.%d.%d) called", 3556495Sspeer group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3566495Sspeer nxge->mac.portnum, group->sequence)); 3576495Sspeer 3586495Sspeer /* Now, remove any DCs which are still active. */ 3596495Sspeer switch (group->type) { 3606495Sspeer default: 3616495Sspeer type = VP_BOUND_TX; 3626495Sspeer break; 3636495Sspeer case NXGE_RECEIVE_GROUP: 3646495Sspeer case EXT_RECEIVE_GROUP: 3656495Sspeer type = VP_BOUND_RX; 3666495Sspeer } 3676495Sspeer 3686495Sspeer while (group->dc) { 3696495Sspeer nxge_grp_dc_remove(nxge, type, group->dc->channel); 3706495Sspeer } 3716495Sspeer 3726495Sspeer KMEM_FREE(group, sizeof (*group)); 3736495Sspeer } 3746495Sspeer 3756495Sspeer /* 3766495Sspeer * nx_hio_dc_add 3776495Sspeer * 3786495Sspeer * Add a DMA channel to a VR/Group. 3796495Sspeer * 3806495Sspeer * Arguments: 3816495Sspeer * nxge 3826495Sspeer * channel The channel to add. 
3836495Sspeer * Notes: 3846495Sspeer * 3856495Sspeer * Context: 3866495Sspeer * Any domain 3876495Sspeer */ 3886495Sspeer /* ARGSUSED */ 3896495Sspeer int 3906495Sspeer nxge_grp_dc_add( 3916495Sspeer nxge_t *nxge, 3927755SMisaki.Kataoka@Sun.COM nxge_grp_t *group, /* The group to add <channel> to. */ 3936495Sspeer vpc_type_t type, /* Rx or Tx */ 3946495Sspeer int channel) /* A physical/logical channel number */ 3956495Sspeer { 3966495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 3976495Sspeer nxge_hio_dc_t *dc; 3986495Sspeer nxge_grp_set_t *set; 3996602Sspeer nxge_status_t status = NXGE_OK; 4006495Sspeer 4016495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 4026495Sspeer 4037755SMisaki.Kataoka@Sun.COM if (group == NULL) 4046495Sspeer return (0); 4056495Sspeer 4066495Sspeer switch (type) { 4076495Sspeer default: 4086495Sspeer set = &nxge->tx_set; 4096495Sspeer if (channel > NXGE_MAX_TDCS) { 4106495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4116495Sspeer "nxge_grp_dc_add: TDC = %d", channel)); 4126495Sspeer return (NXGE_ERROR); 4136495Sspeer } 4146495Sspeer break; 4156495Sspeer case VP_BOUND_RX: 4166495Sspeer set = &nxge->rx_set; 4176495Sspeer if (channel > NXGE_MAX_RDCS) { 4186495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4196495Sspeer "nxge_grp_dc_add: RDC = %d", channel)); 4206495Sspeer return (NXGE_ERROR); 4216495Sspeer } 4226495Sspeer break; 4236495Sspeer } 4246495Sspeer 4256495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 4266495Sspeer "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 4276495Sspeer type == VP_BOUND_TX ? 't' : 'r', 4286495Sspeer nxge->mac.portnum, group->sequence, group->count, channel)); 4296495Sspeer 4306495Sspeer MUTEX_ENTER(&nxge->group_lock); 4316495Sspeer if (group->active != B_TRUE) { 4326495Sspeer /* We may be in the process of removing this group. 
*/ 4336495Sspeer MUTEX_EXIT(&nxge->group_lock); 4346495Sspeer return (NXGE_ERROR); 4356495Sspeer } 4366495Sspeer MUTEX_EXIT(&nxge->group_lock); 4376495Sspeer 4386495Sspeer if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 4396495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4406495Sspeer "nxge_grp_dc_add(%d): DC FIND failed", channel)); 4416495Sspeer return (NXGE_ERROR); 4426495Sspeer } 4436495Sspeer 4446495Sspeer MUTEX_ENTER(&nhd->lock); 4456495Sspeer 4466495Sspeer if (dc->group) { 4476495Sspeer MUTEX_EXIT(&nhd->lock); 4486495Sspeer /* This channel is already in use! */ 4496495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4506495Sspeer "nxge_grp_dc_add(%d): channel already in group", channel)); 4516495Sspeer return (NXGE_ERROR); 4526495Sspeer } 4536495Sspeer 4546495Sspeer dc->next = 0; 4556495Sspeer dc->page = channel; 4566495Sspeer dc->channel = (nxge_channel_t)channel; 4576495Sspeer 4586495Sspeer dc->type = type; 4596495Sspeer if (type == VP_BOUND_RX) { 4606495Sspeer dc->init = nxge_init_rxdma_channel; 4616495Sspeer dc->uninit = nxge_uninit_rxdma_channel; 4626495Sspeer } else { 4636495Sspeer dc->init = nxge_init_txdma_channel; 4646495Sspeer dc->uninit = nxge_uninit_txdma_channel; 4656495Sspeer } 4666495Sspeer 4677755SMisaki.Kataoka@Sun.COM dc->group = group; 4686495Sspeer 4696495Sspeer if (isLDOMguest(nxge)) 4706495Sspeer (void) nxge_hio_ldsv_add(nxge, dc); 4716495Sspeer 4726495Sspeer NXGE_DC_SET(set->owned.map, channel); 4736495Sspeer set->owned.count++; 4746495Sspeer 4756495Sspeer MUTEX_EXIT(&nhd->lock); 4766495Sspeer 4776602Sspeer if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 4786602Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4796602Sspeer "nxge_grp_dc_add(%d): channel init failed", channel)); 4806603Sspeer return (NXGE_ERROR); 4816602Sspeer } 4826602Sspeer 4836495Sspeer nxge_grp_dc_append(nxge, group, dc); 4846495Sspeer 485*7812SMichael.Speer@Sun.COM if (type == VP_BOUND_TX) { 486*7812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 
487*7812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_FALSE; 488*7812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 489*7812SMichael.Speer@Sun.COM } 490*7812SMichael.Speer@Sun.COM 4916495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 4926495Sspeer 4936602Sspeer return ((int)status); 4946495Sspeer } 4956495Sspeer 4966495Sspeer void 4976495Sspeer nxge_grp_dc_remove( 4986495Sspeer nxge_t *nxge, 4996495Sspeer vpc_type_t type, 5006495Sspeer int channel) 5016495Sspeer { 5026495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5036495Sspeer nxge_hio_dc_t *dc; 5046495Sspeer nxge_grp_set_t *set; 5056495Sspeer nxge_grp_t *group; 5066495Sspeer 5076495Sspeer dc_uninit_t uninit; 5086495Sspeer 5096495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 5106495Sspeer 5116495Sspeer if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) { 5126495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5136495Sspeer "nx_hio_dc_remove: find(%d) failed", channel)); 5146495Sspeer return; 5156495Sspeer } 5166495Sspeer group = (nxge_grp_t *)dc->group; 5176495Sspeer 5186495Sspeer if (isLDOMguest(nxge)) { 5196495Sspeer (void) nxge_hio_intr_remove(nxge, type, channel); 5206495Sspeer } 5216495Sspeer 5226495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 5236495Sspeer "DC remove: group = %d.%d.%d, %cdc %d", 5246495Sspeer nxge->mac.portnum, group->sequence, group->count, 5256495Sspeer type == VP_BOUND_TX ? 't' : 'r', dc->channel)); 5266495Sspeer 5276495Sspeer MUTEX_ENTER(&nhd->lock); 5286495Sspeer 5296602Sspeer set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 5306602Sspeer if (isLDOMs(nxge) && ((1 << channel) && set->shared.map)) { 5316602Sspeer NXGE_DC_RESET(group->map, channel); 5326602Sspeer } 5336602Sspeer 5346495Sspeer /* Remove the DC from its group. 
*/ 5356495Sspeer if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 5366495Sspeer MUTEX_EXIT(&nhd->lock); 5376495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5386495Sspeer "nx_hio_dc_remove(%d) failed", channel)); 5396495Sspeer return; 5406495Sspeer } 5416495Sspeer 5426495Sspeer uninit = dc->uninit; 5436495Sspeer channel = dc->channel; 5446495Sspeer 5456495Sspeer NXGE_DC_RESET(set->owned.map, channel); 5466495Sspeer set->owned.count--; 5476495Sspeer 5486495Sspeer (void) memset(dc, 0, sizeof (*dc)); 5496495Sspeer 5506495Sspeer MUTEX_EXIT(&nhd->lock); 5516495Sspeer 5526495Sspeer (*uninit)(nxge, channel); 5536495Sspeer 5546495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 5556495Sspeer } 5566495Sspeer 5576495Sspeer nxge_hio_dc_t * 5586495Sspeer nxge_grp_dc_find( 5596495Sspeer nxge_t *nxge, 5606495Sspeer vpc_type_t type, /* Rx or Tx */ 5616495Sspeer int channel) 5626495Sspeer { 5636495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5646495Sspeer nxge_hio_dc_t *current; 5656495Sspeer 5666495Sspeer current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 5676495Sspeer 5686495Sspeer if (!isLDOMguest(nxge)) { 5696495Sspeer return (¤t[channel]); 5706495Sspeer } else { 5716495Sspeer /* We're in a guest domain. */ 5726495Sspeer int i, limit = (type == VP_BOUND_TX) ? 5736495Sspeer NXGE_MAX_TDCS : NXGE_MAX_RDCS; 5746495Sspeer 5756495Sspeer MUTEX_ENTER(&nhd->lock); 5766495Sspeer for (i = 0; i < limit; i++, current++) { 5776495Sspeer if (current->channel == channel) { 5786495Sspeer if (current->vr && current->vr->nxge == 5796495Sspeer (uintptr_t)nxge) { 5806495Sspeer MUTEX_EXIT(&nhd->lock); 5816495Sspeer return (current); 5826495Sspeer } 5836495Sspeer } 5846495Sspeer } 5856495Sspeer MUTEX_EXIT(&nhd->lock); 5866495Sspeer } 5876495Sspeer 5886495Sspeer return (0); 5896495Sspeer } 5906495Sspeer 5916495Sspeer /* 5926495Sspeer * nxge_grp_dc_append 5936495Sspeer * 5946495Sspeer * Append a DMA channel to a group. 
5956495Sspeer * 5966495Sspeer * Arguments: 5976495Sspeer * nxge 5986495Sspeer * group The group to append to 5996495Sspeer * dc The DMA channel to append 6006495Sspeer * 6016495Sspeer * Notes: 6026495Sspeer * 6036495Sspeer * Context: 6046495Sspeer * Any domain 6056495Sspeer */ 6066495Sspeer static 6076495Sspeer void 6086495Sspeer nxge_grp_dc_append( 6096495Sspeer nxge_t *nxge, 6106495Sspeer nxge_grp_t *group, 6116495Sspeer nxge_hio_dc_t *dc) 6126495Sspeer { 6136495Sspeer MUTEX_ENTER(&nxge->group_lock); 6146495Sspeer 6156495Sspeer if (group->dc == 0) { 6166495Sspeer group->dc = dc; 6176495Sspeer } else { 6186495Sspeer nxge_hio_dc_t *current = group->dc; 6196495Sspeer do { 6206495Sspeer if (current->next == 0) { 6216495Sspeer current->next = dc; 6226495Sspeer break; 6236495Sspeer } 6246495Sspeer current = current->next; 6256495Sspeer } while (current); 6266495Sspeer } 6276495Sspeer 6286495Sspeer NXGE_DC_SET(group->map, dc->channel); 6296495Sspeer 6306495Sspeer nxge_grp_dc_map(group); 6316602Sspeer group->count++; 6326495Sspeer 6336495Sspeer MUTEX_EXIT(&nxge->group_lock); 6346495Sspeer } 6356495Sspeer 6366495Sspeer /* 6376495Sspeer * nxge_grp_dc_unlink 6386495Sspeer * 6396495Sspeer * Unlink a DMA channel fromits linked list (group). 
6406495Sspeer * 6416495Sspeer * Arguments: 6426495Sspeer * nxge 6436495Sspeer * group The group (linked list) to unlink from 6446495Sspeer * dc The DMA channel to append 6456495Sspeer * 6466495Sspeer * Notes: 6476495Sspeer * 6486495Sspeer * Context: 6496495Sspeer * Any domain 6506495Sspeer */ 6516495Sspeer nxge_hio_dc_t * 652*7812SMichael.Speer@Sun.COM nxge_grp_dc_unlink(nxge_t *nxge, nxge_grp_t *group, int channel) 6536495Sspeer { 6546495Sspeer nxge_hio_dc_t *current, *previous; 6556495Sspeer 6566495Sspeer MUTEX_ENTER(&nxge->group_lock); 6576495Sspeer 658*7812SMichael.Speer@Sun.COM if (group == NULL) { 659*7812SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 660*7812SMichael.Speer@Sun.COM return (0); 661*7812SMichael.Speer@Sun.COM } 662*7812SMichael.Speer@Sun.COM 6636495Sspeer if ((current = group->dc) == 0) { 6646495Sspeer MUTEX_EXIT(&nxge->group_lock); 6656495Sspeer return (0); 6666495Sspeer } 6676495Sspeer 6686495Sspeer previous = 0; 6696495Sspeer do { 6706495Sspeer if (current->channel == channel) { 6716495Sspeer if (previous) 6726495Sspeer previous->next = current->next; 6736495Sspeer else 6746495Sspeer group->dc = current->next; 6756495Sspeer break; 6766495Sspeer } 6776495Sspeer previous = current; 6786495Sspeer current = current->next; 6796495Sspeer } while (current); 6806495Sspeer 6816495Sspeer if (current == 0) { 6826495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 6836495Sspeer "DC unlink: DC %d not found", channel)); 6846495Sspeer } else { 6856495Sspeer current->next = 0; 6866495Sspeer current->group = 0; 6876495Sspeer 6886495Sspeer group->count--; 6896495Sspeer } 6906495Sspeer 6916495Sspeer nxge_grp_dc_map(group); 6926495Sspeer 6936495Sspeer MUTEX_EXIT(&nxge->group_lock); 6946495Sspeer 6956495Sspeer return (current); 6966495Sspeer } 6976495Sspeer 6986495Sspeer /* 6996495Sspeer * nxge_grp_dc_map 7006495Sspeer * 7016495Sspeer * Map a linked list to an array of channel numbers. 
7026495Sspeer * 7036495Sspeer * Arguments: 7046495Sspeer * nxge 7056495Sspeer * group The group to remap. 7066495Sspeer * 7076495Sspeer * Notes: 7086495Sspeer * It is expected that the caller will hold the correct mutex. 7096495Sspeer * 7106495Sspeer * Context: 7116495Sspeer * Service domain 7126495Sspeer */ 7136495Sspeer void 7146495Sspeer nxge_grp_dc_map( 7156495Sspeer nxge_grp_t *group) 7166495Sspeer { 7176495Sspeer nxge_channel_t *legend; 7186495Sspeer nxge_hio_dc_t *dc; 7196495Sspeer 7206495Sspeer (void) memset(group->legend, 0, sizeof (group->legend)); 7216495Sspeer 7226495Sspeer legend = group->legend; 7236495Sspeer dc = group->dc; 7246495Sspeer while (dc) { 7256495Sspeer *legend = dc->channel; 7266495Sspeer legend++; 7276495Sspeer dc = dc->next; 7286495Sspeer } 7296495Sspeer } 7306495Sspeer 7316495Sspeer /* 7326495Sspeer * --------------------------------------------------------------------- 7336495Sspeer * These are HIO debugging functions. 7346495Sspeer * --------------------------------------------------------------------- 7356495Sspeer */ 7366495Sspeer 7376495Sspeer /* 7386495Sspeer * nxge_delay 7396495Sspeer * 7406495Sspeer * Delay <seconds> number of seconds. 7416495Sspeer * 7426495Sspeer * Arguments: 7436495Sspeer * nxge 7446495Sspeer * group The group to append to 7456495Sspeer * dc The DMA channel to append 7466495Sspeer * 7476495Sspeer * Notes: 7486495Sspeer * This is a developer-only function. 
7496495Sspeer * 7506495Sspeer * Context: 7516495Sspeer * Any domain 7526495Sspeer */ 7536495Sspeer void 7546495Sspeer nxge_delay( 7556495Sspeer int seconds) 7566495Sspeer { 7576495Sspeer delay(drv_usectohz(seconds * 1000000)); 7586495Sspeer } 7596495Sspeer 7606495Sspeer static dmc_reg_name_t rx_names[] = { 7616495Sspeer { "RXDMA_CFIG1", 0 }, 7626495Sspeer { "RXDMA_CFIG2", 8 }, 7636495Sspeer { "RBR_CFIG_A", 0x10 }, 7646495Sspeer { "RBR_CFIG_B", 0x18 }, 7656495Sspeer { "RBR_KICK", 0x20 }, 7666495Sspeer { "RBR_STAT", 0x28 }, 7676495Sspeer { "RBR_HDH", 0x30 }, 7686495Sspeer { "RBR_HDL", 0x38 }, 7696495Sspeer { "RCRCFIG_A", 0x40 }, 7706495Sspeer { "RCRCFIG_B", 0x48 }, 7716495Sspeer { "RCRSTAT_A", 0x50 }, 7726495Sspeer { "RCRSTAT_B", 0x58 }, 7736495Sspeer { "RCRSTAT_C", 0x60 }, 7746495Sspeer { "RX_DMA_ENT_MSK", 0x68 }, 7756495Sspeer { "RX_DMA_CTL_STAT", 0x70 }, 7766495Sspeer { "RCR_FLSH", 0x78 }, 7776495Sspeer { "RXMISC", 0x90 }, 7786495Sspeer { "RX_DMA_CTL_STAT_DBG", 0x98 }, 7796495Sspeer { 0, -1 } 7806495Sspeer }; 7816495Sspeer 7826495Sspeer static dmc_reg_name_t tx_names[] = { 7836495Sspeer { "Tx_RNG_CFIG", 0 }, 7846495Sspeer { "Tx_RNG_HDL", 0x10 }, 7856495Sspeer { "Tx_RNG_KICK", 0x18 }, 7866495Sspeer { "Tx_ENT_MASK", 0x20 }, 7876495Sspeer { "Tx_CS", 0x28 }, 7886495Sspeer { "TxDMA_MBH", 0x30 }, 7896495Sspeer { "TxDMA_MBL", 0x38 }, 7906495Sspeer { "TxDMA_PRE_ST", 0x40 }, 7916495Sspeer { "Tx_RNG_ERR_LOGH", 0x48 }, 7926495Sspeer { "Tx_RNG_ERR_LOGL", 0x50 }, 7936495Sspeer { "TDMC_INTR_DBG", 0x60 }, 7946495Sspeer { "Tx_CS_DBG", 0x68 }, 7956495Sspeer { 0, -1 } 7966495Sspeer }; 7976495Sspeer 7986495Sspeer /* 7996495Sspeer * nxge_xx2str 8006495Sspeer * 8016495Sspeer * Translate a register address into a string. 8026495Sspeer * 8036495Sspeer * Arguments: 8046495Sspeer * offset The address of the register to translate. 8056495Sspeer * 8066495Sspeer * Notes: 8076495Sspeer * These are developer-only function. 
8086495Sspeer * 8096495Sspeer * Context: 8106495Sspeer * Any domain 8116495Sspeer */ 8126495Sspeer const char * 8136495Sspeer nxge_rx2str( 8146495Sspeer int offset) 8156495Sspeer { 8166495Sspeer dmc_reg_name_t *reg = &rx_names[0]; 8176495Sspeer 8186495Sspeer offset &= DMA_CSR_MASK; 8196495Sspeer 8206495Sspeer while (reg->name) { 8216495Sspeer if (offset == reg->offset) 8226495Sspeer return (reg->name); 8236495Sspeer reg++; 8246495Sspeer } 8256495Sspeer 8266495Sspeer return (0); 8276495Sspeer } 8286495Sspeer 8296495Sspeer const char * 8306495Sspeer nxge_tx2str( 8316495Sspeer int offset) 8326495Sspeer { 8336495Sspeer dmc_reg_name_t *reg = &tx_names[0]; 8346495Sspeer 8356495Sspeer offset &= DMA_CSR_MASK; 8366495Sspeer 8376495Sspeer while (reg->name) { 8386495Sspeer if (offset == reg->offset) 8396495Sspeer return (reg->name); 8406495Sspeer reg++; 8416495Sspeer } 8426495Sspeer 8436495Sspeer return (0); 8446495Sspeer } 8456495Sspeer 8466495Sspeer /* 8476495Sspeer * nxge_ddi_perror 8486495Sspeer * 8496495Sspeer * Map a DDI error number to a string. 8506495Sspeer * 8516495Sspeer * Arguments: 8526495Sspeer * ddi_error The DDI error number to map. 
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	/* Translate each DDI return code to its symbolic name. */
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the sun4v (LDoms-capable) version.  It classifies the
 *	domain (service vs. guest), and in an LDoms environment sets up
 *	the virtualization-region, share, and ring-group bookkeeping.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	/* Allocate the shared (per-hardware) HIO state on first use. */
	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * An N2 NIU with hypervisor services v1.1 negotiated is
	 * reclassified as a service domain.
	 */
	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			if (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "nxge_hio_init: hypervisor services "
			    "version %d.%d",
			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
		}
	}

	/* Outside an LDoms environment there is nothing more to set up. */
	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	/*
	 * NOTE(review): two of the NXGE_VR_SR_MAX regions appear to be
	 * reserved (not available for sharing) -- confirm which two
	 * against the nxge_hio.h region definitions.
	 */
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize tdc share state, shares and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = (void *)NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 *
		 * A guest domain needs the vio_net registration entry
		 * points; resolve them dynamically from the vio_net
		 * module, and fail if it is not loaded.
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}

/*
 * nxge_hio_add_mac
 *
 * MAC-layer callback: add a unicast MAC address to an Rx ring group,
 * programming it into the group's share/virtualization region.
 */
static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int rv, sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	/* Find the share, and through it the VR, backing this group. */
	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Program the mac address for the group/share.
10376495Sspeer */ 10386495Sspeer if ((rv = nxge_hio_hostinfo_init(nxge, vr, 10396495Sspeer (ether_addr_t *)mac_addr)) != 0) { 10406495Sspeer return (rv); 10416495Sspeer } 10426495Sspeer 10436495Sspeer return (0); 10446495Sspeer } 10456495Sspeer 10466495Sspeer /* ARGSUSED */ 10476495Sspeer static int 10486495Sspeer nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 10496495Sspeer { 10506495Sspeer nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 10516495Sspeer p_nxge_t nxge = rxgroup->nxgep; 10526495Sspeer int group = rxgroup->gindex; 10536495Sspeer int sindex; 10546495Sspeer nxge_hio_vr_t *vr; /* The Virtualization Region */ 10556495Sspeer 10566495Sspeer sindex = nxge->rx_hio_groups[group].sindex; 10576495Sspeer vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 10586495Sspeer 10596495Sspeer /* 10606495Sspeer * Remove the mac address for the group/share. 10616495Sspeer */ 10626495Sspeer nxge_hio_hostinfo_uninit(nxge, vr); 10636495Sspeer 10646495Sspeer return (0); 10656495Sspeer } 10666495Sspeer 10676495Sspeer /* ARGSUSED */ 10686495Sspeer void 10696495Sspeer nxge_hio_group_get(void *arg, mac_ring_type_t type, int group, 10706495Sspeer mac_group_info_t *infop, mac_group_handle_t ghdl) 10716495Sspeer { 10726495Sspeer p_nxge_t nxgep = (p_nxge_t)arg; 10736495Sspeer nxge_rx_ring_group_t *rxgroup; 10746495Sspeer 10756495Sspeer switch (type) { 10766495Sspeer case MAC_RING_TYPE_RX: 10776495Sspeer rxgroup = &nxgep->rx_hio_groups[group]; 10786495Sspeer rxgroup->gindex = group; 10796495Sspeer 10806495Sspeer infop->mrg_driver = (mac_group_driver_t)rxgroup; 10816495Sspeer infop->mrg_start = NULL; 10826495Sspeer infop->mrg_stop = NULL; 10836495Sspeer infop->mrg_addmac = nxge_hio_add_mac; 10846495Sspeer infop->mrg_remmac = nxge_hio_rem_mac; 10856495Sspeer infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS; 10866495Sspeer break; 10876495Sspeer 10886495Sspeer case MAC_RING_TYPE_TX: 10896495Sspeer break; 10906495Sspeer } 10916495Sspeer } 10926495Sspeer 10936495Sspeer int 
10946495Sspeer nxge_hio_share_assign( 10956495Sspeer nxge_t *nxge, 10966495Sspeer uint64_t cookie, 10976495Sspeer res_map_t *tmap, 10986495Sspeer res_map_t *rmap, 10996495Sspeer nxge_hio_vr_t *vr) 11006495Sspeer { 11016495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 11026495Sspeer uint64_t slot, hv_rv; 11036495Sspeer nxge_hio_dc_t *dc; 11046495Sspeer nxhv_vr_fp_t *fp; 11056495Sspeer int i; 11066495Sspeer 11076495Sspeer /* 11086495Sspeer * Ask the Hypervisor to set up the VR for us 11096495Sspeer */ 11106495Sspeer fp = &nhd->hio.vr; 11116495Sspeer if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 11126495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 11136713Sspeer "nx_hio_share_assign: " 11146713Sspeer "vr->assign() returned %d", hv_rv)); 11157755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(vr); 11166495Sspeer return (-EIO); 11176495Sspeer } 11186495Sspeer 11196495Sspeer /* 11206495Sspeer * For each shared TDC, ask the HV to find us an empty slot. 11216495Sspeer * ----------------------------------------------------- 11226495Sspeer */ 11236495Sspeer dc = vr->tx_group.dc; 11246495Sspeer for (i = 0; i < NXGE_MAX_TDCS; i++) { 11256495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 11266495Sspeer while (dc) { 11276495Sspeer hv_rv = (*tx->assign) 11286495Sspeer (vr->cookie, dc->channel, &slot); 11296495Sspeer if (hv_rv != 0) { 11306495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11316495Sspeer "nx_hio_share_assign: " 11326495Sspeer "tx->assign(%x, %d) failed: %ld", 11336495Sspeer vr->cookie, dc->channel, hv_rv)); 11346495Sspeer return (-EIO); 11356495Sspeer } 11366495Sspeer 11376495Sspeer dc->cookie = vr->cookie; 11386495Sspeer dc->page = (vp_channel_t)slot; 11396495Sspeer 11406495Sspeer /* Inform the caller about the slot chosen. */ 11416495Sspeer (*tmap) |= 1 << slot; 11426495Sspeer 11436495Sspeer dc = dc->next; 11446495Sspeer } 11456495Sspeer } 11466495Sspeer 11476495Sspeer /* 11486495Sspeer * For each shared RDC, ask the HV to find us an empty slot. 
11496495Sspeer * ----------------------------------------------------- 11506495Sspeer */ 11516495Sspeer dc = vr->rx_group.dc; 11526495Sspeer for (i = 0; i < NXGE_MAX_RDCS; i++) { 11536495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 11546495Sspeer while (dc) { 11556495Sspeer hv_rv = (*rx->assign) 11566495Sspeer (vr->cookie, dc->channel, &slot); 11576495Sspeer if (hv_rv != 0) { 11586495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11596495Sspeer "nx_hio_share_assign: " 11606495Sspeer "rx->assign(%x, %d) failed: %ld", 11616495Sspeer vr->cookie, dc->channel, hv_rv)); 11626495Sspeer return (-EIO); 11636495Sspeer } 11646495Sspeer 11656495Sspeer dc->cookie = vr->cookie; 11666495Sspeer dc->page = (vp_channel_t)slot; 11676495Sspeer 11686495Sspeer /* Inform the caller about the slot chosen. */ 11696495Sspeer (*rmap) |= 1 << slot; 11706495Sspeer 11716495Sspeer dc = dc->next; 11726495Sspeer } 11736495Sspeer } 11746495Sspeer 11756495Sspeer return (0); 11766495Sspeer } 11776495Sspeer 11786495Sspeer int 11796495Sspeer nxge_hio_share_unassign( 11806495Sspeer nxge_hio_vr_t *vr) 11816495Sspeer { 11826495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 11836495Sspeer nxge_hio_data_t *nhd; 11846495Sspeer nxge_hio_dc_t *dc; 11856495Sspeer nxhv_vr_fp_t *fp; 11866495Sspeer uint64_t hv_rv; 11876495Sspeer 11886495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 11896495Sspeer 11906495Sspeer dc = vr->tx_group.dc; 11916495Sspeer while (dc) { 11926495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 11936495Sspeer hv_rv = (*tx->unassign)(vr->cookie, dc->page); 11946495Sspeer if (hv_rv != 0) { 11956495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11966495Sspeer "nx_hio_dc_unshare: " 11976495Sspeer "tx->unassign(%x, %d) failed: %ld", 11986495Sspeer vr->cookie, dc->page, hv_rv)); 11996495Sspeer } 12006495Sspeer dc = dc->next; 12016495Sspeer } 12026495Sspeer 12036495Sspeer dc = vr->rx_group.dc; 12046495Sspeer while (dc) { 12056495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 12066495Sspeer hv_rv = (*rx->unassign)(vr->cookie, 
dc->page); 12076495Sspeer if (hv_rv != 0) { 12086495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 12096495Sspeer "nx_hio_dc_unshare: " 12106495Sspeer "rx->unassign(%x, %d) failed: %ld", 12116495Sspeer vr->cookie, dc->page, hv_rv)); 12126495Sspeer } 12136495Sspeer dc = dc->next; 12146495Sspeer } 12156495Sspeer 12166495Sspeer fp = &nhd->hio.vr; 12176495Sspeer if (fp->unassign) { 12186495Sspeer hv_rv = (*fp->unassign)(vr->cookie); 12196495Sspeer if (hv_rv != 0) { 12206495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 12216495Sspeer "vr->assign(%x) failed: %ld", 12226495Sspeer vr->cookie, hv_rv)); 12236495Sspeer } 12246495Sspeer } 12256495Sspeer 12266495Sspeer return (0); 12276495Sspeer } 12286495Sspeer 12296495Sspeer int 12306495Sspeer nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 12316495Sspeer mac_share_handle_t *shandle) 12326495Sspeer { 12336495Sspeer p_nxge_t nxge = (p_nxge_t)arg; 12346495Sspeer nxge_rx_ring_group_t *rxgroup; 12356495Sspeer nxge_share_handle_t *shp; 12366495Sspeer 12376495Sspeer nxge_hio_vr_t *vr; /* The Virtualization Region */ 12386495Sspeer uint64_t rmap, tmap; 12396495Sspeer int rv; 12406495Sspeer 12416495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 12426495Sspeer 12436495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 12446495Sspeer 12456495Sspeer if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 12466495Sspeer nhd->hio.rx.assign == 0) { 12476495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 12486495Sspeer return (EIO); 12496495Sspeer } 12506495Sspeer 12516495Sspeer /* 12526495Sspeer * Get a VR. 12536495Sspeer */ 12547755SMisaki.Kataoka@Sun.COM if ((vr = nxge_hio_vr_share(nxge)) == 0) 12556495Sspeer return (EAGAIN); 12566495Sspeer 12576495Sspeer /* 12586495Sspeer * Get an RDC group for us to use. 
12596495Sspeer */ 12606495Sspeer if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 12617755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(vr); 12626495Sspeer return (EBUSY); 12636495Sspeer } 12646495Sspeer 12656495Sspeer /* 12666495Sspeer * Add resources to the share. 12676495Sspeer */ 12686495Sspeer tmap = 0; 12697755SMisaki.Kataoka@Sun.COM rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, 12706495Sspeer NXGE_HIO_SHARE_MAX_CHANNELS); 12716495Sspeer if (rv != 0) { 12727755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(vr); 12736495Sspeer return (rv); 12746495Sspeer } 12756495Sspeer 12766495Sspeer rmap = 0; 12777755SMisaki.Kataoka@Sun.COM rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, 12786495Sspeer NXGE_HIO_SHARE_MAX_CHANNELS); 12796495Sspeer if (rv != 0) { 12807755SMisaki.Kataoka@Sun.COM nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 12817755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(vr); 12826495Sspeer return (rv); 12836495Sspeer } 12846495Sspeer 12856495Sspeer if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 12867755SMisaki.Kataoka@Sun.COM nxge_hio_remres(vr, MAC_RING_TYPE_RX, tmap); 12877755SMisaki.Kataoka@Sun.COM nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 12887755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(vr); 12896495Sspeer return (rv); 12906495Sspeer } 12916495Sspeer 12926495Sspeer rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 12936495Sspeer rxgroup->gindex = vr->rdc_tbl; 12946495Sspeer rxgroup->sindex = vr->region; 12956495Sspeer 12966495Sspeer shp = &nxge->shares[vr->region]; 12976495Sspeer shp->index = vr->region; 12986495Sspeer shp->vrp = (void *)vr; 12996495Sspeer shp->tmap = tmap; 13006495Sspeer shp->rmap = rmap; 13016495Sspeer shp->rxgroup = vr->rdc_tbl; 13026495Sspeer shp->active = B_TRUE; 13036495Sspeer 13046495Sspeer /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 13056495Sspeer *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 13066495Sspeer 13076495Sspeer *shandle = (mac_share_handle_t)shp; 13086495Sspeer 
13096495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 13106495Sspeer return (0); 13116495Sspeer } 13126495Sspeer 13136495Sspeer void 13146495Sspeer nxge_hio_share_free(mac_share_handle_t shandle) 13156495Sspeer { 13166495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 13176495Sspeer 13186495Sspeer /* 13196495Sspeer * First, unassign the VR (take it back), 13206495Sspeer * so we can enable interrupts again. 13216495Sspeer */ 13226498Sspeer (void) nxge_hio_share_unassign(shp->vrp); 13236495Sspeer 13246495Sspeer /* 13256495Sspeer * Free Ring Resources for TX and RX 13266495Sspeer */ 13277755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 13287755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 13296495Sspeer 13306495Sspeer /* 13316495Sspeer * Free VR resource. 13326495Sspeer */ 13337755SMisaki.Kataoka@Sun.COM nxge_hio_unshare(shp->vrp); 13346495Sspeer 13356495Sspeer /* 13366495Sspeer * Clear internal handle state. 
13376495Sspeer */ 13386495Sspeer shp->index = 0; 13396495Sspeer shp->vrp = (void *)NULL; 13406495Sspeer shp->tmap = 0; 13416495Sspeer shp->rmap = 0; 13426495Sspeer shp->rxgroup = 0; 13436495Sspeer shp->active = B_FALSE; 13446495Sspeer } 13456495Sspeer 13466495Sspeer void 13476495Sspeer nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 13486495Sspeer uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 13496495Sspeer { 13506495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 13516495Sspeer 13526495Sspeer switch (type) { 13536495Sspeer case MAC_RING_TYPE_RX: 13546495Sspeer *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 13556495Sspeer *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 13566495Sspeer *rmap = shp->rmap; 13576495Sspeer *gnum = shp->rxgroup; 13586495Sspeer break; 13596495Sspeer 13606495Sspeer case MAC_RING_TYPE_TX: 13616495Sspeer *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 13626495Sspeer *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 13636495Sspeer *rmap = shp->tmap; 13646495Sspeer *gnum = 0; 13656495Sspeer break; 13666495Sspeer } 13676495Sspeer } 13686495Sspeer 13696495Sspeer /* 13706495Sspeer * nxge_hio_vr_share 13716495Sspeer * 13726495Sspeer * Find an unused Virtualization Region (VR). 
13736495Sspeer * 13746495Sspeer * Arguments: 13756495Sspeer * nxge 13766495Sspeer * 13776495Sspeer * Notes: 13786495Sspeer * 13796495Sspeer * Context: 13806495Sspeer * Service domain 13816495Sspeer */ 13827755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t * 13836495Sspeer nxge_hio_vr_share( 13846495Sspeer nxge_t *nxge) 13856495Sspeer { 13866495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 13876495Sspeer nxge_hio_vr_t *vr; 13886495Sspeer 13896495Sspeer int first, limit, region; 13906495Sspeer 13916495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 13926495Sspeer 13936495Sspeer MUTEX_ENTER(&nhd->lock); 13946495Sspeer 13957755SMisaki.Kataoka@Sun.COM if (nhd->vrs == 0) { 13966495Sspeer MUTEX_EXIT(&nhd->lock); 13976495Sspeer return (0); 13986495Sspeer } 13996495Sspeer 14006495Sspeer /* Find an empty virtual region (VR). */ 14016495Sspeer if (nxge->function_num == 0) { 14026495Sspeer // FUNC0_VIR0 'belongs' to NIU port 0. 14036495Sspeer first = FUNC0_VIR1; 14046495Sspeer limit = FUNC2_VIR0; 14056495Sspeer } else if (nxge->function_num == 1) { 14066495Sspeer // FUNC2_VIR0 'belongs' to NIU port 1. 
14076495Sspeer first = FUNC2_VIR1; 14086495Sspeer limit = FUNC_VIR_MAX; 14096495Sspeer } else { 14106495Sspeer cmn_err(CE_WARN, 14116495Sspeer "Shares not supported on function(%d) at this time.\n", 14126495Sspeer nxge->function_num); 14136495Sspeer } 14146495Sspeer 14156495Sspeer for (region = first; region < limit; region++) { 14166495Sspeer if (nhd->vr[region].nxge == 0) 14176495Sspeer break; 14186495Sspeer } 14196495Sspeer 14206495Sspeer if (region == limit) { 14216495Sspeer MUTEX_EXIT(&nhd->lock); 14226495Sspeer return (0); 14236495Sspeer } 14246495Sspeer 14256495Sspeer vr = &nhd->vr[region]; 14266495Sspeer vr->nxge = (uintptr_t)nxge; 14276495Sspeer vr->region = (uintptr_t)region; 14286495Sspeer 14297755SMisaki.Kataoka@Sun.COM nhd->vrs--; 14306495Sspeer 14316495Sspeer MUTEX_EXIT(&nhd->lock); 14326495Sspeer 14336495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 14346495Sspeer 14357755SMisaki.Kataoka@Sun.COM return (vr); 14366495Sspeer } 14376495Sspeer 14386495Sspeer void 14396495Sspeer nxge_hio_unshare( 14407755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr) 14416495Sspeer { 14426495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14436495Sspeer nxge_hio_data_t *nhd; 14446495Sspeer 14456495Sspeer vr_region_t region; 14466495Sspeer 14476495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 14486495Sspeer 14496495Sspeer if (!nxge) { 14506495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 14516495Sspeer "vr->nxge is NULL")); 14526495Sspeer return; 14536495Sspeer } 14546495Sspeer 14556495Sspeer /* 14566495Sspeer * This function is no longer called, but I will keep it 14576495Sspeer * here in case we want to revisit this topic in the future. 
 *
 * nxge_hio_hostinfo_uninit(nxge, vr);
 */
	/* Release the RDC table this VR had been using. */
	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Scrub the VR but preserve its region number, then return it
	 * to the free pool.
	 */
	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

/*
 * nxge_hio_addres
 *
 *	Add <count> DMA channels of type <type> to the VR.  Returns 0
 *	if at least one channel was added; a positive errno only when
 *	not even the first channel could be shared.
 */
int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int count)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	for (i = 0; i < count; i++) {
		int rv;
		if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
			if (i == 0) /* Couldn't get even one DC.
*/ 14966495Sspeer return (-rv); 14976495Sspeer else 14986495Sspeer break; 14996495Sspeer } 15006495Sspeer } 15016495Sspeer 15026495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 15036495Sspeer 15046495Sspeer return (0); 15056495Sspeer } 15066495Sspeer 15076495Sspeer /* ARGSUSED */ 15086495Sspeer void 15096495Sspeer nxge_hio_remres( 15107755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr, 15116495Sspeer mac_ring_type_t type, 15126495Sspeer res_map_t res_map) 15136495Sspeer { 15146495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 15156495Sspeer nxge_grp_t *group; 15166495Sspeer 15176495Sspeer if (!nxge) { 15186495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: " 15196495Sspeer "vr->nxge is NULL")); 15206495Sspeer return; 15216495Sspeer } 15226495Sspeer 15236495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 15246495Sspeer 15256495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 15266495Sspeer while (group->dc) { 15276495Sspeer nxge_hio_dc_t *dc = group->dc; 15286495Sspeer NXGE_DC_RESET(res_map, dc->page); 15296495Sspeer nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 15306495Sspeer } 15316495Sspeer 15326495Sspeer if (res_map) { 15336495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 15346495Sspeer "res_map %lx", res_map)); 15356495Sspeer } 15366495Sspeer 15376495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 15386495Sspeer } 15396495Sspeer 15406495Sspeer /* 15416495Sspeer * nxge_hio_tdc_share 15426495Sspeer * 15436495Sspeer * Share an unused TDC channel. 15446495Sspeer * 15456495Sspeer * Arguments: 15466495Sspeer * nxge 15476495Sspeer * 15486495Sspeer * Notes: 15496495Sspeer * 15506495Sspeer * A.7.3 Reconfigure Tx DMA channel 15516495Sspeer * Disable TxDMA A.9.6.10 15526495Sspeer * [Rebind TxDMA channel to Port A.9.6.7] 15536495Sspeer * 15546495Sspeer * We don't have to Rebind the TDC to the port - it always already bound. 
15556495Sspeer * 15566495Sspeer * Soft Reset TxDMA A.9.6.2 15576495Sspeer * 15586495Sspeer * This procedure will be executed by nxge_init_txdma_channel() in the 15596495Sspeer * guest domain: 15606495Sspeer * 15616495Sspeer * Re-initialize TxDMA A.9.6.8 15626495Sspeer * Reconfigure TxDMA 15636495Sspeer * Enable TxDMA A.9.6.9 15646495Sspeer * 15656495Sspeer * Context: 15666495Sspeer * Service domain 15676495Sspeer */ 15686495Sspeer int 15696495Sspeer nxge_hio_tdc_share( 15706495Sspeer nxge_t *nxge, 15716495Sspeer int channel) 15726495Sspeer { 1573*7812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 15746495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 15756495Sspeer tx_ring_t *ring; 15766713Sspeer int count; 15776495Sspeer 15786495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 15796495Sspeer 15806495Sspeer /* 15816495Sspeer * Wait until this channel is idle. 15826495Sspeer */ 15836495Sspeer ring = nxge->tx_rings->rings[channel]; 15846713Sspeer 15856713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 15866886Sspeer if (ring->tx_ring_busy) { 15876886Sspeer /* 15886886Sspeer * Wait for 30 seconds. 
15896886Sspeer */ 15906886Sspeer for (count = 30 * 1000; count; count--) { 15916886Sspeer if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 15926886Sspeer break; 15936886Sspeer } 15946886Sspeer 15956886Sspeer drv_usecwait(1000); 15966495Sspeer } 15976713Sspeer 15986886Sspeer if (count == 0) { 15996886Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 16006886Sspeer NXGE_TX_RING_ONLINE); 16016886Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 16026886Sspeer "Tx ring %d was always BUSY", channel)); 16036886Sspeer return (-EIO); 16046886Sspeer } 16056886Sspeer } else { 16066713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 16076886Sspeer NXGE_TX_RING_OFFLINED); 16086495Sspeer } 16096495Sspeer 1610*7812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 1611*7812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_TRUE; 1612*7812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 1613*7812SMichael.Speer@Sun.COM 1614*7812SMichael.Speer@Sun.COM 16156495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 16166495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 16176495Sspeer "Failed to remove interrupt for TxDMA channel %d", 16186495Sspeer channel)); 16196495Sspeer return (NXGE_ERROR); 16206495Sspeer } 16216495Sspeer 16226495Sspeer /* Disable TxDMA A.9.6.10 */ 16236495Sspeer (void) nxge_txdma_channel_disable(nxge, channel); 16246495Sspeer 16256495Sspeer /* The SD is sharing this channel. */ 16266495Sspeer NXGE_DC_SET(set->shared.map, channel); 16276495Sspeer set->shared.count++; 16286495Sspeer 16296602Sspeer /* Soft Reset TxDMA A.9.6.2 */ 16306602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 16316602Sspeer 16326495Sspeer /* 16336495Sspeer * Initialize the DC-specific FZC control registers. 
16346495Sspeer * ----------------------------------------------------- 16356495Sspeer */ 16366495Sspeer if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 16376495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 16386495Sspeer "nx_hio_dc_share: FZC TDC failed: %d", channel)); 16396495Sspeer return (-EIO); 16406495Sspeer } 16416495Sspeer 16426495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 16436495Sspeer 16446495Sspeer return (0); 16456495Sspeer } 16466495Sspeer 16476495Sspeer /* 16486495Sspeer * nxge_hio_rdc_share 16496495Sspeer * 16506495Sspeer * Share an unused RDC channel. 16516495Sspeer * 16526495Sspeer * Arguments: 16536495Sspeer * nxge 16546495Sspeer * 16556495Sspeer * Notes: 16566495Sspeer * 16576495Sspeer * This is the latest version of the procedure to 16586495Sspeer * Reconfigure an Rx DMA channel: 16596495Sspeer * 16606495Sspeer * A.6.3 Reconfigure Rx DMA channel 16616495Sspeer * Stop RxMAC A.9.2.6 16626495Sspeer * Drain IPP Port A.9.3.6 16636495Sspeer * Stop and reset RxDMA A.9.5.3 16646495Sspeer * 16656495Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 16666495Sspeer * guest domain: 16676495Sspeer * 16686495Sspeer * Initialize RxDMA A.9.5.4 16696495Sspeer * Reconfigure RxDMA 16706495Sspeer * Enable RxDMA A.9.5.5 16716495Sspeer * 16726495Sspeer * We will do this here, since the RDC is a canalis non grata: 16736495Sspeer * Enable RxMAC A.9.2.10 16746495Sspeer * 16756495Sspeer * Context: 16766495Sspeer * Service domain 16776495Sspeer */ 16786495Sspeer int 16796495Sspeer nxge_hio_rdc_share( 16806495Sspeer nxge_t *nxge, 16816495Sspeer nxge_hio_vr_t *vr, 16826495Sspeer int channel) 16836495Sspeer { 16846495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 16856495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 16866495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 16876495Sspeer nxge_rdc_grp_t *rdc_grp; 16886495Sspeer 16896495Sspeer int current, last; 16906495Sspeer 16916495Sspeer 
NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 16926495Sspeer 16936495Sspeer /* Disable interrupts. */ 16946495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 16956495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 16966495Sspeer "Failed to remove interrupt for RxDMA channel %d", 16976495Sspeer channel)); 16986495Sspeer return (NXGE_ERROR); 16996495Sspeer } 17006495Sspeer 17016495Sspeer /* Stop RxMAC = A.9.2.6 */ 17026495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 17036495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 17046495Sspeer "Failed to disable RxMAC")); 17056495Sspeer } 17066495Sspeer 17076495Sspeer /* Drain IPP Port = A.9.3.6 */ 17086495Sspeer (void) nxge_ipp_drain(nxge); 17096495Sspeer 17106495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 17116495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 17126495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 17136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 17146495Sspeer "Failed to disable RxDMA channel %d", channel)); 17156495Sspeer } 17166495Sspeer 17176495Sspeer /* The SD is sharing this channel. */ 17186495Sspeer NXGE_DC_SET(set->shared.map, channel); 17196495Sspeer set->shared.count++; 17206495Sspeer 17216602Sspeer // Assert RST: RXDMA_CFIG1[30] = 1 17226602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 17236602Sspeer 17246495Sspeer /* 17256495Sspeer * We have to reconfigure the RDC table(s) 17266495Sspeer * to which this channel belongs. 
17276495Sspeer */ 17286495Sspeer current = hardware->def_mac_rxdma_grpid; 17296495Sspeer last = current + hardware->max_rdc_grpids; 17306495Sspeer for (; current < last; current++) { 17316495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 17326495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[current]; 17336495Sspeer rdc_grp->map = set->owned.map; 17346495Sspeer rdc_grp->max_rdcs--; 17356495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current); 17366495Sspeer } 17376495Sspeer } 17386495Sspeer 17396495Sspeer /* 17406495Sspeer * The guest domain will reconfigure the RDC later. 17416495Sspeer * 17426495Sspeer * But in the meantime, we must re-enable the Rx MAC so 17436495Sspeer * that we can start receiving packets again on the 17446495Sspeer * remaining RDCs: 17456495Sspeer * 17466495Sspeer * Enable RxMAC = A.9.2.10 17476495Sspeer */ 17486495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 17496495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17506495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 17516495Sspeer } 17526495Sspeer 17536495Sspeer /* 17546495Sspeer * Initialize the DC-specific FZC control registers. 17556495Sspeer * ----------------------------------------------------- 17566495Sspeer */ 17576495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 17586495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17596495Sspeer "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 17606495Sspeer return (-EIO); 17616495Sspeer } 17626495Sspeer 17636495Sspeer /* 17646495Sspeer * We have to initialize the guest's RDC table, too. 
17656495Sspeer * ----------------------------------------------------- 17666495Sspeer */ 17676495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 17686495Sspeer if (rdc_grp->max_rdcs == 0) { 17696495Sspeer rdc_grp->start_rdc = (uint8_t)channel; 17706495Sspeer rdc_grp->def_rdc = (uint8_t)channel; 17716495Sspeer rdc_grp->max_rdcs = 1; 17726495Sspeer } else { 17736495Sspeer rdc_grp->max_rdcs++; 17746495Sspeer } 17756495Sspeer NXGE_DC_SET(rdc_grp->map, channel); 17766495Sspeer 17776495Sspeer if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 17786495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17796495Sspeer "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 17806495Sspeer return (-EIO); 17816495Sspeer } 17826495Sspeer 17836495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 17846495Sspeer 17856495Sspeer return (0); 17866495Sspeer } 17876495Sspeer 17886495Sspeer /* 17896495Sspeer * nxge_hio_dc_share 17906495Sspeer * 17916495Sspeer * Share a DMA channel with a guest domain. 17926495Sspeer * 17936495Sspeer * Arguments: 17946495Sspeer * nxge 17956495Sspeer * vr The VR that <channel> will belong to. 17966495Sspeer * type Tx or Rx. 17976495Sspeer * res_map The resource map used by the caller, which we will 17986495Sspeer * update if successful. 
17996495Sspeer * 18006495Sspeer * Notes: 18016495Sspeer * 18026495Sspeer * Context: 18036495Sspeer * Service domain 18046495Sspeer */ 18056495Sspeer int 18066495Sspeer nxge_hio_dc_share( 18076495Sspeer nxge_t *nxge, 18086495Sspeer nxge_hio_vr_t *vr, 18096495Sspeer mac_ring_type_t type) 18106495Sspeer { 18116495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 18126495Sspeer nxge_hw_pt_cfg_t *hardware; 18136495Sspeer nxge_hio_dc_t *dc; 18146495Sspeer int channel, limit; 18156495Sspeer 18166495Sspeer nxge_grp_set_t *set; 18176495Sspeer nxge_grp_t *group; 18186495Sspeer 18196495Sspeer int slot; 18206495Sspeer 18216495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 18226495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 18236495Sspeer 18246495Sspeer /* 18256495Sspeer * In version 1.0, we may only give a VR 2 RDCs or TDCs. 18266495Sspeer * Not only that, but the HV has statically assigned the 18276495Sspeer * channels like so: 18286495Sspeer * VR0: RDC0 & RDC1 18296495Sspeer * VR1: RDC2 & RDC3, etc. 18306495Sspeer * The TDCs are assigned in exactly the same way. 18316495Sspeer * 18326495Sspeer * So, for example 18336495Sspeer * hardware->start_rdc + vr->region * 2; 18346495Sspeer * VR1: hardware->start_rdc + 1 * 2; 18356495Sspeer * VR3: hardware->start_rdc + 3 * 2; 18366495Sspeer * If start_rdc is 0, we end up with 2 or 6. 18376495Sspeer * If start_rdc is 8, we end up with 10 or 14. 18386495Sspeer */ 18396495Sspeer 18406495Sspeer set = (type == MAC_RING_TYPE_TX ? 
&nxge->tx_set : &nxge->rx_set); 18416495Sspeer hardware = &nxge->pt_config.hw_config; 18426495Sspeer 18436495Sspeer // This code is still NIU-specific (assuming only 2 ports) 18446495Sspeer channel = hardware->start_rdc + (vr->region % 4) * 2; 18456495Sspeer limit = channel + 2; 18466495Sspeer 18476495Sspeer MUTEX_ENTER(&nhd->lock); 18486495Sspeer for (; channel < limit; channel++) { 18496495Sspeer if ((1 << channel) & set->owned.map) { 18506495Sspeer break; 18516495Sspeer } 18526495Sspeer } 18536495Sspeer 18546495Sspeer if (channel == limit) { 18556495Sspeer MUTEX_EXIT(&nhd->lock); 18566495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 18576495Sspeer "nx_hio_dc_share: there are no channels to share")); 18586495Sspeer return (-EIO); 18596495Sspeer } 18606495Sspeer 18616495Sspeer MUTEX_EXIT(&nhd->lock); 18626495Sspeer 18636495Sspeer /* -------------------------------------------------- */ 18646495Sspeer slot = (type == MAC_RING_TYPE_TX) ? 18656495Sspeer nxge_hio_tdc_share(nxge, channel) : 18666495Sspeer nxge_hio_rdc_share(nxge, vr, channel); 18676495Sspeer 18686495Sspeer if (slot < 0) { 18696495Sspeer if (type == MAC_RING_TYPE_RX) { 18706495Sspeer nxge_hio_rdc_unshare(nxge, channel); 18716495Sspeer } else { 18726495Sspeer nxge_hio_tdc_unshare(nxge, channel); 18736495Sspeer } 18746495Sspeer return (slot); 18756495Sspeer } 18766495Sspeer 18776495Sspeer MUTEX_ENTER(&nhd->lock); 18786495Sspeer 18796495Sspeer /* 18806495Sspeer * Tag this channel. 18816495Sspeer * -------------------------------------------------- 18826495Sspeer */ 18836495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 18846495Sspeer 18856495Sspeer dc->vr = vr; 18866495Sspeer dc->channel = (nxge_channel_t)channel; 18876495Sspeer 18886495Sspeer MUTEX_EXIT(&nhd->lock); 18896495Sspeer 18906495Sspeer /* 18916495Sspeer * vr->[t|r]x_group is used by the service domain to 18926495Sspeer * keep track of its shared DMA channels. 
18936495Sspeer */ 18946495Sspeer MUTEX_ENTER(&nxge->group_lock); 18956495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 18966495Sspeer 18977755SMisaki.Kataoka@Sun.COM dc->group = group; 18986495Sspeer 18996495Sspeer /* Initialize <group>, if necessary */ 19006495Sspeer if (group->count == 0) { 19016495Sspeer group->nxge = nxge; 19026495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 19036495Sspeer VP_BOUND_TX : VP_BOUND_RX; 19046495Sspeer group->sequence = nhd->sequence++; 19056495Sspeer group->active = B_TRUE; 19066495Sspeer } 19076495Sspeer 19086495Sspeer MUTEX_EXIT(&nxge->group_lock); 19096495Sspeer 19106495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 19116495Sspeer "DC share: %cDC %d was assigned to slot %d", 19126495Sspeer type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 19136495Sspeer 19146495Sspeer nxge_grp_dc_append(nxge, group, dc); 19156495Sspeer 19166495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 19176495Sspeer 19186495Sspeer return (0); 19196495Sspeer } 19206495Sspeer 19216495Sspeer /* 19226495Sspeer * nxge_hio_tdc_unshare 19236495Sspeer * 19246495Sspeer * Unshare a TDC. 19256495Sspeer * 19266495Sspeer * Arguments: 19276495Sspeer * nxge 19286495Sspeer * channel The channel to unshare (add again). 
19296495Sspeer * 19306495Sspeer * Notes: 19316495Sspeer * 19326495Sspeer * Context: 19336495Sspeer * Service domain 19346495Sspeer */ 19356495Sspeer void 19366495Sspeer nxge_hio_tdc_unshare( 19376495Sspeer nxge_t *nxge, 19386495Sspeer int channel) 19396495Sspeer { 19406495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 19417755SMisaki.Kataoka@Sun.COM nxge_grp_t *group = set->group[0]; 19426495Sspeer 19436495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 19446495Sspeer 19456495Sspeer NXGE_DC_RESET(set->shared.map, channel); 19466495Sspeer set->shared.count--; 19476495Sspeer 19487755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 19496495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19506495Sspeer "Failed to initialize TxDMA channel %d", channel)); 19516495Sspeer return; 19526495Sspeer } 19536495Sspeer 19546495Sspeer /* Re-add this interrupt. */ 19556495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 19566495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19576495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 19586495Sspeer } 19596495Sspeer 19606495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 19616495Sspeer } 19626495Sspeer 19636495Sspeer /* 19646495Sspeer * nxge_hio_rdc_unshare 19656495Sspeer * 19666495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 19676495Sspeer * 19686495Sspeer * Arguments: 19696495Sspeer * nxge 19706495Sspeer * channel The channel to unshare (add again). 
19716495Sspeer * 19726495Sspeer * Notes: 19736495Sspeer * 19746495Sspeer * Context: 19756495Sspeer * Service domain 19766495Sspeer */ 19776495Sspeer void 19786495Sspeer nxge_hio_rdc_unshare( 19796495Sspeer nxge_t *nxge, 19806495Sspeer int channel) 19816495Sspeer { 19826495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19836495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 19846495Sspeer 19856495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 19867755SMisaki.Kataoka@Sun.COM nxge_grp_t *group = set->group[0]; 19876495Sspeer int current, last; 19886495Sspeer 19896495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 19906495Sspeer 19916495Sspeer /* Stop RxMAC = A.9.2.6 */ 19926495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 19936495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19946495Sspeer "Failed to disable RxMAC")); 19956495Sspeer } 19966495Sspeer 19976495Sspeer /* Drain IPP Port = A.9.3.6 */ 19986495Sspeer (void) nxge_ipp_drain(nxge); 19996495Sspeer 20006495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 20016495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 20026495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 20036495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 20046495Sspeer "Failed to disable RxDMA channel %d", channel)); 20056495Sspeer } 20066495Sspeer 20076495Sspeer NXGE_DC_RESET(set->shared.map, channel); 20086495Sspeer set->shared.count--; 20096495Sspeer 20106495Sspeer /* 20116495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 20126495Sspeer * 20136495Sspeer * Initialize RxDMA A.9.5.4 20146495Sspeer * Reconfigure RxDMA 20156495Sspeer * Enable RxDMA A.9.5.5 20166495Sspeer */ 20177755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 20186495Sspeer /* Be sure to re-enable the RX MAC. 
*/ 20196495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 20206495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20216495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 20226495Sspeer } 20236495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 20246495Sspeer "Failed to initialize RxDMA channel %d", channel)); 20256495Sspeer return; 20266495Sspeer } 20276495Sspeer 20286495Sspeer /* 20296495Sspeer * We have to reconfigure the RDC table(s) 20306495Sspeer * to which this channel once again belongs. 20316495Sspeer */ 20326495Sspeer current = hardware->def_mac_rxdma_grpid; 20336495Sspeer last = current + hardware->max_rdc_grpids; 20346495Sspeer for (; current < last; current++) { 20356495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 20366495Sspeer nxge_rdc_grp_t *group; 20376495Sspeer group = &nxge->pt_config.rdc_grps[current]; 20386495Sspeer group->map = set->owned.map; 20396495Sspeer group->max_rdcs++; 20406495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current); 20416495Sspeer } 20426495Sspeer } 20436495Sspeer 20446495Sspeer /* 20456495Sspeer * Enable RxMAC = A.9.2.10 20466495Sspeer */ 20476495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 20486495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20496495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 20506495Sspeer return; 20516495Sspeer } 20526495Sspeer 20536495Sspeer /* Re-add this interrupt. */ 20546495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 20556495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20566495Sspeer "nx_hio_rdc_unshare: Failed to add interrupt for " 20576495Sspeer "RxDMA CHANNEL %d", channel)); 20586495Sspeer } 20596495Sspeer 20606495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 20616495Sspeer } 20626495Sspeer 20636495Sspeer /* 20646495Sspeer * nxge_hio_dc_unshare 20656495Sspeer * 20666495Sspeer * Unshare (reuse) a DMA channel. 
20676495Sspeer * 20686495Sspeer * Arguments: 20696495Sspeer * nxge 20706495Sspeer * vr The VR that <channel> belongs to. 20716495Sspeer * type Tx or Rx. 20726495Sspeer * channel The DMA channel to reuse. 20736495Sspeer * 20746495Sspeer * Notes: 20756495Sspeer * 20766495Sspeer * Context: 20776495Sspeer * Service domain 20786495Sspeer */ 20796495Sspeer void 20806495Sspeer nxge_hio_dc_unshare( 20816495Sspeer nxge_t *nxge, 20826495Sspeer nxge_hio_vr_t *vr, 20836495Sspeer mac_ring_type_t type, 20846495Sspeer int channel) 20856495Sspeer { 20866495Sspeer nxge_grp_t *group; 20876495Sspeer nxge_hio_dc_t *dc; 20886495Sspeer 20896495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 20906495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 20916495Sspeer 20926495Sspeer /* Unlink the channel from its group. */ 20936495Sspeer /* -------------------------------------------------- */ 20946495Sspeer group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 20956602Sspeer NXGE_DC_RESET(group->map, channel); 20966495Sspeer if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 20976495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20986495Sspeer "nx_hio_dc_unshare(%d) failed", channel)); 20996495Sspeer return; 21006495Sspeer } 21016495Sspeer 21026495Sspeer dc->vr = 0; 21036495Sspeer dc->cookie = 0; 21046495Sspeer 21056495Sspeer if (type == MAC_RING_TYPE_RX) { 21066495Sspeer nxge_hio_rdc_unshare(nxge, channel); 21076495Sspeer } else { 21086495Sspeer nxge_hio_tdc_unshare(nxge, channel); 21096495Sspeer } 21106495Sspeer 21116495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 21126495Sspeer } 21136495Sspeer 21146495Sspeer #endif /* if defined(sun4v) */ 2115