/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define NXGE_HIO_SHARE_MIN_CHANNELS 2
#define NXGE_HIO_SHARE_MAX_CHANNELS 2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 *    Figure out if we are in a guest domain or not.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
void
nxge_get_environs(
    nxge_t *nxge)
{
    char *string;

    /*
     * In the beginning, assume that we are running sans LDOMs/XEN.
     */
    nxge->environs = SOLARIS_DOMAIN;

    /*
     * Are we a hybrid I/O (HIO) guest domain driver?
     */
    if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "niutype", &string)) == DDI_PROP_SUCCESS) {
        if (strcmp(string, "n2niu") == 0) {
            nxge->environs = SOLARIS_GUEST_DOMAIN;
            /* So we can allocate properly-aligned memory. */
            nxge->niu_type = N2_NIU;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "Hybrid IO-capable guest domain"));
        }
        ddi_prop_free(string);
    }
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 *    Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *    This is the non-hybrid I/O version of this function.
 *
 * Context:
 *    Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
    nxge_hio_data_t *nhd;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    nhd->hio.ldoms = B_FALSE;

    return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    ASSERT(nxge->nxge_hw_p->ndevs == 0);

    if (nhd != NULL) {
        MUTEX_DESTROY(&nhd->lock);
        KMEM_FREE(nhd, sizeof (*nhd));
        nxge->nxge_hw_p->hio = 0;
    }
}

/*
 * nxge_dci_map
 *
 *    Map a DMA channel index to a channel number.
 *
 * Arguments:
 *    nxge
 *    type    The type of channel this is: Tx or Rx.
 *    index   The index to convert to a channel number
 *
 * Notes:
 *    This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
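 *
 *    For example (illustrative values, not taken from the hardware
 *    manual): if set->owned.map is 0x1c, i.e. channels 2, 3 & 4 are
 *    owned, then index 0 maps to channel 2, index 1 to channel 3, and
 *    index 2 to channel 4.  An index beyond the owned set yields -1.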
 *
 * Context:
 *    Any domain
 */
int
nxge_dci_map(
    nxge_t *nxge,
    vpc_type_t type,
    int index)
{
    nxge_grp_set_t *set;
    int dc;

    switch (type) {
    case VP_BOUND_TX:
        set = &nxge->tx_set;
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        break;
    }

    for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
        if ((1 << dc) & set->owned.map) {
            if (index == 0)
                return (dc);
            else
                index--;
        }
    }

    return (-1);
}

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 *    Remove all outstanding groups.
 *
 * Arguments:
 *    nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
    nxge_grp_set_t *set;
    int i;

    MUTEX_ENTER(&nxge->group_lock);

    /*
     * Find RX groups that need to be cleaned up.
     */
    set = &nxge->rx_set;
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] != NULL) {
            KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
            set->group[i] = NULL;
        }
    }

    /*
     * Find TX groups that need to be cleaned up.
     */
    set = &nxge->tx_set;
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] != NULL) {
            KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
            set->group[i] = NULL;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);
}


/*
 * nxge_grp_add
 *
 *    Add a group to an instance of NXGE.
 *
 * Arguments:
 *    nxge
 *    type    Tx or Rx
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
nxge_grp_t *
nxge_grp_add(
    nxge_t *nxge,
    nxge_grp_type_t type)
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    int i;

    group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
    group->nxge = nxge;

    MUTEX_ENTER(&nxge->group_lock);
    switch (type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    group->type = type;
    group->active = B_TRUE;
    group->sequence = set->sequence++;

    /* Find an empty slot for this logical group. */
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] == 0) {
            group->index = i;
            set->group[i] = group;
            NXGE_DC_SET(set->lg.map, i);
            set->lg.count++;
            break;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_add: %cgroup = %d.%d",
        type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    return (group);
}
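
/*
 * Illustrative sketch only: the exact call sequence below is an
 * assumption, not copied from nxge_main.c.  A service-domain caller
 * would typically pair nxge_grp_add() above with nxge_grp_dc_add()
 * and nxge_grp_remove() below, roughly like this:
 *
 *    nxge_grp_t *group;
 *
 *    group = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *    if (nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel) != NXGE_OK)
 *        nxge_grp_remove(nxge, group);
 *
 * nxge_grp_remove() tears down whatever DCs are still linked to the
 * group before freeing it.
 */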

void
nxge_grp_remove(
    nxge_t *nxge,
    nxge_grp_t *group)	/* The group to remove. */
{
    nxge_grp_set_t *set;
    vpc_type_t type;

    MUTEX_ENTER(&nxge->group_lock);
    switch (group->type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    if (set->group[group->index] != group) {
        MUTEX_EXIT(&nxge->group_lock);
        return;
    }

    set->group[group->index] = 0;
    NXGE_DC_RESET(set->lg.map, group->index);
    set->lg.count--;

    /* While inside the mutex, deactivate <group>. */
    group->active = B_FALSE;

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_remove(%c.%d.%d) called",
        group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    /* Now, remove any DCs which are still active. */
    switch (group->type) {
    default:
        type = VP_BOUND_TX;
        break;
    case NXGE_RECEIVE_GROUP:
    case EXT_RECEIVE_GROUP:
        type = VP_BOUND_RX;
    }

    while (group->dc) {
        nxge_grp_dc_remove(nxge, type, group->dc->channel);
    }

    KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 *    Add a DMA channel to a VR/Group.
 *
 * Arguments:
 *    nxge
 *    channel    The channel to add.
 * Notes:
 *
 * Context:
 *    Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
    nxge_t *nxge,
    nxge_grp_t *group,	/* The group to add <channel> to. */
    vpc_type_t type,	/* Rx or Tx */
    int channel)	/* A physical/logical channel number */
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

    if (group == NULL)
        return (0);

    switch (type) {
    default:
        set = &nxge->tx_set;
        if (channel > NXGE_MAX_TDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: TDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        if (channel > NXGE_MAX_RDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: RDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
        type == VP_BOUND_TX ? 't' : 'r',
        nxge->mac.portnum, group->sequence, group->count, channel));

    MUTEX_ENTER(&nxge->group_lock);
    if (group->active != B_TRUE) {
        /* We may be in the process of removing this group. */
        MUTEX_EXIT(&nxge->group_lock);
        return (NXGE_ERROR);
    }
    MUTEX_EXIT(&nxge->group_lock);

    if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): DC FIND failed", channel));
        return (NXGE_ERROR);
    }

    MUTEX_ENTER(&nhd->lock);

    if (dc->group) {
        MUTEX_EXIT(&nhd->lock);
        /* This channel is already in use! */
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel already in group", channel));
        return (NXGE_ERROR);
    }

    dc->next = 0;
    dc->page = channel;
    dc->channel = (nxge_channel_t)channel;

    dc->type = type;
    if (type == VP_BOUND_RX) {
        dc->init = nxge_init_rxdma_channel;
        dc->uninit = nxge_uninit_rxdma_channel;
    } else {
        dc->init = nxge_init_txdma_channel;
        dc->uninit = nxge_uninit_txdma_channel;
    }

    dc->group = group;

    if (isLDOMguest(nxge))
        (void) nxge_hio_ldsv_add(nxge, dc);

    NXGE_DC_SET(set->owned.map, channel);
    set->owned.count++;

    MUTEX_EXIT(&nhd->lock);

    if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel init failed", channel));
        return (NXGE_ERROR);
    }

    nxge_grp_dc_append(nxge, group, dc);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

    return ((int)status);
}

void
nxge_grp_dc_remove(
    nxge_t *nxge,
    vpc_type_t type,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;

    dc_uninit_t uninit;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

    if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove: find(%d) failed", channel));
        return;
    }
    group = (nxge_grp_t *)dc->group;

    if (isLDOMguest(nxge)) {
        (void) nxge_hio_intr_remove(nxge, type, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "DC remove: group = %d.%d.%d, %cdc %d",
        nxge->mac.portnum, group->sequence, group->count,
        type == VP_BOUND_TX ? 't' : 'r', dc->channel));

    MUTEX_ENTER(&nhd->lock);

    set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
    if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
        NXGE_DC_RESET(group->map, channel);
    }

    /* Remove the DC from its group. */
    if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
        MUTEX_EXIT(&nhd->lock);
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove(%d) failed", channel));
        return;
    }

    uninit = dc->uninit;
    channel = dc->channel;

    NXGE_DC_RESET(set->owned.map, channel);
    set->owned.count--;

    (void) memset(dc, 0, sizeof (*dc));

    MUTEX_EXIT(&nhd->lock);

    (*uninit)(nxge, channel);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
    nxge_t *nxge,
    vpc_type_t type,	/* Rx or Tx */
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *current;

    current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

    if (!isLDOMguest(nxge)) {
        return (&current[channel]);
    } else {
        /* We're in a guest domain. */
        int i, limit = (type == VP_BOUND_TX) ?
            NXGE_MAX_TDCS : NXGE_MAX_RDCS;

        MUTEX_ENTER(&nhd->lock);
        for (i = 0; i < limit; i++, current++) {
            if (current->channel == channel) {
                if (current->vr && current->vr->nxge ==
                    (uintptr_t)nxge) {
                    MUTEX_EXIT(&nhd->lock);
                    return (current);
                }
            }
        }
        MUTEX_EXIT(&nhd->lock);
    }

    return (0);
}

/*
 * nxge_grp_dc_append
 *
 *    Append a DMA channel to a group.
 *
 * Arguments:
 *    nxge
 *    group    The group to append to
 *    dc       The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
static
void
nxge_grp_dc_append(
    nxge_t *nxge,
    nxge_grp_t *group,
    nxge_hio_dc_t *dc)
{
    MUTEX_ENTER(&nxge->group_lock);

    if (group->dc == 0) {
        group->dc = dc;
    } else {
        nxge_hio_dc_t *current = group->dc;
        do {
            if (current->next == 0) {
                current->next = dc;
                break;
            }
            current = current->next;
        } while (current);
    }

    NXGE_DC_SET(group->map, dc->channel);

    nxge_grp_dc_map(group);
    group->count++;

    MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 *    Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 *    nxge
 *    group      The group (linked list) to unlink from
 *    channel    The channel number of the DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
    nxge_t *nxge,
    nxge_grp_t *group,
    int channel)
{
    nxge_hio_dc_t *current, *previous;

    MUTEX_ENTER(&nxge->group_lock);

    if ((current = group->dc) == 0) {
        MUTEX_EXIT(&nxge->group_lock);
        return (0);
    }

    previous = 0;
    do {
        if (current->channel == channel) {
            if (previous)
                previous->next = current->next;
            else
                group->dc = current->next;
            break;
        }
        previous = current;
        current = current->next;
    } while (current);

    if (current == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "DC unlink: DC %d not found", channel));
    } else {
        current->next = 0;
        current->group = 0;

        group->count--;
    }

    nxge_grp_dc_map(group);

    MUTEX_EXIT(&nxge->group_lock);

    return (current);
}

/*
 * nxge_grp_dc_map
 *
 *    Map a linked list to an array of channel numbers.
 *
 * Arguments:
 *    nxge
 *    group    The group to remap.
 *
 * Notes:
 *    It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *    Service domain
 */
void
nxge_grp_dc_map(
    nxge_grp_t *group)
{
    nxge_channel_t *legend;
    nxge_hio_dc_t *dc;

    (void) memset(group->legend, 0, sizeof (group->legend));

    legend = group->legend;
    dc = group->dc;
    while (dc) {
        *legend = dc->channel;
        legend++;
        dc = dc->next;
    }
}
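
/*
 * Example (hypothetical channel numbers, shown only to illustrate the
 * data structures above): if a group's DC list is 3 -> 5 -> 6, then
 * nxge_grp_dc_map() leaves group->legend[] = { 3, 5, 6, 0, ... } and
 * nxge_grp_dc_append() will have set bits 3, 5 & 6 in group->map.
 */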

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 *    Delay <seconds> number of seconds.
 *
 * Arguments:
 *    seconds    The number of seconds to delay.
 *
 * Notes:
 *    This is a developer-only function.
 *
 * Context:
 *    Any domain
 */
void
nxge_delay(
    int seconds)
{
    delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
    { "RXDMA_CFIG1", 0 },
    { "RXDMA_CFIG2", 8 },
    { "RBR_CFIG_A", 0x10 },
    { "RBR_CFIG_B", 0x18 },
    { "RBR_KICK", 0x20 },
    { "RBR_STAT", 0x28 },
    { "RBR_HDH", 0x30 },
    { "RBR_HDL", 0x38 },
    { "RCRCFIG_A", 0x40 },
    { "RCRCFIG_B", 0x48 },
    { "RCRSTAT_A", 0x50 },
    { "RCRSTAT_B", 0x58 },
    { "RCRSTAT_C", 0x60 },
    { "RX_DMA_ENT_MSK", 0x68 },
    { "RX_DMA_CTL_STAT", 0x70 },
    { "RCR_FLSH", 0x78 },
    { "RXMISC", 0x90 },
    { "RX_DMA_CTL_STAT_DBG", 0x98 },
    { 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
    { "Tx_RNG_CFIG", 0 },
    { "Tx_RNG_HDL", 0x10 },
    { "Tx_RNG_KICK", 0x18 },
    { "Tx_ENT_MASK", 0x20 },
    { "Tx_CS", 0x28 },
    { "TxDMA_MBH", 0x30 },
    { "TxDMA_MBL", 0x38 },
    { "TxDMA_PRE_ST", 0x40 },
    { "Tx_RNG_ERR_LOGH", 0x48 },
    { "Tx_RNG_ERR_LOGL", 0x50 },
    { "TDMC_INTR_DBG", 0x60 },
    { "Tx_CS_DBG", 0x68 },
    { 0, -1 }
};

/*
 * nxge_xx2str
 *
 *    Translate a register address into a string.
 *
 * Arguments:
 *    offset    The address of the register to translate.
 *
 * Notes:
 *    These are developer-only functions.
 *
 * Context:
 *    Any domain
 */
const char *
nxge_rx2str(
    int offset)
{
    dmc_reg_name_t *reg = &rx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

const char *
nxge_tx2str(
    int offset)
{
    dmc_reg_name_t *reg = &tx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

/*
 * nxge_ddi_perror
 *
 *    Map a DDI error number to a string.
 *
 * Arguments:
 *    ddi_error    The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
const char *
nxge_ddi_perror(
    int ddi_error)
{
    switch (ddi_error) {
    case DDI_SUCCESS:
        return ("DDI_SUCCESS");
    case DDI_FAILURE:
        return ("DDI_FAILURE");
    case DDI_NOT_WELL_FORMED:
        return ("DDI_NOT_WELL_FORMED");
    case DDI_EAGAIN:
        return ("DDI_EAGAIN");
    case DDI_EINVAL:
        return ("DDI_EINVAL");
    case DDI_ENOTSUP:
        return ("DDI_ENOTSUP");
    case DDI_EPENDING:
        return ("DDI_EPENDING");
    case DDI_ENOMEM:
        return ("DDI_ENOMEM");
    case DDI_EBUSY:
        return ("DDI_EBUSY");
    case DDI_ETRANSPORT:
        return ("DDI_ETRANSPORT");
    case DDI_ECONTEXT:
        return ("DDI_ECONTEXT");
    default:
        return ("Unknown error");
    }
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 *    Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
int
nxge_hio_init(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd;
    int i, region;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    if ((nxge->environs == SOLARIS_DOMAIN) &&
        (nxge->niu_type == N2_NIU)) {
        if (nxge->niu_hsvc_available == B_TRUE) {
            hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
            if (niu_hsvc->hsvc_major == 1 &&
                niu_hsvc->hsvc_minor == 1)
                nxge->environs = SOLARIS_SERVICE_DOMAIN;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "nxge_hio_init: hypervisor services "
                "version %d.%d",
                niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
        }
    }

    if (!isLDOMs(nxge)) {
        nhd->hio.ldoms = B_FALSE;
        return (NXGE_OK);
    }

    nhd->hio.ldoms = B_TRUE;

    /*
     * Fill in what we can.
     */
    for (region = 0; region < NXGE_VR_SR_MAX; region++) {
        nhd->vr[region].region = region;
    }
    nhd->vrs = NXGE_VR_SR_MAX - 2;

    /*
     * Initialize share and ring group structures.
     */
    for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
        nxge->rx_hio_groups[i].ghandle = NULL;
        nxge->rx_hio_groups[i].nxgep = nxge;
        nxge->rx_hio_groups[i].gindex = 0;
        nxge->rx_hio_groups[i].sindex = 0;
    }

    for (i = 0; i < NXGE_VR_SR_MAX; i++) {
        nxge->shares[i].nxgep = nxge;
        nxge->shares[i].index = 0;
        nxge->shares[i].vrp = (void *)NULL;
        nxge->shares[i].tmap = 0;
        nxge->shares[i].rmap = 0;
        nxge->shares[i].rxgroup = 0;
        nxge->shares[i].active = B_FALSE;
    }

    /* Fill in the HV HIO function pointers. */
    nxge_hio_hv_init(nxge);

    if (isLDOMservice(nxge)) {
        NXGE_DEBUG_MSG((nxge, HIO_CTL,
            "Hybrid IO-capable service domain"));
        return (NXGE_OK);
    } else {
        /*
         * isLDOMguest(nxge) == B_TRUE
         */
        nx_vio_fp_t *vio;
        nhd->type = NXGE_HIO_TYPE_GUEST;

        vio = &nhd->hio.vio;
        vio->__register = (vio_net_resource_reg_t)
            modgetsymvalue("vio_net_resource_reg", 0);
        vio->unregister = (vio_net_resource_unreg_t)
            modgetsymvalue("vio_net_resource_unreg", 0);

        if (vio->__register == 0 || vio->unregister == 0) {
            NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
            return (NXGE_ERROR);
        }
    }

    return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int rv, sindex;
    nxge_hio_vr_t *vr;	/* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Program the mac address for the group/share.
     */
    if ((rv = nxge_hio_hostinfo_init(nxge, vr,
        (ether_addr_t *)mac_addr)) != 0) {
        return (rv);
    }

    return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int sindex;
    nxge_hio_vr_t *vr;	/* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Remove the mac address for the group/share.
     */
    nxge_hio_hostinfo_uninit(nxge, vr);

    return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
    p_nxge_t nxgep = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;

    switch (type) {
    case MAC_RING_TYPE_RX:
        rxgroup = &nxgep->rx_hio_groups[group];
        rxgroup->gindex = group;

        infop->mrg_driver = (mac_group_driver_t)rxgroup;
        infop->mrg_start = NULL;
        infop->mrg_stop = NULL;
        infop->mrg_addmac = nxge_hio_add_mac;
        infop->mrg_remmac = nxge_hio_rem_mac;
        infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
        break;

    case MAC_RING_TYPE_TX:
        break;
    }
}

int
nxge_hio_share_assign(
    nxge_t *nxge,
    uint64_t cookie,
    res_map_t *tmap,
    res_map_t *rmap,
    nxge_hio_vr_t *vr)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    uint64_t slot, hv_rv;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    int i;

    /*
     * Ask the Hypervisor to set up the VR for us
     */
    fp = &nhd->hio.vr;
    if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
        NXGE_ERROR_MSG((nxge, HIO_CTL,
            "nx_hio_share_assign: "
            "vr->assign() returned %d", hv_rv));
        nxge_hio_unshare(vr);
        return (-EIO);
    }

    /*
     * For each shared TDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->tx_group.dc;
    for (i = 0; i < NXGE_MAX_TDCS; i++) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        while (dc) {
            hv_rv = (*tx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "tx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*tmap) |= 1 << slot;

            dc = dc->next;
        }
    }
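
    /*
     * At this point every TDC in the VR's tx_group has been bound to
     * an HV page, and <tmap> has one bit set per page.  For example
     * (hypothetical values): if the two TDCs were assigned pages 0
     * and 1, then *tmap == 0x3.  The RDC loop below builds <rmap>
     * the same way.
     */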

    /*
     * For each shared RDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->rx_group.dc;
    for (i = 0; i < NXGE_MAX_RDCS; i++) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        while (dc) {
            hv_rv = (*rx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "rx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*rmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    return (0);
}

int
nxge_hio_share_unassign(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    uint64_t hv_rv;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    dc = vr->tx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        hv_rv = (*tx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "tx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    dc = vr->rx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        hv_rv = (*rx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "rx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    fp = &nhd->hio.vr;
    if (fp->unassign) {
        hv_rv = (*fp->unassign)(vr->cookie);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
                "vr->assign(%x) failed: %ld",
                vr->cookie, hv_rv));
        }
    }

    return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
    mac_share_handle_t *shandle)
{
    p_nxge_t nxge = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;
    nxge_share_handle_t *shp;

    nxge_hio_vr_t *vr;	/* The Virtualization Region */
    uint64_t rmap, tmap;
    int rv;

    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

    if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
        nhd->hio.rx.assign == 0) {
        NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
        return (EIO);
    }

    /*
     * Get a VR.
     */
    if ((vr = nxge_hio_vr_share(nxge)) == 0)
        return (EAGAIN);

    /*
     * Get an RDC group for us to use.
     */
    if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
        nxge_hio_unshare(vr);
        return (EBUSY);
    }

    /*
     * Add resources to the share.
     */
    tmap = 0;
    rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_unshare(vr);
        return (rv);
    }

    rmap = 0;
    rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(vr);
        return (rv);
    }

    if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
        nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
        nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(vr);
        return (rv);
    }

    rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
    rxgroup->gindex = vr->rdc_tbl;
    rxgroup->sindex = vr->region;

    shp = &nxge->shares[vr->region];
    shp->index = vr->region;
    shp->vrp = (void *)vr;
    shp->tmap = tmap;
    shp->rmap = rmap;
    shp->rxgroup = vr->rdc_tbl;
    shp->active = B_TRUE;

    /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
    *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

    *shandle = (mac_share_handle_t)shp;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
    return (0);
}
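
/*
 * Illustration only (values invented for the example): if niu_cfg_hdl
 * were 0x200 and the HV returned a cookie of 0x8, the caller of
 * nxge_hio_share_alloc() would see *rcookie == 0x0000020000000008,
 * i.e. cfg_hdl in the high 32 bits and the HV cookie in the low 32.
 */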

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    /*
     * First, unassign the VR (take it back),
     * so we can enable interrupts again.
     */
    (void) nxge_hio_share_unassign(shp->vrp);

    /*
     * Free Ring Resources for TX and RX
     */
    nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
    nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

    /*
     * Free VR resource.
     */
    nxge_hio_unshare(shp->vrp);

    /*
     * Clear internal handle state.
     */
    shp->index = 0;
    shp->vrp = (void *)NULL;
    shp->tmap = 0;
    shp->rmap = 0;
    shp->rxgroup = 0;
    shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    switch (type) {
    case MAC_RING_TYPE_RX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->rmap;
        *gnum = shp->rxgroup;
        break;

    case MAC_RING_TYPE_TX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->tmap;
        *gnum = 0;
        break;
    }
}

/*
 * nxge_hio_vr_share
 *
 *    Find an unused Virtualization Region (VR).
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
nxge_hio_vr_t *
nxge_hio_vr_share(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_vr_t *vr;

    int first, limit, region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

    MUTEX_ENTER(&nhd->lock);

    if (nhd->vrs == 0) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    /* Find an empty virtual region (VR). */
    if (nxge->function_num == 0) {
        // FUNC0_VIR0 'belongs' to NIU port 0.
        first = FUNC0_VIR1;
        limit = FUNC2_VIR0;
    } else if (nxge->function_num == 1) {
        // FUNC2_VIR0 'belongs' to NIU port 1.
        first = FUNC2_VIR1;
        limit = FUNC_VIR_MAX;
    } else {
        cmn_err(CE_WARN,
            "Shares not supported on function(%d) at this time.\n",
            nxge->function_num);
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    for (region = first; region < limit; region++) {
        if (nhd->vr[region].nxge == 0)
            break;
    }

    if (region == limit) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    vr = &nhd->vr[region];
    vr->nxge = (uintptr_t)nxge;
    vr->region = (uintptr_t)region;

    nhd->vrs--;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

    return (vr);
}

void
nxge_hio_unshare(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;

    vr_region_t region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
            "vr->nxge is NULL"));
        return;
    }

    /*
     * This function is no longer called, but I will keep it
     * here in case we want to revisit this topic in the future.
     *
     * nxge_hio_hostinfo_uninit(nxge, vr);
     */
    (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    MUTEX_ENTER(&nhd->lock);

    region = vr->region;
    (void) memset(vr, 0, sizeof (*vr));
    vr->region = region;

    nhd->vrs++;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    int count)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    int i;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

    if (!nxge)
        return (EINVAL);

    for (i = 0; i < count; i++) {
        int rv;
        if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
            if (i == 0) /* Couldn't get even one DC. */
                return (-rv);
            else
                break;
        }
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

    return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    res_map_t res_map)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_grp_t *group;

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
            "vr->nxge is NULL"));
        return;
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
    while (group->dc) {
        nxge_hio_dc_t *dc = group->dc;
        NXGE_DC_RESET(res_map, dc->page);
        nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
    }

    if (res_map) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
            "res_map %lx", res_map));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 *    Share an unused TDC channel.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *    Disable TxDMA			A.9.6.10
 *    [Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *    Soft Reset TxDMA			A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *    Re-initialize TxDMA		A.9.6.8
 *    Reconfigure TxDMA
 *    Enable TxDMA			A.9.6.9
 *
 * Context:
 *    Service domain
 */
int
nxge_hio_tdc_share(
    nxge_t *nxge,
    int channel)
{
    nxge_grp_set_t *set = &nxge->tx_set;
    tx_ring_t *ring;
    int count;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

    /*
     * Wait until this channel is idle.
     */
    ring = nxge->tx_rings->rings[channel];

    (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
    if (ring->tx_ring_busy) {
        /*
         * Wait for 30 seconds.
         */
        for (count = 30 * 1000; count; count--) {
            if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
                break;
            }

            drv_usecwait(1000);
        }

        if (count == 0) {
            (void) atomic_swap_32(&ring->tx_ring_offline,
                NXGE_TX_RING_ONLINE);
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
                "Tx ring %d was always BUSY", channel));
            return (-EIO);
        }
    } else {
        (void) atomic_swap_32(&ring->tx_ring_offline,
            NXGE_TX_RING_OFFLINED);
    }

    if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
            "Failed to remove interrupt for TxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Disable TxDMA A.9.6.10 */
    (void) nxge_txdma_channel_disable(nxge, channel);

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    /* Soft Reset TxDMA A.9.6.2 */
    nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

    /*
     * Initialize the DC-specific FZC control registers.
     * -----------------------------------------------------
     */
    if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_share: FZC TDC failed: %d", channel));
        return (-EIO);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

    return (0);
}

/*
 * nxge_hio_rdc_share
 *
 *    Share an unused RDC channel.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *    Stop RxMAC		A.9.2.6
 *    Drain IPP Port		A.9.3.6
 *    Stop and reset RxDMA	A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *    Initialize RxDMA		A.9.5.4
 *    Reconfigure RxDMA
 *    Enable RxDMA		A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *    Enable RxMAC		A.9.2.10
 *
 * Context:
 *    Service domain
 */
int
nxge_hio_rdc_share(
    nxge_t *nxge,
    nxge_hio_vr_t *vr,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;
    nxge_grp_set_t *set = &nxge->rx_set;
    nxge_rdc_grp_t *rdc_grp;

    int current, last;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

    /* Disable interrupts. */
    if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: "
            "Failed to remove interrupt for RxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Stop RxMAC = A.9.2.6 */
    if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
            "Failed to disable RxMAC"));
    }

    /* Drain IPP Port = A.9.3.6 */
    (void) nxge_ipp_drain(nxge);

    /* Stop and reset RxDMA = A.9.5.3 */
    // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
    if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
            "Failed to disable RxDMA channel %d", channel));
    }

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    // Assert RST: RXDMA_CFIG1[30] = 1
    nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

    /*
     * We have to reconfigure the RDC table(s)
     * to which this channel belongs.
     */
    current = hardware->def_mac_rxdma_grpid;
    last = current + hardware->max_rdc_grpids;
    for (; current < last; current++) {
        if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
            rdc_grp = &nxge->pt_config.rdc_grps[current];
            rdc_grp->map = set->owned.map;
            rdc_grp->max_rdcs--;
            (void) nxge_init_fzc_rdc_tbl(nxge, current);
        }
    }

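    /*
     * Note that nxge_grp_dc_remove() above has already cleared
     * <channel> from set->owned.map, so the table map(s) just
     * reprogrammed no longer refer to the RDC being shared.
     */
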
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 *	Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: FZC RDC failed: %d", channel));
		return (-EIO);
	}

	/*
	 * We have to initialize the guest's RDC table, too.
	 * -----------------------------------------------------
	 */
	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
	if (rdc_grp->max_rdcs == 0) {
		rdc_grp->start_rdc = (uint8_t)channel;
		rdc_grp->def_rdc = (uint8_t)channel;
		rdc_grp->max_rdcs = 1;
	} else {
		rdc_grp->max_rdcs++;
	}
	NXGE_DC_SET(rdc_grp->map, channel);

	if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

	return (0);
}

/*
 * nxge_hio_dc_share
 *
 * Share a DMA channel with a guest domain.
 *
 * Arguments:
 *	nxge
 *	vr	The VR that <channel> will belong to.
 *	type	Tx or Rx.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware;
	nxge_hio_dc_t *dc;
	int channel, limit;

	nxge_grp_set_t *set;
	nxge_grp_t *group;

	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r'));

	/*
	 * In version 1.0, we may only give a VR 2 RDCs or TDCs.
	 * Not only that, but the HV has statically assigned the
	 * channels like so:
	 * VR0: RDC0 & RDC1
	 * VR1: RDC2 & RDC3, etc.
	 * The TDCs are assigned in exactly the same way.
	 *
	 * So, for example
	 *	hardware->start_rdc + vr->region * 2;
	 *	VR1: hardware->start_rdc + 1 * 2;
	 *	VR3: hardware->start_rdc + 3 * 2;
	 * If start_rdc is 0, we end up with 2 or 6.
	 * If start_rdc is 8, we end up with 10 or 14.
	 */

	set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set);
	hardware = &nxge->pt_config.hw_config;

	// This code is still NIU-specific (assuming only 2 ports)
	channel = hardware->start_rdc + (vr->region % 4) * 2;
	limit = channel + 2;

	MUTEX_ENTER(&nhd->lock);
	for (; channel < limit; channel++) {
		if ((1 << channel) & set->owned.map) {
			break;
		}
	}

	if (channel == limit) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_share: there are no channels to share"));
		return (-EIO);
	}

	MUTEX_EXIT(&nhd->lock);

	/* -------------------------------------------------- */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

	if (slot < 0) {
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;

	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

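#if 0
/*
 * Illustrative sketch only, never compiled: the static channel
 * assignment probed by nxge_hio_dc_share() above.  Each VR gets a
 * fixed pair of channels (VR0 -> {start, start + 1}, VR1 ->
 * {start + 2, start + 3}, ...); the (region % 4) term reflects the
 * 2-port NIU assumption noted in that function.  The "example_"
 * name is an assumption, not part of the driver.
 */
static void
example_vr_channel_pair(nxge_hw_pt_cfg_t *hardware, nxge_hio_vr_t *vr,
    int *first, int *limit)
{
	/* First channel of the pair statically assigned to this VR. */
	*first = hardware->start_rdc + (vr->region % 4) * 2;
	/* One past the last channel of the pair. */
	*limit = *first + 2;
}
#endif
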
/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 *	nxge
 *	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group = set->group[0];

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 *	nxge
 *	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_rdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;

	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_grp_t *group = set->group[0];
	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	/*
	 * Assert RST: RXDMA_CFIG1[30] = 1
	 *
	 * Initialize RxDMA	A.9.5.4
	 * Reconfigure RxDMA
	 * Enable RxDMA		A.9.5.5
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
		/* Be sure to re-enable the RX MAC. */
		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		}
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to initialize RxDMA channel %d", channel));
		return;
	}

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel once again belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			nxge_rdc_grp_t *group;
			group = &nxge->pt_config.rdc_grps[current];
			group->map = set->owned.map;
			group->max_rdcs++;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_unshare: Failed to add interrupt for "
		    "RxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 * Unshare (reuse) a DMA channel.
 *
 * Arguments:
 *	nxge
 *	vr	The VR that <channel> belongs to.
 *	type	Tx or Rx.
 *	channel	The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ?
	    &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_unshare(%d) failed", channel));
		return;
	}

	dc->vr = 0;
	dc->cookie = 0;

	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

#endif	/* if defined(sun4v) */
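
#if 0
/*
 * Illustrative sketch only, never compiled: how the owned/shared DMA
 * channel bitmaps used throughout this file are interpreted.  This
 * assumes NXGE_DC_SET()/NXGE_DC_RESET() set and clear bit <channel>,
 * which is consistent with the (1 << channel) ownership test in
 * nxge_hio_dc_share().  The "example_" name is an assumption, not
 * part of the driver.
 */
static boolean_t
example_channel_is_shared(nxge_grp_set_t *set, int channel)
{
	/* A set bit in shared.map means the SD has lent the channel out. */
	return (((1 << channel) & set->shared.map) ? B_TRUE : B_FALSE);
}
#endif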