/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices. That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
346495Sspeer * 356495Sspeer */ 366495Sspeer 378275SEric Cheng #include <sys/mac_provider.h> 386495Sspeer #include <sys/nxge/nxge_impl.h> 396495Sspeer #include <sys/nxge/nxge_fzc.h> 406495Sspeer #include <sys/nxge/nxge_rxdma.h> 416495Sspeer #include <sys/nxge/nxge_txdma.h> 426495Sspeer #include <sys/nxge/nxge_hio.h> 436495Sspeer 446495Sspeer #define NXGE_HIO_SHARE_MIN_CHANNELS 2 456495Sspeer #define NXGE_HIO_SHARE_MAX_CHANNELS 2 466495Sspeer 476495Sspeer /* 486495Sspeer * External prototypes 496495Sspeer */ 506495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 516495Sspeer 526495Sspeer /* The following function may be found in nxge_main.c */ 538275SEric Cheng extern int nxge_m_mmac_remove(void *arg, int slot); 548275SEric Cheng extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 558275SEric Cheng boolean_t usetbl); 566495Sspeer 576495Sspeer /* The following function may be found in nxge_[t|r]xdma.c */ 586495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 596495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 606495Sspeer 616495Sspeer /* 626495Sspeer * Local prototypes 636495Sspeer */ 646495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 656495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 666495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group); 676495Sspeer 686495Sspeer /* 696495Sspeer * These functions are used by both service & guest domains to 706495Sspeer * decide whether they're running in an LDOMs/XEN environment 716495Sspeer * or not. If so, then the Hybrid I/O (HIO) module is initialized. 726495Sspeer */ 736495Sspeer 746495Sspeer /* 756495Sspeer * nxge_get_environs 766495Sspeer * 776495Sspeer * Figure out if we are in a guest domain or not. 
786495Sspeer * 796495Sspeer * Arguments: 806495Sspeer * nxge 816495Sspeer * 826495Sspeer * Notes: 836495Sspeer * 846495Sspeer * Context: 856495Sspeer * Any domain 866495Sspeer */ 876495Sspeer void 886495Sspeer nxge_get_environs( 896495Sspeer nxge_t *nxge) 906495Sspeer { 916495Sspeer char *string; 926495Sspeer 936495Sspeer /* 946495Sspeer * In the beginning, assume that we are running sans LDOMs/XEN. 956495Sspeer */ 966495Sspeer nxge->environs = SOLARIS_DOMAIN; 976495Sspeer 986495Sspeer /* 996495Sspeer * Are we a hybrid I/O (HIO) guest domain driver? 1006495Sspeer */ 1016495Sspeer if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 1026495Sspeer DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 1036495Sspeer "niutype", &string)) == DDI_PROP_SUCCESS) { 1046495Sspeer if (strcmp(string, "n2niu") == 0) { 1056495Sspeer nxge->environs = SOLARIS_GUEST_DOMAIN; 1066495Sspeer /* So we can allocate properly-aligned memory. */ 1076495Sspeer nxge->niu_type = N2_NIU; 1086495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 1096495Sspeer "Hybrid IO-capable guest domain")); 1106495Sspeer } 1116495Sspeer ddi_prop_free(string); 1126495Sspeer } 1136495Sspeer } 1146495Sspeer 1156495Sspeer #if !defined(sun4v) 1166495Sspeer 1176495Sspeer /* 1186495Sspeer * nxge_hio_init 1196495Sspeer * 1206495Sspeer * Initialize the HIO module of the NXGE driver. 1216495Sspeer * 1226495Sspeer * Arguments: 1236495Sspeer * nxge 1246495Sspeer * 1256495Sspeer * Notes: 1266495Sspeer * This is the non-hybrid I/O version of this function. 
1276495Sspeer * 1286495Sspeer * Context: 1296495Sspeer * Any domain 1306495Sspeer */ 1316495Sspeer int 1327587SMichael.Speer@Sun.COM nxge_hio_init(nxge_t *nxge) 1336495Sspeer { 1346495Sspeer nxge_hio_data_t *nhd; 1358275SEric Cheng int i; 1366495Sspeer 1376495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1386495Sspeer if (nhd == 0) { 1396495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 1406495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 1416495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 1426495Sspeer } 1436495Sspeer 1448275SEric Cheng /* 1458275SEric Cheng * Initialize share and ring group structures. 1468275SEric Cheng */ 1478275SEric Cheng for (i = 0; i < NXGE_MAX_TDCS; i++) 1488275SEric Cheng nxge->tdc_is_shared[i] = B_FALSE; 1498275SEric Cheng 1508275SEric Cheng for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) { 1518275SEric Cheng nxge->tx_hio_groups[i].ghandle = NULL; 1528275SEric Cheng nxge->tx_hio_groups[i].nxgep = nxge; 1538275SEric Cheng nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX; 1548275SEric Cheng nxge->tx_hio_groups[i].gindex = 0; 1558275SEric Cheng nxge->tx_hio_groups[i].sindex = 0; 1568275SEric Cheng } 1578275SEric Cheng 1588275SEric Cheng for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 1598275SEric Cheng nxge->rx_hio_groups[i].ghandle = NULL; 1608275SEric Cheng nxge->rx_hio_groups[i].nxgep = nxge; 1618275SEric Cheng nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX; 1628275SEric Cheng nxge->rx_hio_groups[i].gindex = 0; 1638275SEric Cheng nxge->rx_hio_groups[i].sindex = 0; 1648275SEric Cheng nxge->rx_hio_groups[i].started = B_FALSE; 1658597SMichael.Speer@Sun.COM nxge->rx_hio_groups[i].port_default_grp = B_FALSE; 1668275SEric Cheng nxge->rx_hio_groups[i].rdctbl = -1; 1678275SEric Cheng nxge->rx_hio_groups[i].n_mac_addrs = 0; 1688275SEric Cheng } 1698275SEric Cheng 1706495Sspeer nhd->hio.ldoms = B_FALSE; 1716495Sspeer 1726495Sspeer return (NXGE_OK); 1736495Sspeer } 1746495Sspeer 1756495Sspeer #endif 1766495Sspeer 1776495Sspeer void 
1787587SMichael.Speer@Sun.COM nxge_hio_uninit(nxge_t *nxge) 1796495Sspeer { 1806495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1816495Sspeer 1826495Sspeer ASSERT(nxge->nxge_hw_p->ndevs == 0); 1836495Sspeer 1847587SMichael.Speer@Sun.COM if (nhd != NULL) { 1857587SMichael.Speer@Sun.COM MUTEX_DESTROY(&nhd->lock); 1867587SMichael.Speer@Sun.COM KMEM_FREE(nhd, sizeof (*nhd)); 1877587SMichael.Speer@Sun.COM nxge->nxge_hw_p->hio = 0; 1887587SMichael.Speer@Sun.COM } 1896495Sspeer } 1906495Sspeer 1916495Sspeer /* 1926495Sspeer * nxge_dci_map 1936495Sspeer * 1946495Sspeer * Map a DMA channel index to a channel number. 1956495Sspeer * 1966495Sspeer * Arguments: 1976495Sspeer * instance The instance number of the driver. 1986495Sspeer * type The type of channel this is: Tx or Rx. 1996495Sspeer * index The index to convert to a channel number 2006495Sspeer * 2016495Sspeer * Notes: 2026495Sspeer * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 2036495Sspeer * 2046495Sspeer * Context: 2056495Sspeer * Any domain 2066495Sspeer */ 2076495Sspeer int 2086495Sspeer nxge_dci_map( 2096495Sspeer nxge_t *nxge, 2106495Sspeer vpc_type_t type, 2116495Sspeer int index) 2126495Sspeer { 2136495Sspeer nxge_grp_set_t *set; 2146495Sspeer int dc; 2156495Sspeer 2166495Sspeer switch (type) { 2176495Sspeer case VP_BOUND_TX: 2186495Sspeer set = &nxge->tx_set; 2196495Sspeer break; 2206495Sspeer case VP_BOUND_RX: 2216495Sspeer set = &nxge->rx_set; 2226495Sspeer break; 2236495Sspeer } 2246495Sspeer 2256495Sspeer for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 2266495Sspeer if ((1 << dc) & set->owned.map) { 2276495Sspeer if (index == 0) 2286495Sspeer return (dc); 2296495Sspeer else 2306495Sspeer index--; 2316495Sspeer } 2326495Sspeer } 2336495Sspeer 2346495Sspeer return (-1); 2356495Sspeer } 2366495Sspeer 2376495Sspeer /* 2386495Sspeer * --------------------------------------------------------------------- 2396495Sspeer * These are the general-purpose DMA channel group 
functions. That is, 2406495Sspeer * these functions are used to manage groups of TDCs or RDCs in an HIO 2416495Sspeer * environment. 2426495Sspeer * 2436495Sspeer * But is also expected that in the future they will be able to manage 2446495Sspeer * Crossbow groups. 2456495Sspeer * --------------------------------------------------------------------- 2466495Sspeer */ 2476495Sspeer 2486495Sspeer /* 2497766SMichael.Speer@Sun.COM * nxge_grp_cleanup(p_nxge_t nxge) 2507766SMichael.Speer@Sun.COM * 2517766SMichael.Speer@Sun.COM * Remove all outstanding groups. 2527766SMichael.Speer@Sun.COM * 2537766SMichael.Speer@Sun.COM * Arguments: 2547766SMichael.Speer@Sun.COM * nxge 2557766SMichael.Speer@Sun.COM */ 2567766SMichael.Speer@Sun.COM void 2577766SMichael.Speer@Sun.COM nxge_grp_cleanup(p_nxge_t nxge) 2587766SMichael.Speer@Sun.COM { 2597766SMichael.Speer@Sun.COM nxge_grp_set_t *set; 2607766SMichael.Speer@Sun.COM int i; 2617766SMichael.Speer@Sun.COM 2627766SMichael.Speer@Sun.COM MUTEX_ENTER(&nxge->group_lock); 2637766SMichael.Speer@Sun.COM 2647766SMichael.Speer@Sun.COM /* 2657766SMichael.Speer@Sun.COM * Find RX groups that need to be cleaned up. 2667766SMichael.Speer@Sun.COM */ 2677766SMichael.Speer@Sun.COM set = &nxge->rx_set; 2687766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2697766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2707766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2717766SMichael.Speer@Sun.COM set->group[i] = NULL; 2727766SMichael.Speer@Sun.COM } 2737766SMichael.Speer@Sun.COM } 2747766SMichael.Speer@Sun.COM 2757766SMichael.Speer@Sun.COM /* 2767766SMichael.Speer@Sun.COM * Find TX groups that need to be cleaned up. 
2777766SMichael.Speer@Sun.COM */ 2787766SMichael.Speer@Sun.COM set = &nxge->tx_set; 2797766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2807766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2817766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2827766SMichael.Speer@Sun.COM set->group[i] = NULL; 2837766SMichael.Speer@Sun.COM } 2847766SMichael.Speer@Sun.COM } 2857766SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 2867766SMichael.Speer@Sun.COM } 2877766SMichael.Speer@Sun.COM 2887766SMichael.Speer@Sun.COM 2897766SMichael.Speer@Sun.COM /* 2906495Sspeer * nxge_grp_add 2916495Sspeer * 2926495Sspeer * Add a group to an instance of NXGE. 2936495Sspeer * 2946495Sspeer * Arguments: 2956495Sspeer * nxge 2966495Sspeer * type Tx or Rx 2976495Sspeer * 2986495Sspeer * Notes: 2996495Sspeer * 3006495Sspeer * Context: 3016495Sspeer * Any domain 3026495Sspeer */ 3037755SMisaki.Kataoka@Sun.COM nxge_grp_t * 3046495Sspeer nxge_grp_add( 3056495Sspeer nxge_t *nxge, 3066495Sspeer nxge_grp_type_t type) 3076495Sspeer { 3086495Sspeer nxge_grp_set_t *set; 3096495Sspeer nxge_grp_t *group; 3106495Sspeer int i; 3116495Sspeer 3126495Sspeer group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 3136495Sspeer group->nxge = nxge; 3146495Sspeer 3156495Sspeer MUTEX_ENTER(&nxge->group_lock); 3166495Sspeer switch (type) { 3176495Sspeer case NXGE_TRANSMIT_GROUP: 3186495Sspeer case EXT_TRANSMIT_GROUP: 3196495Sspeer set = &nxge->tx_set; 3206495Sspeer break; 3216495Sspeer default: 3226495Sspeer set = &nxge->rx_set; 3236495Sspeer break; 3246495Sspeer } 3256495Sspeer 3266495Sspeer group->type = type; 3276495Sspeer group->active = B_TRUE; 3286495Sspeer group->sequence = set->sequence++; 3296495Sspeer 3306495Sspeer /* Find an empty slot for this logical group. 
*/ 3316495Sspeer for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 3326495Sspeer if (set->group[i] == 0) { 3336495Sspeer group->index = i; 3346495Sspeer set->group[i] = group; 3356495Sspeer NXGE_DC_SET(set->lg.map, i); 3366495Sspeer set->lg.count++; 3376495Sspeer break; 3386495Sspeer } 3396495Sspeer } 3406495Sspeer MUTEX_EXIT(&nxge->group_lock); 3416495Sspeer 3426495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3436495Sspeer "nxge_grp_add: %cgroup = %d.%d", 3446495Sspeer type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3456495Sspeer nxge->mac.portnum, group->sequence)); 3466495Sspeer 3477755SMisaki.Kataoka@Sun.COM return (group); 3486495Sspeer } 3496495Sspeer 3506495Sspeer void 3516495Sspeer nxge_grp_remove( 3526495Sspeer nxge_t *nxge, 3537755SMisaki.Kataoka@Sun.COM nxge_grp_t *group) /* The group to remove. */ 3546495Sspeer { 3556495Sspeer nxge_grp_set_t *set; 3566495Sspeer vpc_type_t type; 3576495Sspeer 3586495Sspeer MUTEX_ENTER(&nxge->group_lock); 3596495Sspeer switch (group->type) { 3606495Sspeer case NXGE_TRANSMIT_GROUP: 3616495Sspeer case EXT_TRANSMIT_GROUP: 3626495Sspeer set = &nxge->tx_set; 3636495Sspeer break; 3646495Sspeer default: 3656495Sspeer set = &nxge->rx_set; 3666495Sspeer break; 3676495Sspeer } 3686495Sspeer 3696495Sspeer if (set->group[group->index] != group) { 3706495Sspeer MUTEX_EXIT(&nxge->group_lock); 3716495Sspeer return; 3726495Sspeer } 3736495Sspeer 3746495Sspeer set->group[group->index] = 0; 3756495Sspeer NXGE_DC_RESET(set->lg.map, group->index); 3766495Sspeer set->lg.count--; 3776495Sspeer 3786495Sspeer /* While inside the mutex, deactivate <group>. */ 3796495Sspeer group->active = B_FALSE; 3806495Sspeer 3816495Sspeer MUTEX_EXIT(&nxge->group_lock); 3826495Sspeer 3836495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3846495Sspeer "nxge_grp_remove(%c.%d.%d) called", 3856495Sspeer group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3866495Sspeer nxge->mac.portnum, group->sequence)); 3876495Sspeer 3886495Sspeer /* Now, remove any DCs which are still active. 
*/ 3896495Sspeer switch (group->type) { 3906495Sspeer default: 3916495Sspeer type = VP_BOUND_TX; 3926495Sspeer break; 3936495Sspeer case NXGE_RECEIVE_GROUP: 3946495Sspeer case EXT_RECEIVE_GROUP: 3956495Sspeer type = VP_BOUND_RX; 3966495Sspeer } 3976495Sspeer 3986495Sspeer while (group->dc) { 3996495Sspeer nxge_grp_dc_remove(nxge, type, group->dc->channel); 4006495Sspeer } 4016495Sspeer 4026495Sspeer KMEM_FREE(group, sizeof (*group)); 4036495Sspeer } 4046495Sspeer 4056495Sspeer /* 4067950SMichael.Speer@Sun.COM * nxge_grp_dc_add 4076495Sspeer * 4086495Sspeer * Add a DMA channel to a VR/Group. 4096495Sspeer * 4106495Sspeer * Arguments: 4116495Sspeer * nxge 4126495Sspeer * channel The channel to add. 4136495Sspeer * Notes: 4146495Sspeer * 4156495Sspeer * Context: 4166495Sspeer * Any domain 4176495Sspeer */ 4186495Sspeer /* ARGSUSED */ 4196495Sspeer int 4206495Sspeer nxge_grp_dc_add( 4216495Sspeer nxge_t *nxge, 4227755SMisaki.Kataoka@Sun.COM nxge_grp_t *group, /* The group to add <channel> to. 
*/ 4236495Sspeer vpc_type_t type, /* Rx or Tx */ 4246495Sspeer int channel) /* A physical/logical channel number */ 4256495Sspeer { 4266495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 4276495Sspeer nxge_hio_dc_t *dc; 4286495Sspeer nxge_grp_set_t *set; 4296602Sspeer nxge_status_t status = NXGE_OK; 4306495Sspeer 4316495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 4326495Sspeer 4338275SEric Cheng if (group == 0) 4346495Sspeer return (0); 4356495Sspeer 4366495Sspeer switch (type) { 4377950SMichael.Speer@Sun.COM case VP_BOUND_TX: 4386495Sspeer set = &nxge->tx_set; 4396495Sspeer if (channel > NXGE_MAX_TDCS) { 4406495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4416495Sspeer "nxge_grp_dc_add: TDC = %d", channel)); 4426495Sspeer return (NXGE_ERROR); 4436495Sspeer } 4446495Sspeer break; 4456495Sspeer case VP_BOUND_RX: 4466495Sspeer set = &nxge->rx_set; 4476495Sspeer if (channel > NXGE_MAX_RDCS) { 4486495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4496495Sspeer "nxge_grp_dc_add: RDC = %d", channel)); 4506495Sspeer return (NXGE_ERROR); 4516495Sspeer } 4526495Sspeer break; 4537950SMichael.Speer@Sun.COM 4547950SMichael.Speer@Sun.COM default: 4557950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4567950SMichael.Speer@Sun.COM "nxge_grp_dc_add: unknown type channel(%d)", channel)); 4576495Sspeer } 4586495Sspeer 4596495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 4606495Sspeer "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 4616495Sspeer type == VP_BOUND_TX ? 't' : 'r', 4626495Sspeer nxge->mac.portnum, group->sequence, group->count, channel)); 4636495Sspeer 4646495Sspeer MUTEX_ENTER(&nxge->group_lock); 4656495Sspeer if (group->active != B_TRUE) { 4666495Sspeer /* We may be in the process of removing this group. 
*/ 4676495Sspeer MUTEX_EXIT(&nxge->group_lock); 4686495Sspeer return (NXGE_ERROR); 4696495Sspeer } 4706495Sspeer MUTEX_EXIT(&nxge->group_lock); 4716495Sspeer 4726495Sspeer if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 4736495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4746495Sspeer "nxge_grp_dc_add(%d): DC FIND failed", channel)); 4756495Sspeer return (NXGE_ERROR); 4766495Sspeer } 4776495Sspeer 4786495Sspeer MUTEX_ENTER(&nhd->lock); 4796495Sspeer 4806495Sspeer if (dc->group) { 4816495Sspeer MUTEX_EXIT(&nhd->lock); 4826495Sspeer /* This channel is already in use! */ 4836495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4846495Sspeer "nxge_grp_dc_add(%d): channel already in group", channel)); 4856495Sspeer return (NXGE_ERROR); 4866495Sspeer } 4876495Sspeer 4886495Sspeer dc->next = 0; 4896495Sspeer dc->page = channel; 4906495Sspeer dc->channel = (nxge_channel_t)channel; 4916495Sspeer 4926495Sspeer dc->type = type; 4936495Sspeer if (type == VP_BOUND_RX) { 4946495Sspeer dc->init = nxge_init_rxdma_channel; 4956495Sspeer dc->uninit = nxge_uninit_rxdma_channel; 4966495Sspeer } else { 4976495Sspeer dc->init = nxge_init_txdma_channel; 4986495Sspeer dc->uninit = nxge_uninit_txdma_channel; 4996495Sspeer } 5006495Sspeer 5017755SMisaki.Kataoka@Sun.COM dc->group = group; 5026495Sspeer 5036495Sspeer if (isLDOMguest(nxge)) 5046495Sspeer (void) nxge_hio_ldsv_add(nxge, dc); 5056495Sspeer 5066495Sspeer NXGE_DC_SET(set->owned.map, channel); 5076495Sspeer set->owned.count++; 5086495Sspeer 5096495Sspeer MUTEX_EXIT(&nhd->lock); 5106495Sspeer 5116602Sspeer if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 5126602Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5136602Sspeer "nxge_grp_dc_add(%d): channel init failed", channel)); 5147950SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5157950SMichael.Speer@Sun.COM (void) memset(dc, 0, sizeof (*dc)); 5167950SMichael.Speer@Sun.COM NXGE_DC_RESET(set->owned.map, channel); 5177950SMichael.Speer@Sun.COM set->owned.count--; 
5187950SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5196603Sspeer return (NXGE_ERROR); 5206602Sspeer } 5216602Sspeer 5226495Sspeer nxge_grp_dc_append(nxge, group, dc); 5236495Sspeer 5247812SMichael.Speer@Sun.COM if (type == VP_BOUND_TX) { 5257812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5267812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_FALSE; 5277812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5287812SMichael.Speer@Sun.COM } 5297812SMichael.Speer@Sun.COM 5306495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 5316495Sspeer 5326602Sspeer return ((int)status); 5336495Sspeer } 5346495Sspeer 5356495Sspeer void 5366495Sspeer nxge_grp_dc_remove( 5376495Sspeer nxge_t *nxge, 5386495Sspeer vpc_type_t type, 5396495Sspeer int channel) 5406495Sspeer { 5416495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5426495Sspeer nxge_hio_dc_t *dc; 5436495Sspeer nxge_grp_set_t *set; 5446495Sspeer nxge_grp_t *group; 5456495Sspeer 5466495Sspeer dc_uninit_t uninit; 5476495Sspeer 5486495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 5496495Sspeer 5507950SMichael.Speer@Sun.COM if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) 5517950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5527950SMichael.Speer@Sun.COM 5537950SMichael.Speer@Sun.COM if ((dc->group == NULL) && (dc->next == 0) && 5547950SMichael.Speer@Sun.COM (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) { 5557950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5566495Sspeer } 5577950SMichael.Speer@Sun.COM 5586495Sspeer group = (nxge_grp_t *)dc->group; 5596495Sspeer 5606495Sspeer if (isLDOMguest(nxge)) { 5616495Sspeer (void) nxge_hio_intr_remove(nxge, type, channel); 5626495Sspeer } 5636495Sspeer 5646495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 5656495Sspeer "DC remove: group = %d.%d.%d, %cdc %d", 5666495Sspeer nxge->mac.portnum, group->sequence, group->count, 5676495Sspeer type == VP_BOUND_TX ? 
't' : 'r', dc->channel)); 5686495Sspeer 5696495Sspeer MUTEX_ENTER(&nhd->lock); 5706495Sspeer 5716602Sspeer set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 5726602Sspeer 5736495Sspeer /* Remove the DC from its group. */ 5746495Sspeer if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 5756495Sspeer MUTEX_EXIT(&nhd->lock); 5766495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5777950SMichael.Speer@Sun.COM "nxge_grp_dc_remove(%d) failed", channel)); 5787950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5796495Sspeer } 5806495Sspeer 5816495Sspeer uninit = dc->uninit; 5826495Sspeer channel = dc->channel; 5836495Sspeer 5846495Sspeer NXGE_DC_RESET(set->owned.map, channel); 5856495Sspeer set->owned.count--; 5866495Sspeer 5876495Sspeer (void) memset(dc, 0, sizeof (*dc)); 5886495Sspeer 5896495Sspeer MUTEX_EXIT(&nhd->lock); 5906495Sspeer 5916495Sspeer (*uninit)(nxge, channel); 5926495Sspeer 5937950SMichael.Speer@Sun.COM nxge_grp_dc_remove_exit: 5946495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 5956495Sspeer } 5966495Sspeer 5976495Sspeer nxge_hio_dc_t * 5986495Sspeer nxge_grp_dc_find( 5996495Sspeer nxge_t *nxge, 6006495Sspeer vpc_type_t type, /* Rx or Tx */ 6016495Sspeer int channel) 6026495Sspeer { 6036495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 6046495Sspeer nxge_hio_dc_t *current; 6056495Sspeer 6066495Sspeer current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 6076495Sspeer 6086495Sspeer if (!isLDOMguest(nxge)) { 6096495Sspeer return (¤t[channel]); 6106495Sspeer } else { 6116495Sspeer /* We're in a guest domain. */ 6126495Sspeer int i, limit = (type == VP_BOUND_TX) ? 
6136495Sspeer NXGE_MAX_TDCS : NXGE_MAX_RDCS; 6146495Sspeer 6156495Sspeer MUTEX_ENTER(&nhd->lock); 6166495Sspeer for (i = 0; i < limit; i++, current++) { 6176495Sspeer if (current->channel == channel) { 6186495Sspeer if (current->vr && current->vr->nxge == 6196495Sspeer (uintptr_t)nxge) { 6206495Sspeer MUTEX_EXIT(&nhd->lock); 6216495Sspeer return (current); 6226495Sspeer } 6236495Sspeer } 6246495Sspeer } 6256495Sspeer MUTEX_EXIT(&nhd->lock); 6266495Sspeer } 6276495Sspeer 6286495Sspeer return (0); 6296495Sspeer } 6306495Sspeer 6316495Sspeer /* 6326495Sspeer * nxge_grp_dc_append 6336495Sspeer * 6346495Sspeer * Append a DMA channel to a group. 6356495Sspeer * 6366495Sspeer * Arguments: 6376495Sspeer * nxge 6386495Sspeer * group The group to append to 6396495Sspeer * dc The DMA channel to append 6406495Sspeer * 6416495Sspeer * Notes: 6426495Sspeer * 6436495Sspeer * Context: 6446495Sspeer * Any domain 6456495Sspeer */ 6466495Sspeer static 6476495Sspeer void 6486495Sspeer nxge_grp_dc_append( 6496495Sspeer nxge_t *nxge, 6506495Sspeer nxge_grp_t *group, 6516495Sspeer nxge_hio_dc_t *dc) 6526495Sspeer { 6536495Sspeer MUTEX_ENTER(&nxge->group_lock); 6546495Sspeer 6556495Sspeer if (group->dc == 0) { 6566495Sspeer group->dc = dc; 6576495Sspeer } else { 6586495Sspeer nxge_hio_dc_t *current = group->dc; 6596495Sspeer do { 6606495Sspeer if (current->next == 0) { 6616495Sspeer current->next = dc; 6626495Sspeer break; 6636495Sspeer } 6646495Sspeer current = current->next; 6656495Sspeer } while (current); 6666495Sspeer } 6676495Sspeer 6686495Sspeer NXGE_DC_SET(group->map, dc->channel); 6696495Sspeer 6706495Sspeer nxge_grp_dc_map(group); 6716602Sspeer group->count++; 6726495Sspeer 6736495Sspeer MUTEX_EXIT(&nxge->group_lock); 6746495Sspeer } 6756495Sspeer 6766495Sspeer /* 6776495Sspeer * nxge_grp_dc_unlink 6786495Sspeer * 6796495Sspeer * Unlink a DMA channel fromits linked list (group). 
6806495Sspeer * 6816495Sspeer * Arguments: 6826495Sspeer * nxge 6836495Sspeer * group The group (linked list) to unlink from 6846495Sspeer * dc The DMA channel to append 6856495Sspeer * 6866495Sspeer * Notes: 6876495Sspeer * 6886495Sspeer * Context: 6896495Sspeer * Any domain 6906495Sspeer */ 6916495Sspeer nxge_hio_dc_t * 6928275SEric Cheng nxge_grp_dc_unlink( 6938275SEric Cheng nxge_t *nxge, 6948275SEric Cheng nxge_grp_t *group, 6958275SEric Cheng int channel) 6966495Sspeer { 6976495Sspeer nxge_hio_dc_t *current, *previous; 6986495Sspeer 6996495Sspeer MUTEX_ENTER(&nxge->group_lock); 7006495Sspeer 7017812SMichael.Speer@Sun.COM if (group == NULL) { 7027812SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 7037812SMichael.Speer@Sun.COM return (0); 7047812SMichael.Speer@Sun.COM } 7057812SMichael.Speer@Sun.COM 7066495Sspeer if ((current = group->dc) == 0) { 7076495Sspeer MUTEX_EXIT(&nxge->group_lock); 7086495Sspeer return (0); 7096495Sspeer } 7106495Sspeer 7116495Sspeer previous = 0; 7126495Sspeer do { 7136495Sspeer if (current->channel == channel) { 7146495Sspeer if (previous) 7156495Sspeer previous->next = current->next; 7166495Sspeer else 7176495Sspeer group->dc = current->next; 7186495Sspeer break; 7196495Sspeer } 7206495Sspeer previous = current; 7216495Sspeer current = current->next; 7226495Sspeer } while (current); 7236495Sspeer 7246495Sspeer if (current == 0) { 7256495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 7266495Sspeer "DC unlink: DC %d not found", channel)); 7276495Sspeer } else { 7286495Sspeer current->next = 0; 7296495Sspeer current->group = 0; 7306495Sspeer 7318275SEric Cheng NXGE_DC_RESET(group->map, channel); 7326495Sspeer group->count--; 7336495Sspeer } 7346495Sspeer 7356495Sspeer nxge_grp_dc_map(group); 7366495Sspeer 7376495Sspeer MUTEX_EXIT(&nxge->group_lock); 7386495Sspeer 7396495Sspeer return (current); 7406495Sspeer } 7416495Sspeer 7426495Sspeer /* 7436495Sspeer * nxge_grp_dc_map 7446495Sspeer * 7456495Sspeer * Map a linked list to an array 
of channel numbers. 7466495Sspeer * 7476495Sspeer * Arguments: 7486495Sspeer * nxge 7496495Sspeer * group The group to remap. 7506495Sspeer * 7516495Sspeer * Notes: 7526495Sspeer * It is expected that the caller will hold the correct mutex. 7536495Sspeer * 7546495Sspeer * Context: 7556495Sspeer * Service domain 7566495Sspeer */ 7576495Sspeer void 7586495Sspeer nxge_grp_dc_map( 7596495Sspeer nxge_grp_t *group) 7606495Sspeer { 7616495Sspeer nxge_channel_t *legend; 7626495Sspeer nxge_hio_dc_t *dc; 7636495Sspeer 7646495Sspeer (void) memset(group->legend, 0, sizeof (group->legend)); 7656495Sspeer 7666495Sspeer legend = group->legend; 7676495Sspeer dc = group->dc; 7686495Sspeer while (dc) { 7696495Sspeer *legend = dc->channel; 7706495Sspeer legend++; 7716495Sspeer dc = dc->next; 7726495Sspeer } 7736495Sspeer } 7746495Sspeer 7756495Sspeer /* 7766495Sspeer * --------------------------------------------------------------------- 7776495Sspeer * These are HIO debugging functions. 7786495Sspeer * --------------------------------------------------------------------- 7796495Sspeer */ 7806495Sspeer 7816495Sspeer /* 7826495Sspeer * nxge_delay 7836495Sspeer * 7846495Sspeer * Delay <seconds> number of seconds. 7856495Sspeer * 7866495Sspeer * Arguments: 7876495Sspeer * nxge 7886495Sspeer * group The group to append to 7896495Sspeer * dc The DMA channel to append 7906495Sspeer * 7916495Sspeer * Notes: 7926495Sspeer * This is a developer-only function. 
7936495Sspeer * 7946495Sspeer * Context: 7956495Sspeer * Any domain 7966495Sspeer */ 7976495Sspeer void 7986495Sspeer nxge_delay( 7996495Sspeer int seconds) 8006495Sspeer { 8016495Sspeer delay(drv_usectohz(seconds * 1000000)); 8026495Sspeer } 8036495Sspeer 8046495Sspeer static dmc_reg_name_t rx_names[] = { 8056495Sspeer { "RXDMA_CFIG1", 0 }, 8066495Sspeer { "RXDMA_CFIG2", 8 }, 8076495Sspeer { "RBR_CFIG_A", 0x10 }, 8086495Sspeer { "RBR_CFIG_B", 0x18 }, 8096495Sspeer { "RBR_KICK", 0x20 }, 8106495Sspeer { "RBR_STAT", 0x28 }, 8116495Sspeer { "RBR_HDH", 0x30 }, 8126495Sspeer { "RBR_HDL", 0x38 }, 8136495Sspeer { "RCRCFIG_A", 0x40 }, 8146495Sspeer { "RCRCFIG_B", 0x48 }, 8156495Sspeer { "RCRSTAT_A", 0x50 }, 8166495Sspeer { "RCRSTAT_B", 0x58 }, 8176495Sspeer { "RCRSTAT_C", 0x60 }, 8186495Sspeer { "RX_DMA_ENT_MSK", 0x68 }, 8196495Sspeer { "RX_DMA_CTL_STAT", 0x70 }, 8206495Sspeer { "RCR_FLSH", 0x78 }, 8216495Sspeer { "RXMISC", 0x90 }, 8226495Sspeer { "RX_DMA_CTL_STAT_DBG", 0x98 }, 8236495Sspeer { 0, -1 } 8246495Sspeer }; 8256495Sspeer 8266495Sspeer static dmc_reg_name_t tx_names[] = { 8276495Sspeer { "Tx_RNG_CFIG", 0 }, 8286495Sspeer { "Tx_RNG_HDL", 0x10 }, 8296495Sspeer { "Tx_RNG_KICK", 0x18 }, 8306495Sspeer { "Tx_ENT_MASK", 0x20 }, 8316495Sspeer { "Tx_CS", 0x28 }, 8326495Sspeer { "TxDMA_MBH", 0x30 }, 8336495Sspeer { "TxDMA_MBL", 0x38 }, 8346495Sspeer { "TxDMA_PRE_ST", 0x40 }, 8356495Sspeer { "Tx_RNG_ERR_LOGH", 0x48 }, 8366495Sspeer { "Tx_RNG_ERR_LOGL", 0x50 }, 8376495Sspeer { "TDMC_INTR_DBG", 0x60 }, 8386495Sspeer { "Tx_CS_DBG", 0x68 }, 8396495Sspeer { 0, -1 } 8406495Sspeer }; 8416495Sspeer 8426495Sspeer /* 8436495Sspeer * nxge_xx2str 8446495Sspeer * 8456495Sspeer * Translate a register address into a string. 8466495Sspeer * 8476495Sspeer * Arguments: 8486495Sspeer * offset The address of the register to translate. 8496495Sspeer * 8506495Sspeer * Notes: 8516495Sspeer * These are developer-only function. 
8526495Sspeer * 8536495Sspeer * Context: 8546495Sspeer * Any domain 8556495Sspeer */ 8566495Sspeer const char * 8576495Sspeer nxge_rx2str( 8586495Sspeer int offset) 8596495Sspeer { 8606495Sspeer dmc_reg_name_t *reg = &rx_names[0]; 8616495Sspeer 8626495Sspeer offset &= DMA_CSR_MASK; 8636495Sspeer 8646495Sspeer while (reg->name) { 8656495Sspeer if (offset == reg->offset) 8666495Sspeer return (reg->name); 8676495Sspeer reg++; 8686495Sspeer } 8696495Sspeer 8706495Sspeer return (0); 8716495Sspeer } 8726495Sspeer 8736495Sspeer const char * 8746495Sspeer nxge_tx2str( 8756495Sspeer int offset) 8766495Sspeer { 8776495Sspeer dmc_reg_name_t *reg = &tx_names[0]; 8786495Sspeer 8796495Sspeer offset &= DMA_CSR_MASK; 8806495Sspeer 8816495Sspeer while (reg->name) { 8826495Sspeer if (offset == reg->offset) 8836495Sspeer return (reg->name); 8846495Sspeer reg++; 8856495Sspeer } 8866495Sspeer 8876495Sspeer return (0); 8886495Sspeer } 8896495Sspeer 8906495Sspeer /* 8916495Sspeer * nxge_ddi_perror 8926495Sspeer * 8936495Sspeer * Map a DDI error number to a string. 8946495Sspeer * 8956495Sspeer * Arguments: 8966495Sspeer * ddi_error The DDI error number to map. 
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge,
	int dev_grpid, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
	mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Allocates the per-hardware HIO state on first call, detects whether
 *	we are running as an LDOMs service domain (hypervisor services
 *	version 1.1 present on an N2 NIU), initializes the share and ring
 *	group bookkeeping, and - in a guest domain - resolves the vio_net
 *	registration entry points from the vnet module.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	/* Allocate the shared HIO state once per hardware instance. */
	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * An N2 NIU with hypervisor services v1.1 available means we are
	 * actually running in a service domain, not a plain Solaris domain.
	 */
	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			if (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "nxge_hio_init: hypervisor services "
			    "version %d.%d",
			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
		}
	}

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;	/* no RDC table yet */
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	/* Not an LDOMs environment: HIO is disabled, nothing more to do. */
	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	/*
	 * Two regions are not shareable: FUNC0_VIR0 and FUNC2_VIR0
	 * 'belong' to the physical NIU ports (see nxge_hio_vr_share()).
	 */
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize the share structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 *
		 * A guest domain talks to the service domain through the
		 * vio_net framework; resolve its registration entry points
		 * dynamically from the loaded vnet module.
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}
#endif /* defined(sun4v) */

/*
 * nxge_hio_group_mac_add
 *
 * Program a unicast MAC address into the RDC table of a ring group,
 * configuring the group's RDC table entry on first use.
 *
 * Context:
 *	Called from nxge_hio_add_mac(); genlock must NOT be held on entry
 *	(it is taken and dropped internally around each state update).
 */
static int
nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
    const uint8_t *macaddr)
{
	int rv;
	nxge_rdc_grp_t *group;

	mutex_enter(nxge->genlock);

	/*
	 * Initialize the NXGE RDC table data structure.
	 */
	group = &nxge->pt_config.rdc_grps[g->rdctbl];
	if (!group->flag) {
		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
		group->flag = B_TRUE;	/* This group has been configured. */
	}

	mutex_exit(nxge->genlock);

	/*
	 * Add the MAC address.
	 * (Done without genlock held; nxge_m_mmac_add_g manages its own
	 * locking - NOTE(review): verify no window against concurrent
	 * removal between the drop above and this call.)
	 */
	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
	    g->rdctbl, B_TRUE)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	g->n_mac_addrs++;
	mutex_exit(nxge->genlock);
	return (0);
}

/*
 * nxge_hio_set_unicst
 *
 * Set the port's primary unicast MAC address and mark it as set.
 * Returns EINVAL if the hardware update fails.
 */
static int
nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unitcast failed"));
		return (EINVAL);
	}

	nxgep->primary = B_TRUE;

	return (0);
}
/*
 * nxge_hio_clear_unicst
 *
 * Forget the primary unicast address (bookkeeping only; the hardware
 * address is not touched here).
 */
/*ARGSUSED*/
static int
nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
{
	nxgep->primary = B_FALSE;
	return (0);
}

/*
 * nxge_hio_add_mac
 *
 * MAC layer mgi_addmac entry point: add a unicast address to an RX
 * ring group.  The first address added to the port's default group
 * becomes the primary address; a group bound to a VR may hold at most
 * one address.
 */
static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	p_nxge_t nxge = group->nxgep;
	int rv;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(nxge->genlock);

	/*
	 * No primary address is set yet and this is the port's default
	 * group: treat this address as the primary unicast address.
	 */
	if (!nxge->primary && group->port_default_grp) {
		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
		mutex_exit(nxge->genlock);
		return (rv);
	}

	/*
	 * If the group is associated with a VR, then only one
	 * address may be assigned to the group.
	 */
	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
	if ((vr != NULL) && (group->n_mac_addrs)) {
		mutex_exit(nxge->genlock);
		return (ENOSPC);
	}

	mutex_exit(nxge->genlock);

	/*
	 * Program the mac address for the group.
	 */
	if ((rv = nxge_hio_group_mac_add(nxge, group,
	    mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

/*
 * find_mac_slot
 *
 * Linear search of the MAC address pool for a matching address;
 * returns the slot index or -1 if not found.
 *
 * NOTE(review): the loop bound uses '<=' num_mmac - this looks like the
 * pool is 1-based with num_mmac usable slots; confirm against the
 * nxge_mmac_t definition before changing.
 */
static int
find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
{
	int i;
	for (i = 0; i <= mmac_info->num_mmac; i++) {
		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
		    ETHERADDRL) == 0) {
			return (i);
		}
	}
	return (-1);
}

/*
 * nxge_hio_rem_mac
 *
 * MAC layer mgi_remmac entry point: remove a unicast address from an
 * RX ring group.  An address not found in the pool is only legal when
 * it is the primary address of the port's default group, in which case
 * the primary flag is cleared instead.
 */
/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	struct ether_addr addrp;
	p_nxge_t nxge = group->nxgep;
	nxge_mmac_t *mmac_info;
	int rv, slot;

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(nxge->genlock);

	mmac_info = &nxge->nxge_mmac_info;
	slot = find_mac_slot(mmac_info, mac_addr);
	if (slot < 0) {
		/* Not in the pool: is it the primary unicast address? */
		if (group->port_default_grp && nxge->primary) {
			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
				rv = nxge_hio_clear_unicst(nxge, mac_addr);
				mutex_exit(nxge->genlock);
				return (rv);
			} else {
				mutex_exit(nxge->genlock);
				return (EINVAL);
			}
		} else {
			mutex_exit(nxge->genlock);
			return (EINVAL);
		}
	}

	mutex_exit(nxge->genlock);

	/*
	 * Remove the mac address for the group
	 */
	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	group->n_mac_addrs--;
	mutex_exit(nxge->genlock);

	return (0);
}

/*
 * nxge_hio_group_start
 *
 * MAC layer mgi_start entry point for an RX group: bind an RDC table
 * for the group and initialize it.  Returns ENXIO if the MAC is not
 * started.
 *
 * NOTE(review): on bind failure this returns the negative value from
 * nxge_fzc_rdc_tbl_bind() directly rather than a positive errno -
 * confirm callers handle that.
 */
static int
nxge_hio_group_start(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
	nxge_rdc_grp_t *rdc_grp_p;
	int rdctbl;
	int dev_gindex;

	ASSERT(group->type == MAC_RING_TYPE_RX);

#ifdef later
	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
#endif
	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
		return (ENXIO);

	mutex_enter(group->nxgep->genlock);
	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
	    group->gindex;
	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];

	/*
	 * Get an rdc table for this group.
	 * Group ID is given by the caller, and that's the group it needs
	 * to bind to.  The default group is already bound when the driver
	 * was attached.
	 *
	 * For Group 0, its RDC table was allocated at attach time;
	 * no need to allocate a new table.
	 */
	if (group->gindex != 0) {
		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
		    dev_gindex, B_TRUE);
		if (rdctbl < 0) {
			mutex_exit(group->nxgep->genlock);
			return (rdctbl);
		}
	} else {
		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
	}

	group->rdctbl = rdctbl;

	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);

	group->started = B_TRUE;
	mutex_exit(group->nxgep->genlock);

	return (0);
}

/*
 * nxge_hio_group_stop
 *
 * MAC layer mgi_stop entry point for an RX group: mark the group
 * stopped and release its RDC table binding.
 */
static void
nxge_hio_group_stop(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(group->nxgep->genlock);
	group->started = B_FALSE;

	/*
	 * Unbind the RDC table previously bound for this group.
	 *
	 * Since RDC table for group 0 was allocated at attach
	 * time, no need to unbind the table here.
	 */
	if (group->gindex != 0)
		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);

	mutex_exit(group->nxgep->genlock);
}

/*
 * nxge_hio_group_get
 *
 * MAC layer mgi callback registration: fill in the mac_group_info_t
 * for the requested RX or TX group and remember its handle.
 */
/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_ring_group_t *group;
	int dev_gindex;

	switch (type) {
	case MAC_RING_TYPE_RX:
		group = &nxgep->rx_hio_groups[groupid];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid;
		group->sindex = 0;	/* not yet bound to a share */

		dev_gindex = nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
		    groupid;

		/* Group 0 maps onto the port's default RX group. */
		if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
		    dev_gindex)
			group->port_default_grp = B_TRUE;

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = nxge_hio_group_start;
		infop->mgi_stop = nxge_hio_group_stop;
		infop->mgi_addmac = nxge_hio_add_mac;
		infop->mgi_remmac = nxge_hio_rem_mac;
		infop->mgi_count =
		    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * 'groupid' for TX should be incremented by one since
		 * the default group (groupid 0) is not known by the MAC layer
		 */
		group = &nxgep->tx_hio_groups[groupid + 1];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid + 1;
		group->sindex = 0;	/* not yet bound to a share */

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;	/* not needed */
		infop->mgi_remmac = NULL;	/* not needed */
		/* no rings associated with group initially */
		infop->mgi_count = 0;
		break;
	}
}

#if defined(sun4v)

/*
 * nxge_hio_share_assign
 *
 * Ask the hypervisor to set up a Virtualization Region (VR) and to
 * assign each of the VR's shared TDCs and RDCs to a free slot (page)
 * within it.  The chosen slots are reported to the caller through
 * *tmap and *rmap as bitmaps.
 *
 * Returns 0 on success, -EIO on any hypervisor failure.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
		NXGE_ERROR_MSG((nxge, HIO_CTL,
		    "nxge_hio_share_assign: "
		    "vr->assign() returned %d", hv_rv));
		return (-EIO);
	}

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}

/*
 * nxge_hio_share_unassign
 *
 * Undo nxge_hio_share_assign(): unassign every TDC and RDC page from
 * the VR and then release the VR itself.  Hypervisor failures are
 * logged but not propagated - teardown continues regardless.
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_share_unassign(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	uint64_t hv_rv;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	dc = vr->tx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "tx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	dc = vr->rx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "rx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	fp = &nhd->hio.vr;
	if (fp->unassign) {
		hv_rv = (*fp->unassign)(vr->cookie);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "vr->assign(%x) failed: %ld",
			    vr->cookie, hv_rv));
		}
	}
}

/*
 * nxge_hio_share_alloc
 *
 * MAC layer ms_salloc entry point: allocate a share by reserving an
 * unused VR and initializing its share handle.  Returns EIO if the
 * hypervisor assign entry points are missing, EAGAIN if no VR is free.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
{
	p_nxge_t nxge = (p_nxge_t)arg;
	nxge_share_handle_t *shp;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((vr = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);

	shp = &nxge->shares[vr->region];
	shp->nxgep = nxge;
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
	shp->active = B_FALSE;		/* not bound yet */

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}


/*
 * nxge_hio_share_free
 *
 * MAC layer ms_sfree entry point: reset the share handle and return
 * its VR to the free pool.
 */
void
nxge_hio_share_free(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t *vr;

	/*
	 * Clear internal handle state.
	 */
	vr = shp->vrp;
	shp->vrp = (void *)NULL;
	shp->index = 0;
	shp->tmap = 0;
	shp->rmap = 0;
	shp->rxgroup = 0;
	shp->active = B_FALSE;

	/*
	 * Free VR resource.
	 */
	nxge_hio_unshare(vr);
}


/*
 * nxge_hio_share_query
 *
 * MAC layer ms_squery entry point: report the (exactly two) ring
 * handles backing a share for the given ring type.
 *
 * NOTE(review): the switch has no default case; if 'type' were neither
 * RX nor TX, 'rh' and 'offset' would be used uninitialized.  Confirm
 * the MAC layer only ever passes those two values.
 */
void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    mac_ring_handle_t *rings, uint_t *n_rings)
{
	nxge_t *nxge;
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_ring_handle_t *rh;
	uint32_t offset;

	nxge = shp->nxgep;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rh = nxge->rx_ring_handles;
		offset = nxge->pt_config.hw_config.start_rdc;
		break;

	case MAC_RING_TYPE_TX:
		rh = nxge->tx_ring_handles;
		offset = nxge->pt_config.hw_config.tdc.start;
		break;
	}

	/*
	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
	 * but the HV has statically assigned the channels like so:
	 * VR0: RDC0 & RDC1
	 * VR1: RDC2 & RDC3, etc.
	 * The TDCs are assigned in exactly the same way.
	 */
	if (rings != NULL) {
		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
	}
	if (n_rings != NULL) {
		*n_rings = 2;
	}
}

/*
 * nxge_hio_share_add_group
 *
 * MAC layer ms_sadd entry point: attach a ring group to a share,
 * verifying that the group's rings are exactly the two channels the
 * HV statically associates with the share's VR.  Returns EALREADY if
 * the group is already bound to a share, EINVAL on a ring mismatch.
 */
int
nxge_hio_share_add_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_t *nxge;
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	nxge_grp_t *group;
	int i;

	if (rg->sindex != 0) {
		/* the group is already bound to a share */
		return (EALREADY);
	}

	/*
	 * If we are adding a group 0 to a share, this
	 * is not correct.
	 */
	ASSERT(rg->gindex != 0);

	nxge = rg->nxgep;
	vr = shp->vrp;

	switch (rg->type) {
	case MAC_RING_TYPE_RX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 RDCs. Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: RDC0 & RDC1
		 * VR1: RDC2 & RDC3, etc.
		 */
		group = nxge->rx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		for (i = 0; i < NXGE_MAX_RDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		rg->sindex = vr->region;
		vr->rdc_tbl = rg->rdctbl;
		shp->rxgroup = vr->rdc_tbl;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 TDCs. Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: TDC0 & TDC1
		 * VR1: TDC2 & TDC3, etc.
		 */
		group = nxge->tx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		for (i = 0; i < NXGE_MAX_TDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rg->gindex;
		rg->sindex = vr->region;
		break;
	}
	return (0);
}

/*
 * nxge_hio_share_rem_group
 *
 * MAC layer ms_sremove entry point: detach a ring group from a share,
 * clearing the share/VR bookkeeping set up by nxge_hio_share_add_group().
 */
int
nxge_hio_share_rem_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	int rv = 0;

	vr = shp->vrp;

	switch (group->type) {
	case MAC_RING_TYPE_RX:
		group->sindex = 0;
		vr->rdc_tbl = 0;
		shp->rxgroup = 0;
		break;

	case MAC_RING_TYPE_TX:
		group->sindex = 0;
		vr->tdc_tbl = 0;
		break;
	}

	return (rv);
}

/*
 * nxge_hio_share_bind
 *
 * MAC layer ms_sbind entry point: bind the DMA resources of the VR's
 * channels to the share, then have the hypervisor assign the VR and
 * its slots.  On success returns, via *rcookie, the domain cookie:
 * high 32 bits are the cfg_hdl, low 32 bits the HV cookie.
 * All added resources are rolled back on failure.
 */
int
nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
    uint64_t *rcookie)
{
	nxge_t *nxge;
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t *vr;
	uint64_t rmap, tmap, hv_rmap, hv_tmap;
	int rv;

	nxge = shp->nxgep;
	vr = (nxge_hio_vr_t *)shp->vrp;

	/*
	 * Add resources to the share.
	 * For each DMA channel associated with the VR, bind its resources
	 * to the VR.
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
	if (rv != 0) {
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
	if (rv != 0) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		return (rv);
	}

	/*
	 * Ask the Hypervisor to set up the VR and allocate slots for
	 * each rings associated with the VR.
	 */
	hv_tmap = hv_rmap = 0;
	if ((rv = nxge_hio_share_assign(nxge, cookie,
	    &hv_tmap, &hv_rmap, vr))) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		return (rv);
	}

	shp->active = B_TRUE;
	shp->tmap = hv_tmap;
	shp->rmap = hv_rmap;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	return (0);
}

/*
 * nxge_hio_share_unbind
 *
 * MAC layer ms_sunbind entry point: reverse nxge_hio_share_bind() -
 * take the VR back from the hypervisor, then free the TX and RX ring
 * resources that were added to the share.
 */
void
nxge_hio_share_unbind(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
}


/*
 * nxge_hio_vr_share
 *
 * Find an unused Virtualization Region (VR).
18116495Sspeer * 18126495Sspeer * Arguments: 18136495Sspeer * nxge 18146495Sspeer * 18156495Sspeer * Notes: 18166495Sspeer * 18176495Sspeer * Context: 18186495Sspeer * Service domain 18196495Sspeer */ 18207755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t * 18216495Sspeer nxge_hio_vr_share( 18226495Sspeer nxge_t *nxge) 18236495Sspeer { 18246495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 18256495Sspeer nxge_hio_vr_t *vr; 18266495Sspeer 18276495Sspeer int first, limit, region; 18286495Sspeer 18296495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 18306495Sspeer 18316495Sspeer MUTEX_ENTER(&nhd->lock); 18326495Sspeer 18337755SMisaki.Kataoka@Sun.COM if (nhd->vrs == 0) { 18346495Sspeer MUTEX_EXIT(&nhd->lock); 18356495Sspeer return (0); 18366495Sspeer } 18376495Sspeer 18386495Sspeer /* Find an empty virtual region (VR). */ 18396495Sspeer if (nxge->function_num == 0) { 18406495Sspeer // FUNC0_VIR0 'belongs' to NIU port 0. 18416495Sspeer first = FUNC0_VIR1; 18426495Sspeer limit = FUNC2_VIR0; 18436495Sspeer } else if (nxge->function_num == 1) { 18446495Sspeer // FUNC2_VIR0 'belongs' to NIU port 1. 
18456495Sspeer first = FUNC2_VIR1; 18466495Sspeer limit = FUNC_VIR_MAX; 18476495Sspeer } else { 18486495Sspeer cmn_err(CE_WARN, 18496495Sspeer "Shares not supported on function(%d) at this time.\n", 18506495Sspeer nxge->function_num); 18516495Sspeer } 18526495Sspeer 18536495Sspeer for (region = first; region < limit; region++) { 18546495Sspeer if (nhd->vr[region].nxge == 0) 18556495Sspeer break; 18566495Sspeer } 18576495Sspeer 18586495Sspeer if (region == limit) { 18596495Sspeer MUTEX_EXIT(&nhd->lock); 18606495Sspeer return (0); 18616495Sspeer } 18626495Sspeer 18636495Sspeer vr = &nhd->vr[region]; 18646495Sspeer vr->nxge = (uintptr_t)nxge; 18656495Sspeer vr->region = (uintptr_t)region; 18666495Sspeer 18677755SMisaki.Kataoka@Sun.COM nhd->vrs--; 18686495Sspeer 18696495Sspeer MUTEX_EXIT(&nhd->lock); 18706495Sspeer 18716495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 18726495Sspeer 18737755SMisaki.Kataoka@Sun.COM return (vr); 18746495Sspeer } 18756495Sspeer 18766495Sspeer void 18776495Sspeer nxge_hio_unshare( 18787755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr) 18796495Sspeer { 18806495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 18816495Sspeer nxge_hio_data_t *nhd; 18826495Sspeer 18836495Sspeer vr_region_t region; 18846495Sspeer 18856495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 18866495Sspeer 18876495Sspeer if (!nxge) { 18887950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: " 18896495Sspeer "vr->nxge is NULL")); 18906495Sspeer return; 18916495Sspeer } 18926495Sspeer 18936495Sspeer /* 18946495Sspeer * This function is no longer called, but I will keep it 18956495Sspeer * here in case we want to revisit this topic in the future. 18966495Sspeer * 18976495Sspeer * nxge_hio_hostinfo_uninit(nxge, vr); 18986495Sspeer */ 18998275SEric Cheng 19008275SEric Cheng /* 19018275SEric Cheng * XXX: This is done by ms_sremove? 
19028275SEric Cheng * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 19038275SEric Cheng */ 19046495Sspeer 19056495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19066495Sspeer 19076495Sspeer MUTEX_ENTER(&nhd->lock); 19086495Sspeer 19096495Sspeer region = vr->region; 19106495Sspeer (void) memset(vr, 0, sizeof (*vr)); 19116495Sspeer vr->region = region; 19126495Sspeer 19137755SMisaki.Kataoka@Sun.COM nhd->vrs++; 19146495Sspeer 19156495Sspeer MUTEX_EXIT(&nhd->lock); 19166495Sspeer 19176495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 19186495Sspeer } 19196495Sspeer 19206495Sspeer int 19216495Sspeer nxge_hio_addres( 19227755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr, 19236495Sspeer mac_ring_type_t type, 19248275SEric Cheng uint64_t *map) 19256495Sspeer { 19268275SEric Cheng nxge_t *nxge = (nxge_t *)vr->nxge; 19278275SEric Cheng nxge_grp_t *group; 19288275SEric Cheng int groupid; 19298275SEric Cheng int i; 19308275SEric Cheng int max_dcs; 19316495Sspeer 19326495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 19336495Sspeer 19346495Sspeer if (!nxge) 19356495Sspeer return (EINVAL); 19366495Sspeer 19378275SEric Cheng /* 19388275SEric Cheng * For each ring associated with the group, add the resources 19398275SEric Cheng * to the group and bind. 19408275SEric Cheng */ 19418275SEric Cheng max_dcs = (type == MAC_RING_TYPE_TX) ? 
NXGE_MAX_TDCS : NXGE_MAX_RDCS; 19428275SEric Cheng if (type == MAC_RING_TYPE_TX) { 19438275SEric Cheng /* set->group is an array of group indexed by a port group id */ 19448275SEric Cheng groupid = vr->tdc_tbl - 19458275SEric Cheng nxge->pt_config.hw_config.def_mac_txdma_grpid; 19468275SEric Cheng group = nxge->tx_set.group[groupid]; 19478275SEric Cheng } else { 19488275SEric Cheng /* set->group is an array of group indexed by a port group id */ 19498275SEric Cheng groupid = vr->rdc_tbl - 19508275SEric Cheng nxge->pt_config.hw_config.def_mac_rxdma_grpid; 19518275SEric Cheng group = nxge->rx_set.group[groupid]; 19528275SEric Cheng } 19538275SEric Cheng 19548275SEric Cheng if (group->map == 0) { 19558275SEric Cheng NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated " 19568275SEric Cheng "with this VR")); 19578275SEric Cheng return (EINVAL); 19588275SEric Cheng } 19598275SEric Cheng 19608275SEric Cheng for (i = 0; i < max_dcs; i++) { 19618275SEric Cheng if (group->map & (1 << i)) { 19628275SEric Cheng int rv; 19638275SEric Cheng 19648275SEric Cheng if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) { 19658275SEric Cheng if (*map == 0) /* Couldn't get even one DC. 
*/ 19668275SEric Cheng return (-rv); 19678275SEric Cheng else 19688275SEric Cheng break; 19698275SEric Cheng } 19708275SEric Cheng *map |= (1 << i); 19716495Sspeer } 19726495Sspeer } 19736495Sspeer 19746495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 19756495Sspeer 19766495Sspeer return (0); 19776495Sspeer } 19786495Sspeer 19796495Sspeer /* ARGSUSED */ 19806495Sspeer void 19816495Sspeer nxge_hio_remres( 19827755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr, 19836495Sspeer mac_ring_type_t type, 19846495Sspeer res_map_t res_map) 19856495Sspeer { 19866495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 19876495Sspeer nxge_grp_t *group; 19886495Sspeer 19896495Sspeer if (!nxge) { 19907950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 19916495Sspeer "vr->nxge is NULL")); 19926495Sspeer return; 19936495Sspeer } 19946495Sspeer 19956495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 19966495Sspeer 19978275SEric Cheng /* 19988275SEric Cheng * For each ring bound to the group, remove the DMA resources 19998275SEric Cheng * from the group and unbind. 20008275SEric Cheng */ 20016495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 20026495Sspeer while (group->dc) { 20036495Sspeer nxge_hio_dc_t *dc = group->dc; 20046495Sspeer NXGE_DC_RESET(res_map, dc->page); 20056495Sspeer nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 20066495Sspeer } 20076495Sspeer 20086495Sspeer if (res_map) { 20096495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 20106495Sspeer "res_map %lx", res_map)); 20116495Sspeer } 20126495Sspeer 20136495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 20146495Sspeer } 20156495Sspeer 20166495Sspeer /* 20176495Sspeer * nxge_hio_tdc_share 20186495Sspeer * 20196495Sspeer * Share an unused TDC channel. 
20206495Sspeer * 20216495Sspeer * Arguments: 20226495Sspeer * nxge 20236495Sspeer * 20246495Sspeer * Notes: 20256495Sspeer * 20266495Sspeer * A.7.3 Reconfigure Tx DMA channel 20276495Sspeer * Disable TxDMA A.9.6.10 20286495Sspeer * [Rebind TxDMA channel to Port A.9.6.7] 20296495Sspeer * 20306495Sspeer * We don't have to Rebind the TDC to the port - it always already bound. 20316495Sspeer * 20326495Sspeer * Soft Reset TxDMA A.9.6.2 20336495Sspeer * 20346495Sspeer * This procedure will be executed by nxge_init_txdma_channel() in the 20356495Sspeer * guest domain: 20366495Sspeer * 20376495Sspeer * Re-initialize TxDMA A.9.6.8 20386495Sspeer * Reconfigure TxDMA 20396495Sspeer * Enable TxDMA A.9.6.9 20406495Sspeer * 20416495Sspeer * Context: 20426495Sspeer * Service domain 20436495Sspeer */ 20446495Sspeer int 20456495Sspeer nxge_hio_tdc_share( 20466495Sspeer nxge_t *nxge, 20476495Sspeer int channel) 20486495Sspeer { 20497812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 20506495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 20516495Sspeer tx_ring_t *ring; 20526713Sspeer int count; 20536495Sspeer 20546495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 20556495Sspeer 20566495Sspeer /* 20576495Sspeer * Wait until this channel is idle. 20586495Sspeer */ 20596495Sspeer ring = nxge->tx_rings->rings[channel]; 20606713Sspeer 20616713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 20626886Sspeer if (ring->tx_ring_busy) { 20636886Sspeer /* 20646886Sspeer * Wait for 30 seconds. 
20656886Sspeer */ 20666886Sspeer for (count = 30 * 1000; count; count--) { 20676886Sspeer if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 20686886Sspeer break; 20696886Sspeer } 20706886Sspeer 20716886Sspeer drv_usecwait(1000); 20726495Sspeer } 20736713Sspeer 20746886Sspeer if (count == 0) { 20756886Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 20766886Sspeer NXGE_TX_RING_ONLINE); 20777950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20787950SMichael.Speer@Sun.COM "nxge_hio_tdc_share: " 20796886Sspeer "Tx ring %d was always BUSY", channel)); 20806886Sspeer return (-EIO); 20816886Sspeer } 20826886Sspeer } else { 20836713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 20846886Sspeer NXGE_TX_RING_OFFLINED); 20856495Sspeer } 20866495Sspeer 20877812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 20887812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_TRUE; 20897812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 20907812SMichael.Speer@Sun.COM 20916495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 20927950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: " 20936495Sspeer "Failed to remove interrupt for TxDMA channel %d", 20946495Sspeer channel)); 20958275SEric Cheng return (-EINVAL); 20966495Sspeer } 20976495Sspeer 20986495Sspeer /* Disable TxDMA A.9.6.10 */ 20996495Sspeer (void) nxge_txdma_channel_disable(nxge, channel); 21006495Sspeer 21016495Sspeer /* The SD is sharing this channel. */ 21026495Sspeer NXGE_DC_SET(set->shared.map, channel); 21036495Sspeer set->shared.count++; 21046495Sspeer 21056602Sspeer /* Soft Reset TxDMA A.9.6.2 */ 21066602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 21076602Sspeer 21086495Sspeer /* 21096495Sspeer * Initialize the DC-specific FZC control registers. 
21106495Sspeer * ----------------------------------------------------- 21116495Sspeer */ 21126495Sspeer if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 21136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 21147950SMichael.Speer@Sun.COM "nxge_hio_tdc_share: FZC TDC failed: %d", channel)); 21156495Sspeer return (-EIO); 21166495Sspeer } 21176495Sspeer 21186495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 21196495Sspeer 21206495Sspeer return (0); 21216495Sspeer } 21226495Sspeer 21236495Sspeer /* 21246495Sspeer * nxge_hio_rdc_share 21256495Sspeer * 21266495Sspeer * Share an unused RDC channel. 21276495Sspeer * 21286495Sspeer * Arguments: 21296495Sspeer * nxge 21306495Sspeer * 21316495Sspeer * Notes: 21326495Sspeer * 21336495Sspeer * This is the latest version of the procedure to 21346495Sspeer * Reconfigure an Rx DMA channel: 21356495Sspeer * 21366495Sspeer * A.6.3 Reconfigure Rx DMA channel 21376495Sspeer * Stop RxMAC A.9.2.6 21386495Sspeer * Drain IPP Port A.9.3.6 21396495Sspeer * Stop and reset RxDMA A.9.5.3 21406495Sspeer * 21416495Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 21426495Sspeer * guest domain: 21436495Sspeer * 21446495Sspeer * Initialize RxDMA A.9.5.4 21456495Sspeer * Reconfigure RxDMA 21466495Sspeer * Enable RxDMA A.9.5.5 21476495Sspeer * 21486495Sspeer * We will do this here, since the RDC is a canalis non grata: 21496495Sspeer * Enable RxMAC A.9.2.10 21506495Sspeer * 21516495Sspeer * Context: 21526495Sspeer * Service domain 21536495Sspeer */ 21546495Sspeer int 21556495Sspeer nxge_hio_rdc_share( 21566495Sspeer nxge_t *nxge, 21576495Sspeer nxge_hio_vr_t *vr, 21586495Sspeer int channel) 21596495Sspeer { 21606495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 21616495Sspeer nxge_rdc_grp_t *rdc_grp; 21626495Sspeer 21636495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 21646495Sspeer 21656495Sspeer /* Disable interrupts. 
*/ 21666495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 21677950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21686495Sspeer "Failed to remove interrupt for RxDMA channel %d", 21696495Sspeer channel)); 21706495Sspeer return (NXGE_ERROR); 21716495Sspeer } 21726495Sspeer 21736495Sspeer /* Stop RxMAC = A.9.2.6 */ 21746495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 21756495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21766495Sspeer "Failed to disable RxMAC")); 21776495Sspeer } 21786495Sspeer 21796495Sspeer /* Drain IPP Port = A.9.3.6 */ 21806495Sspeer (void) nxge_ipp_drain(nxge); 21816495Sspeer 21826495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 21836495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 21846495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 21856495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21866495Sspeer "Failed to disable RxDMA channel %d", channel)); 21876495Sspeer } 21886495Sspeer 21896495Sspeer /* The SD is sharing this channel. */ 21906495Sspeer NXGE_DC_SET(set->shared.map, channel); 21916495Sspeer set->shared.count++; 21926495Sspeer 21936602Sspeer // Assert RST: RXDMA_CFIG1[30] = 1 21946602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 21956602Sspeer 21966495Sspeer /* 21976495Sspeer * The guest domain will reconfigure the RDC later. 21986495Sspeer * 21996495Sspeer * But in the meantime, we must re-enable the Rx MAC so 22006495Sspeer * that we can start receiving packets again on the 22016495Sspeer * remaining RDCs: 22026495Sspeer * 22036495Sspeer * Enable RxMAC = A.9.2.10 22046495Sspeer */ 22056495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 22066495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 22077950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: Rx MAC still disabled")); 22086495Sspeer } 22096495Sspeer 22106495Sspeer /* 22116495Sspeer * Initialize the DC-specific FZC control registers. 
22126495Sspeer * ----------------------------------------------------- 22136495Sspeer */ 22146495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 22156495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 22167950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: RZC RDC failed: %ld", channel)); 22176495Sspeer return (-EIO); 22186495Sspeer } 22196495Sspeer 22206495Sspeer /* 2221*9047SMichael.Speer@Sun.COM * Update the RDC group. 22226495Sspeer */ 22236495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 22246495Sspeer NXGE_DC_SET(rdc_grp->map, channel); 22256495Sspeer 22266495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 22276495Sspeer 22286495Sspeer return (0); 22296495Sspeer } 22306495Sspeer 22316495Sspeer /* 22326495Sspeer * nxge_hio_dc_share 22336495Sspeer * 22346495Sspeer * Share a DMA channel with a guest domain. 22356495Sspeer * 22366495Sspeer * Arguments: 22376495Sspeer * nxge 22386495Sspeer * vr The VR that <channel> will belong to. 22396495Sspeer * type Tx or Rx. 22408275SEric Cheng * channel Channel to share 22416495Sspeer * 22426495Sspeer * Notes: 22436495Sspeer * 22446495Sspeer * Context: 22456495Sspeer * Service domain 22466495Sspeer */ 22476495Sspeer int 22486495Sspeer nxge_hio_dc_share( 22496495Sspeer nxge_t *nxge, 22506495Sspeer nxge_hio_vr_t *vr, 22518275SEric Cheng mac_ring_type_t type, 22528275SEric Cheng int channel) 22536495Sspeer { 22546495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 22556495Sspeer nxge_hio_dc_t *dc; 22566495Sspeer nxge_grp_t *group; 22576495Sspeer int slot; 22586495Sspeer 22596495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 22606495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 22616495Sspeer 22626495Sspeer 22636495Sspeer /* -------------------------------------------------- */ 22646495Sspeer slot = (type == MAC_RING_TYPE_TX) ? 
22656495Sspeer nxge_hio_tdc_share(nxge, channel) : 22666495Sspeer nxge_hio_rdc_share(nxge, vr, channel); 22676495Sspeer 22686495Sspeer if (slot < 0) { 22696495Sspeer if (type == MAC_RING_TYPE_RX) { 22708275SEric Cheng nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel); 22716495Sspeer } else { 22728275SEric Cheng nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel); 22736495Sspeer } 22746495Sspeer return (slot); 22756495Sspeer } 22766495Sspeer 22776495Sspeer MUTEX_ENTER(&nhd->lock); 22786495Sspeer 22796495Sspeer /* 22806495Sspeer * Tag this channel. 22816495Sspeer * -------------------------------------------------- 22826495Sspeer */ 22836495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 22846495Sspeer 22856495Sspeer dc->vr = vr; 22866495Sspeer dc->channel = (nxge_channel_t)channel; 22876495Sspeer 22886495Sspeer MUTEX_EXIT(&nhd->lock); 22896495Sspeer 22906495Sspeer /* 22916495Sspeer * vr->[t|r]x_group is used by the service domain to 22926495Sspeer * keep track of its shared DMA channels. 22936495Sspeer */ 22946495Sspeer MUTEX_ENTER(&nxge->group_lock); 22956495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 22966495Sspeer 22977755SMisaki.Kataoka@Sun.COM dc->group = group; 22986495Sspeer /* Initialize <group>, if necessary */ 22996495Sspeer if (group->count == 0) { 23006495Sspeer group->nxge = nxge; 23016495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 23026495Sspeer VP_BOUND_TX : VP_BOUND_RX; 23036495Sspeer group->sequence = nhd->sequence++; 23046495Sspeer group->active = B_TRUE; 23056495Sspeer } 23066495Sspeer 23076495Sspeer MUTEX_EXIT(&nxge->group_lock); 23086495Sspeer 23096495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 23106495Sspeer "DC share: %cDC %d was assigned to slot %d", 23116495Sspeer type == MAC_RING_TYPE_TX ? 
'T' : 'R', channel, slot)); 23126495Sspeer 23136495Sspeer nxge_grp_dc_append(nxge, group, dc); 23146495Sspeer 23156495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 23166495Sspeer 23176495Sspeer return (0); 23186495Sspeer } 23196495Sspeer 23206495Sspeer /* 23216495Sspeer * nxge_hio_tdc_unshare 23226495Sspeer * 23236495Sspeer * Unshare a TDC. 23246495Sspeer * 23256495Sspeer * Arguments: 23266495Sspeer * nxge 23276495Sspeer * channel The channel to unshare (add again). 23286495Sspeer * 23296495Sspeer * Notes: 23306495Sspeer * 23316495Sspeer * Context: 23326495Sspeer * Service domain 23336495Sspeer */ 23346495Sspeer void 23356495Sspeer nxge_hio_tdc_unshare( 23366495Sspeer nxge_t *nxge, 23378275SEric Cheng int dev_grpid, 23386495Sspeer int channel) 23396495Sspeer { 23406495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 23418275SEric Cheng nxge_grp_t *group; 23428275SEric Cheng int grpid; 23436495Sspeer 23446495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 23456495Sspeer 23466495Sspeer NXGE_DC_RESET(set->shared.map, channel); 23476495Sspeer set->shared.count--; 23486495Sspeer 23498275SEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid; 23508275SEric Cheng group = set->group[grpid]; 23518275SEric Cheng 23527755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 23536495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 23546495Sspeer "Failed to initialize TxDMA channel %d", channel)); 23556495Sspeer return; 23566495Sspeer } 23576495Sspeer 23586495Sspeer /* Re-add this interrupt. 
*/ 23596495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 23606495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 23616495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 23626495Sspeer } 23636495Sspeer 23646495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 23656495Sspeer } 23666495Sspeer 23676495Sspeer /* 23686495Sspeer * nxge_hio_rdc_unshare 23696495Sspeer * 23706495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 23716495Sspeer * 23726495Sspeer * Arguments: 23736495Sspeer * nxge 23746495Sspeer * channel The channel to unshare (add again). 23756495Sspeer * 23766495Sspeer * Notes: 23776495Sspeer * 23786495Sspeer * Context: 23796495Sspeer * Service domain 23806495Sspeer */ 23816495Sspeer void 23826495Sspeer nxge_hio_rdc_unshare( 23836495Sspeer nxge_t *nxge, 23848275SEric Cheng int dev_grpid, 23856495Sspeer int channel) 23866495Sspeer { 23878275SEric Cheng nxge_grp_set_t *set = &nxge->rx_set; 23888275SEric Cheng nxge_grp_t *group; 23898275SEric Cheng int grpid; 23906495Sspeer 23916495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 23926495Sspeer 23936495Sspeer /* Stop RxMAC = A.9.2.6 */ 23946495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 23956495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 23966495Sspeer "Failed to disable RxMAC")); 23976495Sspeer } 23986495Sspeer 23996495Sspeer /* Drain IPP Port = A.9.3.6 */ 24006495Sspeer (void) nxge_ipp_drain(nxge); 24016495Sspeer 24026495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 24036495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 24046495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 24056495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 24066495Sspeer "Failed to disable RxDMA channel %d", channel)); 24076495Sspeer } 24086495Sspeer 24096495Sspeer NXGE_DC_RESET(set->shared.map, channel); 24106495Sspeer set->shared.count--; 
24116495Sspeer 24128275SEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid; 24138275SEric Cheng group = set->group[grpid]; 24148275SEric Cheng 24156495Sspeer /* 24166495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 24176495Sspeer * 24186495Sspeer * Initialize RxDMA A.9.5.4 24196495Sspeer * Reconfigure RxDMA 24206495Sspeer * Enable RxDMA A.9.5.5 24216495Sspeer */ 24227755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 24236495Sspeer /* Be sure to re-enable the RX MAC. */ 24246495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 24256495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24268275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 24276495Sspeer } 24286495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 24296495Sspeer "Failed to initialize RxDMA channel %d", channel)); 24306495Sspeer return; 24316495Sspeer } 24326495Sspeer 24336495Sspeer /* 24346495Sspeer * Enable RxMAC = A.9.2.10 24356495Sspeer */ 24366495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 24376495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24388275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 24396495Sspeer return; 24406495Sspeer } 24416495Sspeer 24426495Sspeer /* Re-add this interrupt. */ 24436495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 24446495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24457950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Failed to add interrupt for " 24466495Sspeer "RxDMA CHANNEL %d", channel)); 24476495Sspeer } 24486495Sspeer 24496495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 24506495Sspeer } 24516495Sspeer 24526495Sspeer /* 24536495Sspeer * nxge_hio_dc_unshare 24546495Sspeer * 24556495Sspeer * Unshare (reuse) a DMA channel. 24566495Sspeer * 24576495Sspeer * Arguments: 24586495Sspeer * nxge 24596495Sspeer * vr The VR that <channel> belongs to. 24606495Sspeer * type Tx or Rx. 24616495Sspeer * channel The DMA channel to reuse. 
24626495Sspeer * 24636495Sspeer * Notes: 24646495Sspeer * 24656495Sspeer * Context: 24666495Sspeer * Service domain 24676495Sspeer */ 24686495Sspeer void 24696495Sspeer nxge_hio_dc_unshare( 24706495Sspeer nxge_t *nxge, 24716495Sspeer nxge_hio_vr_t *vr, 24726495Sspeer mac_ring_type_t type, 24736495Sspeer int channel) 24746495Sspeer { 24756495Sspeer nxge_grp_t *group; 24766495Sspeer nxge_hio_dc_t *dc; 24776495Sspeer 24786495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 24796495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 24806495Sspeer 24816495Sspeer /* Unlink the channel from its group. */ 24826495Sspeer /* -------------------------------------------------- */ 24836495Sspeer group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 24846602Sspeer NXGE_DC_RESET(group->map, channel); 24856495Sspeer if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 24866495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24877950SMichael.Speer@Sun.COM "nxge_hio_dc_unshare(%d) failed", channel)); 24886495Sspeer return; 24896495Sspeer } 24906495Sspeer 24916495Sspeer dc->vr = 0; 24926495Sspeer dc->cookie = 0; 24936495Sspeer 24946495Sspeer if (type == MAC_RING_TYPE_RX) { 24958275SEric Cheng nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel); 24966495Sspeer } else { 24978275SEric Cheng nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel); 24986495Sspeer } 24996495Sspeer 25006495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 25016495Sspeer } 25026495Sspeer 25038400SNicolas.Droux@Sun.COM 25048400SNicolas.Droux@Sun.COM /* 25058400SNicolas.Droux@Sun.COM * nxge_hio_rxdma_bind_intr(): 25068400SNicolas.Droux@Sun.COM * 25078400SNicolas.Droux@Sun.COM * For the guest domain driver, need to bind the interrupt group 25088400SNicolas.Droux@Sun.COM * and state to the rx_rcr_ring_t. 
25098400SNicolas.Droux@Sun.COM */ 25108400SNicolas.Droux@Sun.COM 25118400SNicolas.Droux@Sun.COM int 25128400SNicolas.Droux@Sun.COM nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel) 25138400SNicolas.Droux@Sun.COM { 25148400SNicolas.Droux@Sun.COM nxge_hio_dc_t *dc; 25158400SNicolas.Droux@Sun.COM nxge_ldgv_t *control; 25168400SNicolas.Droux@Sun.COM nxge_ldg_t *group; 25178400SNicolas.Droux@Sun.COM nxge_ldv_t *device; 25188400SNicolas.Droux@Sun.COM 25198400SNicolas.Droux@Sun.COM /* 25208400SNicolas.Droux@Sun.COM * Find the DMA channel. 25218400SNicolas.Droux@Sun.COM */ 25228400SNicolas.Droux@Sun.COM if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) { 25238400SNicolas.Droux@Sun.COM return (NXGE_ERROR); 25248400SNicolas.Droux@Sun.COM } 25258400SNicolas.Droux@Sun.COM 25268400SNicolas.Droux@Sun.COM /* 25278400SNicolas.Droux@Sun.COM * Get the control structure. 25288400SNicolas.Droux@Sun.COM */ 25298400SNicolas.Droux@Sun.COM control = nxge->ldgvp; 25308400SNicolas.Droux@Sun.COM if (control == NULL) { 25318400SNicolas.Droux@Sun.COM return (NXGE_ERROR); 25328400SNicolas.Droux@Sun.COM } 25338400SNicolas.Droux@Sun.COM 25348400SNicolas.Droux@Sun.COM group = &control->ldgp[dc->ldg.vector]; 25358400SNicolas.Droux@Sun.COM device = &control->ldvp[dc->ldg.ldsv]; 25368400SNicolas.Droux@Sun.COM 25378400SNicolas.Droux@Sun.COM MUTEX_ENTER(&ring->lock); 25388400SNicolas.Droux@Sun.COM ring->ldgp = group; 25398400SNicolas.Droux@Sun.COM ring->ldvp = device; 25408400SNicolas.Droux@Sun.COM MUTEX_EXIT(&ring->lock); 25418400SNicolas.Droux@Sun.COM 25428400SNicolas.Droux@Sun.COM return (NXGE_OK); 25438400SNicolas.Droux@Sun.COM } 25446495Sspeer #endif /* if defined(sun4v) */ 2545