16495Sspeer /* 26495Sspeer * CDDL HEADER START 36495Sspeer * 46495Sspeer * The contents of this file are subject to the terms of the 56495Sspeer * Common Development and Distribution License (the "License"). 66495Sspeer * You may not use this file except in compliance with the License. 76495Sspeer * 86495Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 96495Sspeer * or http://www.opensolaris.org/os/licensing. 106495Sspeer * See the License for the specific language governing permissions 116495Sspeer * and limitations under the License. 126495Sspeer * 136495Sspeer * When distributing Covered Code, include this CDDL HEADER in each 146495Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 156495Sspeer * If applicable, add the following below this CDDL HEADER, with the 166495Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 176495Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 186495Sspeer * 196495Sspeer * CDDL HEADER END 206495Sspeer */ 216495Sspeer 226495Sspeer /* 236495Sspeer * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 246495Sspeer * Use is subject to license terms. 256495Sspeer */ 266495Sspeer 276495Sspeer /* 286495Sspeer * nxge_hio.c 296495Sspeer * 306495Sspeer * This file manages the virtualization resources for Neptune 316495Sspeer * devices. That is, it implements a hybrid I/O (HIO) approach in the 326495Sspeer * Solaris kernel, whereby a guest domain on an LDOMs server may 336495Sspeer * request & use hardware resources from the service domain. 
346495Sspeer * 356495Sspeer */ 366495Sspeer 378275SEric Cheng #include <sys/mac_provider.h> 386495Sspeer #include <sys/nxge/nxge_impl.h> 396495Sspeer #include <sys/nxge/nxge_fzc.h> 406495Sspeer #include <sys/nxge/nxge_rxdma.h> 416495Sspeer #include <sys/nxge/nxge_txdma.h> 426495Sspeer #include <sys/nxge/nxge_hio.h> 436495Sspeer 446495Sspeer #define NXGE_HIO_SHARE_MIN_CHANNELS 2 456495Sspeer #define NXGE_HIO_SHARE_MAX_CHANNELS 2 466495Sspeer 476495Sspeer /* 486495Sspeer * External prototypes 496495Sspeer */ 506495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 516495Sspeer 526495Sspeer /* The following function may be found in nxge_main.c */ 538275SEric Cheng extern int nxge_m_mmac_remove(void *arg, int slot); 548275SEric Cheng extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 558275SEric Cheng boolean_t usetbl); 566495Sspeer 576495Sspeer /* The following function may be found in nxge_[t|r]xdma.c */ 586495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 596495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 606495Sspeer 616495Sspeer /* 626495Sspeer * Local prototypes 636495Sspeer */ 646495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 656495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 666495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group); 676495Sspeer 686495Sspeer /* 696495Sspeer * These functions are used by both service & guest domains to 706495Sspeer * decide whether they're running in an LDOMs/XEN environment 716495Sspeer * or not. If so, then the Hybrid I/O (HIO) module is initialized. 726495Sspeer */ 736495Sspeer 746495Sspeer /* 756495Sspeer * nxge_get_environs 766495Sspeer * 776495Sspeer * Figure out if we are in a guest domain or not. 
786495Sspeer * 796495Sspeer * Arguments: 806495Sspeer * nxge 816495Sspeer * 826495Sspeer * Notes: 836495Sspeer * 846495Sspeer * Context: 856495Sspeer * Any domain 866495Sspeer */ 876495Sspeer void 886495Sspeer nxge_get_environs( 896495Sspeer nxge_t *nxge) 906495Sspeer { 916495Sspeer char *string; 926495Sspeer 936495Sspeer /* 946495Sspeer * In the beginning, assume that we are running sans LDOMs/XEN. 956495Sspeer */ 966495Sspeer nxge->environs = SOLARIS_DOMAIN; 976495Sspeer 986495Sspeer /* 996495Sspeer * Are we a hybrid I/O (HIO) guest domain driver? 1006495Sspeer */ 1016495Sspeer if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 1026495Sspeer DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 1036495Sspeer "niutype", &string)) == DDI_PROP_SUCCESS) { 1046495Sspeer if (strcmp(string, "n2niu") == 0) { 1056495Sspeer nxge->environs = SOLARIS_GUEST_DOMAIN; 1066495Sspeer /* So we can allocate properly-aligned memory. */ 1076495Sspeer nxge->niu_type = N2_NIU; 1086495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 1096495Sspeer "Hybrid IO-capable guest domain")); 1106495Sspeer } 1116495Sspeer ddi_prop_free(string); 1126495Sspeer } 1136495Sspeer } 1146495Sspeer 1156495Sspeer #if !defined(sun4v) 1166495Sspeer 1176495Sspeer /* 1186495Sspeer * nxge_hio_init 1196495Sspeer * 1206495Sspeer * Initialize the HIO module of the NXGE driver. 1216495Sspeer * 1226495Sspeer * Arguments: 1236495Sspeer * nxge 1246495Sspeer * 1256495Sspeer * Notes: 1266495Sspeer * This is the non-hybrid I/O version of this function. 
1276495Sspeer * 1286495Sspeer * Context: 1296495Sspeer * Any domain 1306495Sspeer */ 1316495Sspeer int 1327587SMichael.Speer@Sun.COM nxge_hio_init(nxge_t *nxge) 1336495Sspeer { 1346495Sspeer nxge_hio_data_t *nhd; 1358275SEric Cheng int i; 1366495Sspeer 1376495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1386495Sspeer if (nhd == 0) { 1396495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 1406495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 1416495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 1426495Sspeer } 1436495Sspeer 1448275SEric Cheng /* 1458275SEric Cheng * Initialize share and ring group structures. 1468275SEric Cheng */ 1478275SEric Cheng for (i = 0; i < NXGE_MAX_TDCS; i++) 1488275SEric Cheng nxge->tdc_is_shared[i] = B_FALSE; 1498275SEric Cheng 1508275SEric Cheng for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) { 1518275SEric Cheng nxge->tx_hio_groups[i].ghandle = NULL; 1528275SEric Cheng nxge->tx_hio_groups[i].nxgep = nxge; 1538275SEric Cheng nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX; 1548275SEric Cheng nxge->tx_hio_groups[i].gindex = 0; 1558275SEric Cheng nxge->tx_hio_groups[i].sindex = 0; 1568275SEric Cheng } 1578275SEric Cheng 1588275SEric Cheng for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 1598275SEric Cheng nxge->rx_hio_groups[i].ghandle = NULL; 1608275SEric Cheng nxge->rx_hio_groups[i].nxgep = nxge; 1618275SEric Cheng nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX; 1628275SEric Cheng nxge->rx_hio_groups[i].gindex = 0; 1638275SEric Cheng nxge->rx_hio_groups[i].sindex = 0; 1648275SEric Cheng nxge->rx_hio_groups[i].started = B_FALSE; 1658275SEric Cheng nxge->rx_hio_groups[i].rdctbl = -1; 1668275SEric Cheng nxge->rx_hio_groups[i].n_mac_addrs = 0; 1678275SEric Cheng } 1688275SEric Cheng 1696495Sspeer nhd->hio.ldoms = B_FALSE; 1706495Sspeer 1716495Sspeer return (NXGE_OK); 1726495Sspeer } 1736495Sspeer 1746495Sspeer #endif 1756495Sspeer 1766495Sspeer void 1777587SMichael.Speer@Sun.COM nxge_hio_uninit(nxge_t *nxge) 1786495Sspeer { 
1796495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1806495Sspeer 1816495Sspeer ASSERT(nxge->nxge_hw_p->ndevs == 0); 1826495Sspeer 1837587SMichael.Speer@Sun.COM if (nhd != NULL) { 1847587SMichael.Speer@Sun.COM MUTEX_DESTROY(&nhd->lock); 1857587SMichael.Speer@Sun.COM KMEM_FREE(nhd, sizeof (*nhd)); 1867587SMichael.Speer@Sun.COM nxge->nxge_hw_p->hio = 0; 1877587SMichael.Speer@Sun.COM } 1886495Sspeer } 1896495Sspeer 1906495Sspeer /* 1916495Sspeer * nxge_dci_map 1926495Sspeer * 1936495Sspeer * Map a DMA channel index to a channel number. 1946495Sspeer * 1956495Sspeer * Arguments: 1966495Sspeer * instance The instance number of the driver. 1976495Sspeer * type The type of channel this is: Tx or Rx. 1986495Sspeer * index The index to convert to a channel number 1996495Sspeer * 2006495Sspeer * Notes: 2016495Sspeer * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 2026495Sspeer * 2036495Sspeer * Context: 2046495Sspeer * Any domain 2056495Sspeer */ 2066495Sspeer int 2076495Sspeer nxge_dci_map( 2086495Sspeer nxge_t *nxge, 2096495Sspeer vpc_type_t type, 2106495Sspeer int index) 2116495Sspeer { 2126495Sspeer nxge_grp_set_t *set; 2136495Sspeer int dc; 2146495Sspeer 2156495Sspeer switch (type) { 2166495Sspeer case VP_BOUND_TX: 2176495Sspeer set = &nxge->tx_set; 2186495Sspeer break; 2196495Sspeer case VP_BOUND_RX: 2206495Sspeer set = &nxge->rx_set; 2216495Sspeer break; 2226495Sspeer } 2236495Sspeer 2246495Sspeer for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 2256495Sspeer if ((1 << dc) & set->owned.map) { 2266495Sspeer if (index == 0) 2276495Sspeer return (dc); 2286495Sspeer else 2296495Sspeer index--; 2306495Sspeer } 2316495Sspeer } 2326495Sspeer 2336495Sspeer return (-1); 2346495Sspeer } 2356495Sspeer 2366495Sspeer /* 2376495Sspeer * --------------------------------------------------------------------- 2386495Sspeer * These are the general-purpose DMA channel group functions. 
That is, 2396495Sspeer * these functions are used to manage groups of TDCs or RDCs in an HIO 2406495Sspeer * environment. 2416495Sspeer * 2426495Sspeer * But is also expected that in the future they will be able to manage 2436495Sspeer * Crossbow groups. 2446495Sspeer * --------------------------------------------------------------------- 2456495Sspeer */ 2466495Sspeer 2476495Sspeer /* 2487766SMichael.Speer@Sun.COM * nxge_grp_cleanup(p_nxge_t nxge) 2497766SMichael.Speer@Sun.COM * 2507766SMichael.Speer@Sun.COM * Remove all outstanding groups. 2517766SMichael.Speer@Sun.COM * 2527766SMichael.Speer@Sun.COM * Arguments: 2537766SMichael.Speer@Sun.COM * nxge 2547766SMichael.Speer@Sun.COM */ 2557766SMichael.Speer@Sun.COM void 2567766SMichael.Speer@Sun.COM nxge_grp_cleanup(p_nxge_t nxge) 2577766SMichael.Speer@Sun.COM { 2587766SMichael.Speer@Sun.COM nxge_grp_set_t *set; 2597766SMichael.Speer@Sun.COM int i; 2607766SMichael.Speer@Sun.COM 2617766SMichael.Speer@Sun.COM MUTEX_ENTER(&nxge->group_lock); 2627766SMichael.Speer@Sun.COM 2637766SMichael.Speer@Sun.COM /* 2647766SMichael.Speer@Sun.COM * Find RX groups that need to be cleaned up. 2657766SMichael.Speer@Sun.COM */ 2667766SMichael.Speer@Sun.COM set = &nxge->rx_set; 2677766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2687766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2697766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2707766SMichael.Speer@Sun.COM set->group[i] = NULL; 2717766SMichael.Speer@Sun.COM } 2727766SMichael.Speer@Sun.COM } 2737766SMichael.Speer@Sun.COM 2747766SMichael.Speer@Sun.COM /* 2757766SMichael.Speer@Sun.COM * Find TX groups that need to be cleaned up. 
2767766SMichael.Speer@Sun.COM */ 2777766SMichael.Speer@Sun.COM set = &nxge->tx_set; 2787766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2797766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2807766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2817766SMichael.Speer@Sun.COM set->group[i] = NULL; 2827766SMichael.Speer@Sun.COM } 2837766SMichael.Speer@Sun.COM } 2847766SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 2857766SMichael.Speer@Sun.COM } 2867766SMichael.Speer@Sun.COM 2877766SMichael.Speer@Sun.COM 2887766SMichael.Speer@Sun.COM /* 2896495Sspeer * nxge_grp_add 2906495Sspeer * 2916495Sspeer * Add a group to an instance of NXGE. 2926495Sspeer * 2936495Sspeer * Arguments: 2946495Sspeer * nxge 2956495Sspeer * type Tx or Rx 2966495Sspeer * 2976495Sspeer * Notes: 2986495Sspeer * 2996495Sspeer * Context: 3006495Sspeer * Any domain 3016495Sspeer */ 3027755SMisaki.Kataoka@Sun.COM nxge_grp_t * 3036495Sspeer nxge_grp_add( 3046495Sspeer nxge_t *nxge, 3056495Sspeer nxge_grp_type_t type) 3066495Sspeer { 3076495Sspeer nxge_grp_set_t *set; 3086495Sspeer nxge_grp_t *group; 3096495Sspeer int i; 3106495Sspeer 3116495Sspeer group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 3126495Sspeer group->nxge = nxge; 3136495Sspeer 3146495Sspeer MUTEX_ENTER(&nxge->group_lock); 3156495Sspeer switch (type) { 3166495Sspeer case NXGE_TRANSMIT_GROUP: 3176495Sspeer case EXT_TRANSMIT_GROUP: 3186495Sspeer set = &nxge->tx_set; 3196495Sspeer break; 3206495Sspeer default: 3216495Sspeer set = &nxge->rx_set; 3226495Sspeer break; 3236495Sspeer } 3246495Sspeer 3256495Sspeer group->type = type; 3266495Sspeer group->active = B_TRUE; 3276495Sspeer group->sequence = set->sequence++; 3286495Sspeer 3296495Sspeer /* Find an empty slot for this logical group. 
*/ 3306495Sspeer for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 3316495Sspeer if (set->group[i] == 0) { 3326495Sspeer group->index = i; 3336495Sspeer set->group[i] = group; 3346495Sspeer NXGE_DC_SET(set->lg.map, i); 3356495Sspeer set->lg.count++; 3366495Sspeer break; 3376495Sspeer } 3386495Sspeer } 3396495Sspeer MUTEX_EXIT(&nxge->group_lock); 3406495Sspeer 3416495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3426495Sspeer "nxge_grp_add: %cgroup = %d.%d", 3436495Sspeer type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3446495Sspeer nxge->mac.portnum, group->sequence)); 3456495Sspeer 3467755SMisaki.Kataoka@Sun.COM return (group); 3476495Sspeer } 3486495Sspeer 3496495Sspeer void 3506495Sspeer nxge_grp_remove( 3516495Sspeer nxge_t *nxge, 3527755SMisaki.Kataoka@Sun.COM nxge_grp_t *group) /* The group to remove. */ 3536495Sspeer { 3546495Sspeer nxge_grp_set_t *set; 3556495Sspeer vpc_type_t type; 3566495Sspeer 3576495Sspeer MUTEX_ENTER(&nxge->group_lock); 3586495Sspeer switch (group->type) { 3596495Sspeer case NXGE_TRANSMIT_GROUP: 3606495Sspeer case EXT_TRANSMIT_GROUP: 3616495Sspeer set = &nxge->tx_set; 3626495Sspeer break; 3636495Sspeer default: 3646495Sspeer set = &nxge->rx_set; 3656495Sspeer break; 3666495Sspeer } 3676495Sspeer 3686495Sspeer if (set->group[group->index] != group) { 3696495Sspeer MUTEX_EXIT(&nxge->group_lock); 3706495Sspeer return; 3716495Sspeer } 3726495Sspeer 3736495Sspeer set->group[group->index] = 0; 3746495Sspeer NXGE_DC_RESET(set->lg.map, group->index); 3756495Sspeer set->lg.count--; 3766495Sspeer 3776495Sspeer /* While inside the mutex, deactivate <group>. */ 3786495Sspeer group->active = B_FALSE; 3796495Sspeer 3806495Sspeer MUTEX_EXIT(&nxge->group_lock); 3816495Sspeer 3826495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3836495Sspeer "nxge_grp_remove(%c.%d.%d) called", 3846495Sspeer group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3856495Sspeer nxge->mac.portnum, group->sequence)); 3866495Sspeer 3876495Sspeer /* Now, remove any DCs which are still active. 
*/ 3886495Sspeer switch (group->type) { 3896495Sspeer default: 3906495Sspeer type = VP_BOUND_TX; 3916495Sspeer break; 3926495Sspeer case NXGE_RECEIVE_GROUP: 3936495Sspeer case EXT_RECEIVE_GROUP: 3946495Sspeer type = VP_BOUND_RX; 3956495Sspeer } 3966495Sspeer 3976495Sspeer while (group->dc) { 3986495Sspeer nxge_grp_dc_remove(nxge, type, group->dc->channel); 3996495Sspeer } 4006495Sspeer 4016495Sspeer KMEM_FREE(group, sizeof (*group)); 4026495Sspeer } 4036495Sspeer 4046495Sspeer /* 4057950SMichael.Speer@Sun.COM * nxge_grp_dc_add 4066495Sspeer * 4076495Sspeer * Add a DMA channel to a VR/Group. 4086495Sspeer * 4096495Sspeer * Arguments: 4106495Sspeer * nxge 4116495Sspeer * channel The channel to add. 4126495Sspeer * Notes: 4136495Sspeer * 4146495Sspeer * Context: 4156495Sspeer * Any domain 4166495Sspeer */ 4176495Sspeer /* ARGSUSED */ 4186495Sspeer int 4196495Sspeer nxge_grp_dc_add( 4206495Sspeer nxge_t *nxge, 4217755SMisaki.Kataoka@Sun.COM nxge_grp_t *group, /* The group to add <channel> to. 
*/ 4226495Sspeer vpc_type_t type, /* Rx or Tx */ 4236495Sspeer int channel) /* A physical/logical channel number */ 4246495Sspeer { 4256495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 4266495Sspeer nxge_hio_dc_t *dc; 4276495Sspeer nxge_grp_set_t *set; 4286602Sspeer nxge_status_t status = NXGE_OK; 4296495Sspeer 4306495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 4316495Sspeer 4328275SEric Cheng if (group == 0) 4336495Sspeer return (0); 4346495Sspeer 4356495Sspeer switch (type) { 4367950SMichael.Speer@Sun.COM case VP_BOUND_TX: 4376495Sspeer set = &nxge->tx_set; 4386495Sspeer if (channel > NXGE_MAX_TDCS) { 4396495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4406495Sspeer "nxge_grp_dc_add: TDC = %d", channel)); 4416495Sspeer return (NXGE_ERROR); 4426495Sspeer } 4436495Sspeer break; 4446495Sspeer case VP_BOUND_RX: 4456495Sspeer set = &nxge->rx_set; 4466495Sspeer if (channel > NXGE_MAX_RDCS) { 4476495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4486495Sspeer "nxge_grp_dc_add: RDC = %d", channel)); 4496495Sspeer return (NXGE_ERROR); 4506495Sspeer } 4516495Sspeer break; 4527950SMichael.Speer@Sun.COM 4537950SMichael.Speer@Sun.COM default: 4547950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4557950SMichael.Speer@Sun.COM "nxge_grp_dc_add: unknown type channel(%d)", channel)); 4566495Sspeer } 4576495Sspeer 4586495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 4596495Sspeer "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 4606495Sspeer type == VP_BOUND_TX ? 't' : 'r', 4616495Sspeer nxge->mac.portnum, group->sequence, group->count, channel)); 4626495Sspeer 4636495Sspeer MUTEX_ENTER(&nxge->group_lock); 4646495Sspeer if (group->active != B_TRUE) { 4656495Sspeer /* We may be in the process of removing this group. 
*/ 4666495Sspeer MUTEX_EXIT(&nxge->group_lock); 4676495Sspeer return (NXGE_ERROR); 4686495Sspeer } 4696495Sspeer MUTEX_EXIT(&nxge->group_lock); 4706495Sspeer 4716495Sspeer if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 4726495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4736495Sspeer "nxge_grp_dc_add(%d): DC FIND failed", channel)); 4746495Sspeer return (NXGE_ERROR); 4756495Sspeer } 4766495Sspeer 4776495Sspeer MUTEX_ENTER(&nhd->lock); 4786495Sspeer 4796495Sspeer if (dc->group) { 4806495Sspeer MUTEX_EXIT(&nhd->lock); 4816495Sspeer /* This channel is already in use! */ 4826495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4836495Sspeer "nxge_grp_dc_add(%d): channel already in group", channel)); 4846495Sspeer return (NXGE_ERROR); 4856495Sspeer } 4866495Sspeer 4876495Sspeer dc->next = 0; 4886495Sspeer dc->page = channel; 4896495Sspeer dc->channel = (nxge_channel_t)channel; 4906495Sspeer 4916495Sspeer dc->type = type; 4926495Sspeer if (type == VP_BOUND_RX) { 4936495Sspeer dc->init = nxge_init_rxdma_channel; 4946495Sspeer dc->uninit = nxge_uninit_rxdma_channel; 4956495Sspeer } else { 4966495Sspeer dc->init = nxge_init_txdma_channel; 4976495Sspeer dc->uninit = nxge_uninit_txdma_channel; 4986495Sspeer } 4996495Sspeer 5007755SMisaki.Kataoka@Sun.COM dc->group = group; 5016495Sspeer 5026495Sspeer if (isLDOMguest(nxge)) 5036495Sspeer (void) nxge_hio_ldsv_add(nxge, dc); 5046495Sspeer 5056495Sspeer NXGE_DC_SET(set->owned.map, channel); 5066495Sspeer set->owned.count++; 5076495Sspeer 5086495Sspeer MUTEX_EXIT(&nhd->lock); 5096495Sspeer 5106602Sspeer if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 5116602Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5126602Sspeer "nxge_grp_dc_add(%d): channel init failed", channel)); 5137950SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5147950SMichael.Speer@Sun.COM (void) memset(dc, 0, sizeof (*dc)); 5157950SMichael.Speer@Sun.COM NXGE_DC_RESET(set->owned.map, channel); 5167950SMichael.Speer@Sun.COM set->owned.count--; 
5177950SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5186603Sspeer return (NXGE_ERROR); 5196602Sspeer } 5206602Sspeer 5216495Sspeer nxge_grp_dc_append(nxge, group, dc); 5226495Sspeer 5237812SMichael.Speer@Sun.COM if (type == VP_BOUND_TX) { 5247812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5257812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_FALSE; 5267812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5277812SMichael.Speer@Sun.COM } 5287812SMichael.Speer@Sun.COM 5296495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 5306495Sspeer 5316602Sspeer return ((int)status); 5326495Sspeer } 5336495Sspeer 5346495Sspeer void 5356495Sspeer nxge_grp_dc_remove( 5366495Sspeer nxge_t *nxge, 5376495Sspeer vpc_type_t type, 5386495Sspeer int channel) 5396495Sspeer { 5406495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5416495Sspeer nxge_hio_dc_t *dc; 5426495Sspeer nxge_grp_set_t *set; 5436495Sspeer nxge_grp_t *group; 5446495Sspeer 5456495Sspeer dc_uninit_t uninit; 5466495Sspeer 5476495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 5486495Sspeer 5497950SMichael.Speer@Sun.COM if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) 5507950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5517950SMichael.Speer@Sun.COM 5527950SMichael.Speer@Sun.COM if ((dc->group == NULL) && (dc->next == 0) && 5537950SMichael.Speer@Sun.COM (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) { 5547950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5556495Sspeer } 5567950SMichael.Speer@Sun.COM 5576495Sspeer group = (nxge_grp_t *)dc->group; 5586495Sspeer 5596495Sspeer if (isLDOMguest(nxge)) { 5606495Sspeer (void) nxge_hio_intr_remove(nxge, type, channel); 5616495Sspeer } 5626495Sspeer 5636495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 5646495Sspeer "DC remove: group = %d.%d.%d, %cdc %d", 5656495Sspeer nxge->mac.portnum, group->sequence, group->count, 5666495Sspeer type == VP_BOUND_TX ? 
't' : 'r', dc->channel)); 5676495Sspeer 5686495Sspeer MUTEX_ENTER(&nhd->lock); 5696495Sspeer 5706602Sspeer set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 5716602Sspeer 5726495Sspeer /* Remove the DC from its group. */ 5736495Sspeer if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 5746495Sspeer MUTEX_EXIT(&nhd->lock); 5756495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5767950SMichael.Speer@Sun.COM "nxge_grp_dc_remove(%d) failed", channel)); 5777950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5786495Sspeer } 5796495Sspeer 5806495Sspeer uninit = dc->uninit; 5816495Sspeer channel = dc->channel; 5826495Sspeer 5836495Sspeer NXGE_DC_RESET(set->owned.map, channel); 5846495Sspeer set->owned.count--; 5856495Sspeer 5866495Sspeer (void) memset(dc, 0, sizeof (*dc)); 5876495Sspeer 5886495Sspeer MUTEX_EXIT(&nhd->lock); 5896495Sspeer 5906495Sspeer (*uninit)(nxge, channel); 5916495Sspeer 5927950SMichael.Speer@Sun.COM nxge_grp_dc_remove_exit: 5936495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 5946495Sspeer } 5956495Sspeer 5966495Sspeer nxge_hio_dc_t * 5976495Sspeer nxge_grp_dc_find( 5986495Sspeer nxge_t *nxge, 5996495Sspeer vpc_type_t type, /* Rx or Tx */ 6006495Sspeer int channel) 6016495Sspeer { 6026495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 6036495Sspeer nxge_hio_dc_t *current; 6046495Sspeer 6056495Sspeer current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 6066495Sspeer 6076495Sspeer if (!isLDOMguest(nxge)) { 6086495Sspeer return (¤t[channel]); 6096495Sspeer } else { 6106495Sspeer /* We're in a guest domain. */ 6116495Sspeer int i, limit = (type == VP_BOUND_TX) ? 
6126495Sspeer NXGE_MAX_TDCS : NXGE_MAX_RDCS; 6136495Sspeer 6146495Sspeer MUTEX_ENTER(&nhd->lock); 6156495Sspeer for (i = 0; i < limit; i++, current++) { 6166495Sspeer if (current->channel == channel) { 6176495Sspeer if (current->vr && current->vr->nxge == 6186495Sspeer (uintptr_t)nxge) { 6196495Sspeer MUTEX_EXIT(&nhd->lock); 6206495Sspeer return (current); 6216495Sspeer } 6226495Sspeer } 6236495Sspeer } 6246495Sspeer MUTEX_EXIT(&nhd->lock); 6256495Sspeer } 6266495Sspeer 6276495Sspeer return (0); 6286495Sspeer } 6296495Sspeer 6306495Sspeer /* 6316495Sspeer * nxge_grp_dc_append 6326495Sspeer * 6336495Sspeer * Append a DMA channel to a group. 6346495Sspeer * 6356495Sspeer * Arguments: 6366495Sspeer * nxge 6376495Sspeer * group The group to append to 6386495Sspeer * dc The DMA channel to append 6396495Sspeer * 6406495Sspeer * Notes: 6416495Sspeer * 6426495Sspeer * Context: 6436495Sspeer * Any domain 6446495Sspeer */ 6456495Sspeer static 6466495Sspeer void 6476495Sspeer nxge_grp_dc_append( 6486495Sspeer nxge_t *nxge, 6496495Sspeer nxge_grp_t *group, 6506495Sspeer nxge_hio_dc_t *dc) 6516495Sspeer { 6526495Sspeer MUTEX_ENTER(&nxge->group_lock); 6536495Sspeer 6546495Sspeer if (group->dc == 0) { 6556495Sspeer group->dc = dc; 6566495Sspeer } else { 6576495Sspeer nxge_hio_dc_t *current = group->dc; 6586495Sspeer do { 6596495Sspeer if (current->next == 0) { 6606495Sspeer current->next = dc; 6616495Sspeer break; 6626495Sspeer } 6636495Sspeer current = current->next; 6646495Sspeer } while (current); 6656495Sspeer } 6666495Sspeer 6676495Sspeer NXGE_DC_SET(group->map, dc->channel); 6686495Sspeer 6696495Sspeer nxge_grp_dc_map(group); 6706602Sspeer group->count++; 6716495Sspeer 6726495Sspeer MUTEX_EXIT(&nxge->group_lock); 6736495Sspeer } 6746495Sspeer 6756495Sspeer /* 6766495Sspeer * nxge_grp_dc_unlink 6776495Sspeer * 6786495Sspeer * Unlink a DMA channel fromits linked list (group). 
6796495Sspeer * 6806495Sspeer * Arguments: 6816495Sspeer * nxge 6826495Sspeer * group The group (linked list) to unlink from 6836495Sspeer * dc The DMA channel to append 6846495Sspeer * 6856495Sspeer * Notes: 6866495Sspeer * 6876495Sspeer * Context: 6886495Sspeer * Any domain 6896495Sspeer */ 6906495Sspeer nxge_hio_dc_t * 6918275SEric Cheng nxge_grp_dc_unlink( 6928275SEric Cheng nxge_t *nxge, 6938275SEric Cheng nxge_grp_t *group, 6948275SEric Cheng int channel) 6956495Sspeer { 6966495Sspeer nxge_hio_dc_t *current, *previous; 6976495Sspeer 6986495Sspeer MUTEX_ENTER(&nxge->group_lock); 6996495Sspeer 7007812SMichael.Speer@Sun.COM if (group == NULL) { 7017812SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 7027812SMichael.Speer@Sun.COM return (0); 7037812SMichael.Speer@Sun.COM } 7047812SMichael.Speer@Sun.COM 7056495Sspeer if ((current = group->dc) == 0) { 7066495Sspeer MUTEX_EXIT(&nxge->group_lock); 7076495Sspeer return (0); 7086495Sspeer } 7096495Sspeer 7106495Sspeer previous = 0; 7116495Sspeer do { 7126495Sspeer if (current->channel == channel) { 7136495Sspeer if (previous) 7146495Sspeer previous->next = current->next; 7156495Sspeer else 7166495Sspeer group->dc = current->next; 7176495Sspeer break; 7186495Sspeer } 7196495Sspeer previous = current; 7206495Sspeer current = current->next; 7216495Sspeer } while (current); 7226495Sspeer 7236495Sspeer if (current == 0) { 7246495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 7256495Sspeer "DC unlink: DC %d not found", channel)); 7266495Sspeer } else { 7276495Sspeer current->next = 0; 7286495Sspeer current->group = 0; 7296495Sspeer 7308275SEric Cheng NXGE_DC_RESET(group->map, channel); 7316495Sspeer group->count--; 7326495Sspeer } 7336495Sspeer 7346495Sspeer nxge_grp_dc_map(group); 7356495Sspeer 7366495Sspeer MUTEX_EXIT(&nxge->group_lock); 7376495Sspeer 7386495Sspeer return (current); 7396495Sspeer } 7406495Sspeer 7416495Sspeer /* 7426495Sspeer * nxge_grp_dc_map 7436495Sspeer * 7446495Sspeer * Map a linked list to an array 
of channel numbers. 7456495Sspeer * 7466495Sspeer * Arguments: 7476495Sspeer * nxge 7486495Sspeer * group The group to remap. 7496495Sspeer * 7506495Sspeer * Notes: 7516495Sspeer * It is expected that the caller will hold the correct mutex. 7526495Sspeer * 7536495Sspeer * Context: 7546495Sspeer * Service domain 7556495Sspeer */ 7566495Sspeer void 7576495Sspeer nxge_grp_dc_map( 7586495Sspeer nxge_grp_t *group) 7596495Sspeer { 7606495Sspeer nxge_channel_t *legend; 7616495Sspeer nxge_hio_dc_t *dc; 7626495Sspeer 7636495Sspeer (void) memset(group->legend, 0, sizeof (group->legend)); 7646495Sspeer 7656495Sspeer legend = group->legend; 7666495Sspeer dc = group->dc; 7676495Sspeer while (dc) { 7686495Sspeer *legend = dc->channel; 7696495Sspeer legend++; 7706495Sspeer dc = dc->next; 7716495Sspeer } 7726495Sspeer } 7736495Sspeer 7746495Sspeer /* 7756495Sspeer * --------------------------------------------------------------------- 7766495Sspeer * These are HIO debugging functions. 7776495Sspeer * --------------------------------------------------------------------- 7786495Sspeer */ 7796495Sspeer 7806495Sspeer /* 7816495Sspeer * nxge_delay 7826495Sspeer * 7836495Sspeer * Delay <seconds> number of seconds. 7846495Sspeer * 7856495Sspeer * Arguments: 7866495Sspeer * nxge 7876495Sspeer * group The group to append to 7886495Sspeer * dc The DMA channel to append 7896495Sspeer * 7906495Sspeer * Notes: 7916495Sspeer * This is a developer-only function. 
7926495Sspeer * 7936495Sspeer * Context: 7946495Sspeer * Any domain 7956495Sspeer */ 7966495Sspeer void 7976495Sspeer nxge_delay( 7986495Sspeer int seconds) 7996495Sspeer { 8006495Sspeer delay(drv_usectohz(seconds * 1000000)); 8016495Sspeer } 8026495Sspeer 8036495Sspeer static dmc_reg_name_t rx_names[] = { 8046495Sspeer { "RXDMA_CFIG1", 0 }, 8056495Sspeer { "RXDMA_CFIG2", 8 }, 8066495Sspeer { "RBR_CFIG_A", 0x10 }, 8076495Sspeer { "RBR_CFIG_B", 0x18 }, 8086495Sspeer { "RBR_KICK", 0x20 }, 8096495Sspeer { "RBR_STAT", 0x28 }, 8106495Sspeer { "RBR_HDH", 0x30 }, 8116495Sspeer { "RBR_HDL", 0x38 }, 8126495Sspeer { "RCRCFIG_A", 0x40 }, 8136495Sspeer { "RCRCFIG_B", 0x48 }, 8146495Sspeer { "RCRSTAT_A", 0x50 }, 8156495Sspeer { "RCRSTAT_B", 0x58 }, 8166495Sspeer { "RCRSTAT_C", 0x60 }, 8176495Sspeer { "RX_DMA_ENT_MSK", 0x68 }, 8186495Sspeer { "RX_DMA_CTL_STAT", 0x70 }, 8196495Sspeer { "RCR_FLSH", 0x78 }, 8206495Sspeer { "RXMISC", 0x90 }, 8216495Sspeer { "RX_DMA_CTL_STAT_DBG", 0x98 }, 8226495Sspeer { 0, -1 } 8236495Sspeer }; 8246495Sspeer 8256495Sspeer static dmc_reg_name_t tx_names[] = { 8266495Sspeer { "Tx_RNG_CFIG", 0 }, 8276495Sspeer { "Tx_RNG_HDL", 0x10 }, 8286495Sspeer { "Tx_RNG_KICK", 0x18 }, 8296495Sspeer { "Tx_ENT_MASK", 0x20 }, 8306495Sspeer { "Tx_CS", 0x28 }, 8316495Sspeer { "TxDMA_MBH", 0x30 }, 8326495Sspeer { "TxDMA_MBL", 0x38 }, 8336495Sspeer { "TxDMA_PRE_ST", 0x40 }, 8346495Sspeer { "Tx_RNG_ERR_LOGH", 0x48 }, 8356495Sspeer { "Tx_RNG_ERR_LOGL", 0x50 }, 8366495Sspeer { "TDMC_INTR_DBG", 0x60 }, 8376495Sspeer { "Tx_CS_DBG", 0x68 }, 8386495Sspeer { 0, -1 } 8396495Sspeer }; 8406495Sspeer 8416495Sspeer /* 8426495Sspeer * nxge_xx2str 8436495Sspeer * 8446495Sspeer * Translate a register address into a string. 8456495Sspeer * 8466495Sspeer * Arguments: 8476495Sspeer * offset The address of the register to translate. 8486495Sspeer * 8496495Sspeer * Notes: 8506495Sspeer * These are developer-only function. 
8516495Sspeer * 8526495Sspeer * Context: 8536495Sspeer * Any domain 8546495Sspeer */ 8556495Sspeer const char * 8566495Sspeer nxge_rx2str( 8576495Sspeer int offset) 8586495Sspeer { 8596495Sspeer dmc_reg_name_t *reg = &rx_names[0]; 8606495Sspeer 8616495Sspeer offset &= DMA_CSR_MASK; 8626495Sspeer 8636495Sspeer while (reg->name) { 8646495Sspeer if (offset == reg->offset) 8656495Sspeer return (reg->name); 8666495Sspeer reg++; 8676495Sspeer } 8686495Sspeer 8696495Sspeer return (0); 8706495Sspeer } 8716495Sspeer 8726495Sspeer const char * 8736495Sspeer nxge_tx2str( 8746495Sspeer int offset) 8756495Sspeer { 8766495Sspeer dmc_reg_name_t *reg = &tx_names[0]; 8776495Sspeer 8786495Sspeer offset &= DMA_CSR_MASK; 8796495Sspeer 8806495Sspeer while (reg->name) { 8816495Sspeer if (offset == reg->offset) 8826495Sspeer return (reg->name); 8836495Sspeer reg++; 8846495Sspeer } 8856495Sspeer 8866495Sspeer return (0); 8876495Sspeer } 8886495Sspeer 8896495Sspeer /* 8906495Sspeer * nxge_ddi_perror 8916495Sspeer * 8926495Sspeer * Map a DDI error number to a string. 8936495Sspeer * 8946495Sspeer * Arguments: 8956495Sspeer * ddi_error The DDI error number to map. 
8966495Sspeer * 8976495Sspeer * Notes: 8986495Sspeer * 8996495Sspeer * Context: 9006495Sspeer * Any domain 9016495Sspeer */ 9026495Sspeer const char * 9036495Sspeer nxge_ddi_perror( 9046495Sspeer int ddi_error) 9056495Sspeer { 9066495Sspeer switch (ddi_error) { 9076495Sspeer case DDI_SUCCESS: 9086495Sspeer return ("DDI_SUCCESS"); 9096495Sspeer case DDI_FAILURE: 9106495Sspeer return ("DDI_FAILURE"); 9116495Sspeer case DDI_NOT_WELL_FORMED: 9126495Sspeer return ("DDI_NOT_WELL_FORMED"); 9136495Sspeer case DDI_EAGAIN: 9146495Sspeer return ("DDI_EAGAIN"); 9156495Sspeer case DDI_EINVAL: 9166495Sspeer return ("DDI_EINVAL"); 9176495Sspeer case DDI_ENOTSUP: 9186495Sspeer return ("DDI_ENOTSUP"); 9196495Sspeer case DDI_EPENDING: 9206495Sspeer return ("DDI_EPENDING"); 9216495Sspeer case DDI_ENOMEM: 9226495Sspeer return ("DDI_ENOMEM"); 9236495Sspeer case DDI_EBUSY: 9246495Sspeer return ("DDI_EBUSY"); 9256495Sspeer case DDI_ETRANSPORT: 9266495Sspeer return ("DDI_ETRANSPORT"); 9276495Sspeer case DDI_ECONTEXT: 9286495Sspeer return ("DDI_ECONTEXT"); 9296495Sspeer default: 9306495Sspeer return ("Unknown error"); 9316495Sspeer } 9326495Sspeer } 9336495Sspeer 9346495Sspeer /* 9356495Sspeer * --------------------------------------------------------------------- 9366495Sspeer * These are Sun4v HIO function definitions 9376495Sspeer * --------------------------------------------------------------------- 9386495Sspeer */ 9396495Sspeer 9406495Sspeer #if defined(sun4v) 9416495Sspeer 9426495Sspeer /* 9436495Sspeer * Local prototypes 9446495Sspeer */ 9457755SMisaki.Kataoka@Sun.COM static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *); 9467755SMisaki.Kataoka@Sun.COM static void nxge_hio_unshare(nxge_hio_vr_t *); 9476495Sspeer 9488275SEric Cheng static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *); 9497755SMisaki.Kataoka@Sun.COM static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t); 9506495Sspeer 9518275SEric Cheng static void nxge_hio_tdc_unshare(nxge_t *nxge, 
int dev_grpid, int channel); 9528275SEric Cheng static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel); 9538275SEric Cheng static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int); 9546495Sspeer static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 9556495Sspeer mac_ring_type_t, int); 9566495Sspeer 9576495Sspeer /* 9586495Sspeer * nxge_hio_init 9596495Sspeer * 9606495Sspeer * Initialize the HIO module of the NXGE driver. 9616495Sspeer * 9626495Sspeer * Arguments: 9636495Sspeer * nxge 9646495Sspeer * 9656495Sspeer * Notes: 9666495Sspeer * 9676495Sspeer * Context: 9686495Sspeer * Any domain 9696495Sspeer */ 9706495Sspeer int 9716495Sspeer nxge_hio_init( 9726495Sspeer nxge_t *nxge) 9736495Sspeer { 9746495Sspeer nxge_hio_data_t *nhd; 9756495Sspeer int i, region; 9766495Sspeer 9776495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 9786495Sspeer if (nhd == 0) { 9796495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 9806495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 9816495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 9826495Sspeer } 9836495Sspeer 9846713Sspeer if ((nxge->environs == SOLARIS_DOMAIN) && 9856713Sspeer (nxge->niu_type == N2_NIU)) { 9866495Sspeer if (nxge->niu_hsvc_available == B_TRUE) { 9876495Sspeer hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 9886495Sspeer if (niu_hsvc->hsvc_major == 1 && 9896495Sspeer niu_hsvc->hsvc_minor == 1) 9906495Sspeer nxge->environs = SOLARIS_SERVICE_DOMAIN; 9916495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 9926495Sspeer "nxge_hio_init: hypervisor services " 9936495Sspeer "version %d.%d", 9946495Sspeer niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 9956495Sspeer } 9966495Sspeer } 9976495Sspeer 9988275SEric Cheng /* 9998275SEric Cheng * Initialize share and ring group structures. 
10008275SEric Cheng */ 10018275SEric Cheng for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) { 10028275SEric Cheng nxge->tx_hio_groups[i].ghandle = NULL; 10038275SEric Cheng nxge->tx_hio_groups[i].nxgep = nxge; 10048275SEric Cheng nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX; 10058275SEric Cheng nxge->tx_hio_groups[i].gindex = 0; 10068275SEric Cheng nxge->tx_hio_groups[i].sindex = 0; 10078275SEric Cheng } 10088275SEric Cheng 10098275SEric Cheng for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 10108275SEric Cheng nxge->rx_hio_groups[i].ghandle = NULL; 10118275SEric Cheng nxge->rx_hio_groups[i].nxgep = nxge; 10128275SEric Cheng nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX; 10138275SEric Cheng nxge->rx_hio_groups[i].gindex = 0; 10148275SEric Cheng nxge->rx_hio_groups[i].sindex = 0; 10158275SEric Cheng nxge->rx_hio_groups[i].started = B_FALSE; 10168275SEric Cheng nxge->rx_hio_groups[i].rdctbl = -1; 10178275SEric Cheng nxge->rx_hio_groups[i].n_mac_addrs = 0; 10188275SEric Cheng } 10198275SEric Cheng 10206495Sspeer if (!isLDOMs(nxge)) { 10216495Sspeer nhd->hio.ldoms = B_FALSE; 10226495Sspeer return (NXGE_OK); 10236495Sspeer } 10246495Sspeer 10256495Sspeer nhd->hio.ldoms = B_TRUE; 10266495Sspeer 10276495Sspeer /* 10286495Sspeer * Fill in what we can. 10296495Sspeer */ 10306495Sspeer for (region = 0; region < NXGE_VR_SR_MAX; region++) { 10316495Sspeer nhd->vr[region].region = region; 10326495Sspeer } 10337755SMisaki.Kataoka@Sun.COM nhd->vrs = NXGE_VR_SR_MAX - 2; 10346495Sspeer 10356495Sspeer /* 10368275SEric Cheng * Initialize the share stuctures. 
10376495Sspeer */ 10387812SMichael.Speer@Sun.COM for (i = 0; i < NXGE_MAX_TDCS; i++) 10397812SMichael.Speer@Sun.COM nxge->tdc_is_shared[i] = B_FALSE; 10407812SMichael.Speer@Sun.COM 10416495Sspeer for (i = 0; i < NXGE_VR_SR_MAX; i++) { 10426495Sspeer nxge->shares[i].nxgep = nxge; 10436495Sspeer nxge->shares[i].index = 0; 10448275SEric Cheng nxge->shares[i].vrp = NULL; 10456495Sspeer nxge->shares[i].tmap = 0; 10466495Sspeer nxge->shares[i].rmap = 0; 10476495Sspeer nxge->shares[i].rxgroup = 0; 10486495Sspeer nxge->shares[i].active = B_FALSE; 10496495Sspeer } 10506495Sspeer 10516495Sspeer /* Fill in the HV HIO function pointers. */ 10526495Sspeer nxge_hio_hv_init(nxge); 10536495Sspeer 10546495Sspeer if (isLDOMservice(nxge)) { 10556495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 10566495Sspeer "Hybrid IO-capable service domain")); 10576495Sspeer return (NXGE_OK); 10586495Sspeer } else { 10596495Sspeer /* 10606495Sspeer * isLDOMguest(nxge) == B_TRUE 10616495Sspeer */ 10626495Sspeer nx_vio_fp_t *vio; 10636495Sspeer nhd->type = NXGE_HIO_TYPE_GUEST; 10646495Sspeer 10656495Sspeer vio = &nhd->hio.vio; 10666495Sspeer vio->__register = (vio_net_resource_reg_t) 10676495Sspeer modgetsymvalue("vio_net_resource_reg", 0); 10686495Sspeer vio->unregister = (vio_net_resource_unreg_t) 10696495Sspeer modgetsymvalue("vio_net_resource_unreg", 0); 10706495Sspeer 10716495Sspeer if (vio->__register == 0 || vio->unregister == 0) { 10726495Sspeer NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); 10736495Sspeer return (NXGE_ERROR); 10746495Sspeer } 10756495Sspeer } 10766495Sspeer 10776495Sspeer return (0); 10786495Sspeer } 10798275SEric Cheng #endif /* defined(sun4v) */ 10808275SEric Cheng 10818275SEric Cheng static int 10828275SEric Cheng nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g, 10838275SEric Cheng const uint8_t *macaddr) 10848275SEric Cheng { 10858275SEric Cheng int rv; 10868275SEric Cheng nxge_rdc_grp_t *group; 10878275SEric Cheng 10888275SEric Cheng 
mutex_enter(nxge->genlock); 10898275SEric Cheng 10908275SEric Cheng /* 10918275SEric Cheng * Initialize the NXGE RDC table data structure. 10928275SEric Cheng */ 10938275SEric Cheng group = &nxge->pt_config.rdc_grps[g->rdctbl]; 10948275SEric Cheng if (!group->flag) { 10958275SEric Cheng group->port = NXGE_GET_PORT_NUM(nxge->function_num); 10968275SEric Cheng group->config_method = RDC_TABLE_ENTRY_METHOD_REP; 10978275SEric Cheng group->flag = B_TRUE; /* This group has been configured. */ 10988275SEric Cheng } 10998275SEric Cheng 11008275SEric Cheng mutex_exit(nxge->genlock); 11018275SEric Cheng 11028275SEric Cheng /* 11038275SEric Cheng * Add the MAC address. 11048275SEric Cheng */ 11058275SEric Cheng if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr, 11068275SEric Cheng g->rdctbl, B_TRUE)) != 0) { 11078275SEric Cheng return (rv); 11088275SEric Cheng } 11098275SEric Cheng 11108275SEric Cheng mutex_enter(nxge->genlock); 11118275SEric Cheng g->n_mac_addrs++; 11128275SEric Cheng mutex_exit(nxge->genlock); 11138275SEric Cheng return (0); 11148275SEric Cheng } 11156495Sspeer 11166495Sspeer static int 11176495Sspeer nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 11186495Sspeer { 11198275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)arg; 11208275SEric Cheng p_nxge_t nxge = group->nxgep; 11218275SEric Cheng int rv; 11226495Sspeer nxge_hio_vr_t *vr; /* The Virtualization Region */ 11236495Sspeer 11248275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 11258275SEric Cheng 11268275SEric Cheng mutex_enter(nxge->genlock); 11276495Sspeer 11286495Sspeer /* 11298275SEric Cheng * If the group is associated with a VR, then only one 11308275SEric Cheng * address may be assigned to the group. 
11316495Sspeer */ 11328275SEric Cheng vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp; 11338275SEric Cheng if ((vr != NULL) && (group->n_mac_addrs)) { 11348275SEric Cheng mutex_exit(nxge->genlock); 11358275SEric Cheng return (ENOSPC); 11368275SEric Cheng } 11378275SEric Cheng 11388275SEric Cheng mutex_exit(nxge->genlock); 11398275SEric Cheng 11408275SEric Cheng /* 11418275SEric Cheng * Program the mac address for the group. 11428275SEric Cheng */ 11438275SEric Cheng if ((rv = nxge_hio_group_mac_add(nxge, group, 11448275SEric Cheng mac_addr)) != 0) { 11456495Sspeer return (rv); 11466495Sspeer } 11476495Sspeer 11486495Sspeer return (0); 11496495Sspeer } 11506495Sspeer 11518275SEric Cheng static int 11528275SEric Cheng find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr) 11538275SEric Cheng { 11548275SEric Cheng int i; 11558275SEric Cheng for (i = 0; i <= mmac_info->num_mmac; i++) { 11568275SEric Cheng if (memcmp(mmac_info->mac_pool[i].addr, mac_addr, 11578275SEric Cheng ETHERADDRL) == 0) { 11588275SEric Cheng return (i); 11598275SEric Cheng } 11608275SEric Cheng } 11618275SEric Cheng return (-1); 11628275SEric Cheng } 11638275SEric Cheng 11646495Sspeer /* ARGSUSED */ 11656495Sspeer static int 11666495Sspeer nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 11676495Sspeer { 11688275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)arg; 11698275SEric Cheng p_nxge_t nxge = group->nxgep; 11708275SEric Cheng nxge_mmac_t *mmac_info; 11718275SEric Cheng int rv, slot; 11728275SEric Cheng 11738275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 11748275SEric Cheng 11758275SEric Cheng mutex_enter(nxge->genlock); 11766495Sspeer 11778275SEric Cheng mmac_info = &nxge->nxge_mmac_info; 11788275SEric Cheng slot = find_mac_slot(mmac_info, mac_addr); 11798275SEric Cheng if (slot < 0) { 11808275SEric Cheng mutex_exit(nxge->genlock); 11818275SEric Cheng return (EINVAL); 11828275SEric Cheng } 11838275SEric Cheng 11848275SEric Cheng 
mutex_exit(nxge->genlock); 11856495Sspeer 11866495Sspeer /* 11878275SEric Cheng * Remove the mac address for the group 11886495Sspeer */ 11898275SEric Cheng if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) { 11908275SEric Cheng return (rv); 11918275SEric Cheng } 11928275SEric Cheng 11938275SEric Cheng mutex_enter(nxge->genlock); 11948275SEric Cheng group->n_mac_addrs--; 11958275SEric Cheng mutex_exit(nxge->genlock); 11966495Sspeer 11976495Sspeer return (0); 11986495Sspeer } 11996495Sspeer 12008275SEric Cheng static int 12018275SEric Cheng nxge_hio_group_start(mac_group_driver_t gdriver) 12028275SEric Cheng { 12038275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver; 12048275SEric Cheng int rdctbl; 12058275SEric Cheng int dev_gindex; 12068275SEric Cheng 12078275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 12088275SEric Cheng 12098275SEric Cheng #ifdef later 12108275SEric Cheng ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED); 12118275SEric Cheng #endif 12128275SEric Cheng if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED) 12138275SEric Cheng return (ENXIO); 12148275SEric Cheng 12158275SEric Cheng mutex_enter(group->nxgep->genlock); 12168275SEric Cheng dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid + 12178275SEric Cheng group->gindex; 12188275SEric Cheng 12198275SEric Cheng /* 12208275SEric Cheng * Get an rdc table for this group. 12218275SEric Cheng * Group ID is given by the caller, and that's the group it needs 12228275SEric Cheng * to bind to. The default group is already bound when the driver 12238275SEric Cheng * was attached. 12248275SEric Cheng * 12258275SEric Cheng * For Group 0, it's RDC table was allocated at attach time 12268275SEric Cheng * no need to allocate a new table. 
12278275SEric Cheng */ 12288275SEric Cheng if (group->gindex != 0) { 12298275SEric Cheng rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep, 12308275SEric Cheng dev_gindex, B_TRUE); 12318275SEric Cheng if (rdctbl < 0) { 12328275SEric Cheng mutex_exit(group->nxgep->genlock); 12338275SEric Cheng return (rdctbl); 12348275SEric Cheng } 12358275SEric Cheng } else { 12368275SEric Cheng rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid; 12378275SEric Cheng } 12388275SEric Cheng 12398275SEric Cheng group->rdctbl = rdctbl; 12408275SEric Cheng 12418275SEric Cheng (void) nxge_init_fzc_rdc_tbl(group->nxgep, rdctbl); 12428275SEric Cheng 12438275SEric Cheng group->started = B_TRUE; 12448275SEric Cheng mutex_exit(group->nxgep->genlock); 12458275SEric Cheng 12468275SEric Cheng return (0); 12478275SEric Cheng } 12488275SEric Cheng 12498275SEric Cheng static void 12508275SEric Cheng nxge_hio_group_stop(mac_group_driver_t gdriver) 12518275SEric Cheng { 12528275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver; 12538275SEric Cheng 12548275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 12558275SEric Cheng 12568275SEric Cheng mutex_enter(group->nxgep->genlock); 12578275SEric Cheng group->started = B_FALSE; 12588275SEric Cheng 12598275SEric Cheng /* 12608275SEric Cheng * Unbind the RDC table previously bound for this group. 12618275SEric Cheng * 12628275SEric Cheng * Since RDC table for group 0 was allocated at attach 12638275SEric Cheng * time, no need to unbind the table here. 
12648275SEric Cheng */ 12658275SEric Cheng if (group->gindex != 0) 12668275SEric Cheng (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl); 12678275SEric Cheng 12688275SEric Cheng mutex_exit(group->nxgep->genlock); 12698275SEric Cheng } 12708275SEric Cheng 12716495Sspeer /* ARGSUSED */ 12726495Sspeer void 12738275SEric Cheng nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid, 12746495Sspeer mac_group_info_t *infop, mac_group_handle_t ghdl) 12756495Sspeer { 12768275SEric Cheng p_nxge_t nxgep = (p_nxge_t)arg; 12778275SEric Cheng nxge_ring_group_t *group; 12788275SEric Cheng int dev_gindex; 12796495Sspeer 12806495Sspeer switch (type) { 12816495Sspeer case MAC_RING_TYPE_RX: 12828275SEric Cheng group = &nxgep->rx_hio_groups[groupid]; 12838275SEric Cheng group->nxgep = nxgep; 12848275SEric Cheng group->ghandle = ghdl; 12858275SEric Cheng group->gindex = groupid; 12868275SEric Cheng group->sindex = 0; /* not yet bound to a share */ 12876495Sspeer 12888275SEric Cheng dev_gindex = nxgep->pt_config.hw_config.def_mac_rxdma_grpid + 12898275SEric Cheng groupid; 12908275SEric Cheng 12918275SEric Cheng infop->mgi_driver = (mac_group_driver_t)group; 12928275SEric Cheng infop->mgi_start = nxge_hio_group_start; 12938275SEric Cheng infop->mgi_stop = nxge_hio_group_stop; 12948275SEric Cheng infop->mgi_addmac = nxge_hio_add_mac; 12958275SEric Cheng infop->mgi_remmac = nxge_hio_rem_mac; 12968275SEric Cheng infop->mgi_count = 12978275SEric Cheng nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs; 12986495Sspeer break; 12996495Sspeer 13006495Sspeer case MAC_RING_TYPE_TX: 13018275SEric Cheng /* 13028275SEric Cheng * 'groupid' for TX should be incremented by one since 13038275SEric Cheng * the default group (groupid 0) is not known by the MAC layer 13048275SEric Cheng */ 13058275SEric Cheng group = &nxgep->tx_hio_groups[groupid + 1]; 13068275SEric Cheng group->nxgep = nxgep; 13078275SEric Cheng group->ghandle = ghdl; 13088275SEric Cheng group->gindex = groupid + 1; 
13098275SEric Cheng group->sindex = 0; /* not yet bound to a share */ 13108275SEric Cheng 13118275SEric Cheng infop->mgi_driver = (mac_group_driver_t)group; 13128275SEric Cheng infop->mgi_start = NULL; 13138275SEric Cheng infop->mgi_stop = NULL; 13148275SEric Cheng infop->mgi_addmac = NULL; /* not needed */ 13158275SEric Cheng infop->mgi_remmac = NULL; /* not needed */ 13168275SEric Cheng /* no rings associated with group initially */ 13178275SEric Cheng infop->mgi_count = 0; 13186495Sspeer break; 13196495Sspeer } 13206495Sspeer } 13216495Sspeer 13228275SEric Cheng #if defined(sun4v) 13238275SEric Cheng 13246495Sspeer int 13256495Sspeer nxge_hio_share_assign( 13266495Sspeer nxge_t *nxge, 13276495Sspeer uint64_t cookie, 13286495Sspeer res_map_t *tmap, 13296495Sspeer res_map_t *rmap, 13306495Sspeer nxge_hio_vr_t *vr) 13316495Sspeer { 13326495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 13336495Sspeer uint64_t slot, hv_rv; 13346495Sspeer nxge_hio_dc_t *dc; 13356495Sspeer nxhv_vr_fp_t *fp; 13366495Sspeer int i; 13376495Sspeer 13386495Sspeer /* 13396495Sspeer * Ask the Hypervisor to set up the VR for us 13406495Sspeer */ 13416495Sspeer fp = &nhd->hio.vr; 13426495Sspeer if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 13436495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 13447950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 13456713Sspeer "vr->assign() returned %d", hv_rv)); 13466495Sspeer return (-EIO); 13476495Sspeer } 13486495Sspeer 13496495Sspeer /* 13506495Sspeer * For each shared TDC, ask the HV to find us an empty slot. 
13516495Sspeer * ----------------------------------------------------- 13526495Sspeer */ 13536495Sspeer dc = vr->tx_group.dc; 13546495Sspeer for (i = 0; i < NXGE_MAX_TDCS; i++) { 13556495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 13566495Sspeer while (dc) { 13576495Sspeer hv_rv = (*tx->assign) 13586495Sspeer (vr->cookie, dc->channel, &slot); 13596495Sspeer if (hv_rv != 0) { 13606495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 13617950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 13626495Sspeer "tx->assign(%x, %d) failed: %ld", 13636495Sspeer vr->cookie, dc->channel, hv_rv)); 13646495Sspeer return (-EIO); 13656495Sspeer } 13666495Sspeer 13676495Sspeer dc->cookie = vr->cookie; 13686495Sspeer dc->page = (vp_channel_t)slot; 13696495Sspeer 13706495Sspeer /* Inform the caller about the slot chosen. */ 13716495Sspeer (*tmap) |= 1 << slot; 13726495Sspeer 13736495Sspeer dc = dc->next; 13746495Sspeer } 13756495Sspeer } 13766495Sspeer 13776495Sspeer /* 13786495Sspeer * For each shared RDC, ask the HV to find us an empty slot. 13796495Sspeer * ----------------------------------------------------- 13806495Sspeer */ 13816495Sspeer dc = vr->rx_group.dc; 13826495Sspeer for (i = 0; i < NXGE_MAX_RDCS; i++) { 13836495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 13846495Sspeer while (dc) { 13856495Sspeer hv_rv = (*rx->assign) 13866495Sspeer (vr->cookie, dc->channel, &slot); 13876495Sspeer if (hv_rv != 0) { 13886495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 13897950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 13906495Sspeer "rx->assign(%x, %d) failed: %ld", 13916495Sspeer vr->cookie, dc->channel, hv_rv)); 13926495Sspeer return (-EIO); 13936495Sspeer } 13946495Sspeer 13956495Sspeer dc->cookie = vr->cookie; 13966495Sspeer dc->page = (vp_channel_t)slot; 13976495Sspeer 13986495Sspeer /* Inform the caller about the slot chosen. 
*/ 13996495Sspeer (*rmap) |= 1 << slot; 14006495Sspeer 14016495Sspeer dc = dc->next; 14026495Sspeer } 14036495Sspeer } 14046495Sspeer 14056495Sspeer return (0); 14066495Sspeer } 14076495Sspeer 14088275SEric Cheng void 14096495Sspeer nxge_hio_share_unassign( 14106495Sspeer nxge_hio_vr_t *vr) 14116495Sspeer { 14126495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14136495Sspeer nxge_hio_data_t *nhd; 14146495Sspeer nxge_hio_dc_t *dc; 14156495Sspeer nxhv_vr_fp_t *fp; 14166495Sspeer uint64_t hv_rv; 14176495Sspeer 14186495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 14196495Sspeer 14206495Sspeer dc = vr->tx_group.dc; 14216495Sspeer while (dc) { 14226495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 14236495Sspeer hv_rv = (*tx->unassign)(vr->cookie, dc->page); 14246495Sspeer if (hv_rv != 0) { 14256495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14267950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 14276495Sspeer "tx->unassign(%x, %d) failed: %ld", 14286495Sspeer vr->cookie, dc->page, hv_rv)); 14296495Sspeer } 14306495Sspeer dc = dc->next; 14316495Sspeer } 14326495Sspeer 14336495Sspeer dc = vr->rx_group.dc; 14346495Sspeer while (dc) { 14356495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 14366495Sspeer hv_rv = (*rx->unassign)(vr->cookie, dc->page); 14376495Sspeer if (hv_rv != 0) { 14386495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14397950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 14406495Sspeer "rx->unassign(%x, %d) failed: %ld", 14416495Sspeer vr->cookie, dc->page, hv_rv)); 14426495Sspeer } 14436495Sspeer dc = dc->next; 14446495Sspeer } 14456495Sspeer 14466495Sspeer fp = &nhd->hio.vr; 14476495Sspeer if (fp->unassign) { 14486495Sspeer hv_rv = (*fp->unassign)(vr->cookie); 14496495Sspeer if (hv_rv != 0) { 14507950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14517950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 14526495Sspeer "vr->assign(%x) failed: %ld", 14536495Sspeer vr->cookie, hv_rv)); 14546495Sspeer } 14556495Sspeer } 14566495Sspeer } 
14576495Sspeer 14586495Sspeer int 14598275SEric Cheng nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle) 14606495Sspeer { 14618275SEric Cheng p_nxge_t nxge = (p_nxge_t)arg; 14628275SEric Cheng nxge_share_handle_t *shp; 14638275SEric Cheng nxge_hio_vr_t *vr; /* The Virtualization Region */ 14648275SEric Cheng nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 14656495Sspeer 14666495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 14676495Sspeer 14686495Sspeer if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 14696495Sspeer nhd->hio.rx.assign == 0) { 14706495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 14716495Sspeer return (EIO); 14726495Sspeer } 14736495Sspeer 14746495Sspeer /* 14756495Sspeer * Get a VR. 14766495Sspeer */ 14777755SMisaki.Kataoka@Sun.COM if ((vr = nxge_hio_vr_share(nxge)) == 0) 14786495Sspeer return (EAGAIN); 14796495Sspeer 14806495Sspeer shp = &nxge->shares[vr->region]; 14818275SEric Cheng shp->nxgep = nxge; 14826495Sspeer shp->index = vr->region; 14836495Sspeer shp->vrp = (void *)vr; 14848275SEric Cheng shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */ 14858275SEric Cheng shp->rxgroup = 0; /* to be assigned by ms_sadd */ 14868275SEric Cheng shp->active = B_FALSE; /* not bound yet */ 14876495Sspeer 14886495Sspeer *shandle = (mac_share_handle_t)shp; 14896495Sspeer 14906495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 14916495Sspeer return (0); 14926495Sspeer } 14936495Sspeer 14948275SEric Cheng 14956495Sspeer void 14966495Sspeer nxge_hio_share_free(mac_share_handle_t shandle) 14976495Sspeer { 14988275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 14998275SEric Cheng nxge_hio_vr_t *vr; 15008275SEric Cheng 15018275SEric Cheng /* 15028275SEric Cheng * Clear internal handle state. 
15038275SEric Cheng */ 15048275SEric Cheng vr = shp->vrp; 15058275SEric Cheng shp->vrp = (void *)NULL; 15068275SEric Cheng shp->index = 0; 15078275SEric Cheng shp->tmap = 0; 15088275SEric Cheng shp->rmap = 0; 15098275SEric Cheng shp->rxgroup = 0; 15108275SEric Cheng shp->active = B_FALSE; 15118275SEric Cheng 15128275SEric Cheng /* 15138275SEric Cheng * Free VR resource. 15148275SEric Cheng */ 15158275SEric Cheng nxge_hio_unshare(vr); 15168275SEric Cheng } 15178275SEric Cheng 15188275SEric Cheng 15198275SEric Cheng void 15208275SEric Cheng nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 15218275SEric Cheng mac_ring_handle_t *rings, uint_t *n_rings) 15228275SEric Cheng { 15238275SEric Cheng nxge_t *nxge; 15248275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 15258275SEric Cheng nxge_ring_handle_t *rh; 15268275SEric Cheng uint32_t offset; 15278275SEric Cheng 15288275SEric Cheng nxge = shp->nxgep; 15298275SEric Cheng 15308275SEric Cheng switch (type) { 15318275SEric Cheng case MAC_RING_TYPE_RX: 15328275SEric Cheng rh = nxge->rx_ring_handles; 15338275SEric Cheng offset = nxge->pt_config.hw_config.start_rdc; 15348275SEric Cheng break; 15358275SEric Cheng 15368275SEric Cheng case MAC_RING_TYPE_TX: 15378275SEric Cheng rh = nxge->tx_ring_handles; 15388275SEric Cheng offset = nxge->pt_config.hw_config.tdc.start; 15398275SEric Cheng break; 15408275SEric Cheng } 15418275SEric Cheng 15428275SEric Cheng /* 15438275SEric Cheng * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that, 15448275SEric Cheng * but the HV has statically assigned the channels like so: 15458275SEric Cheng * VR0: RDC0 & RDC1 15468275SEric Cheng * VR1: RDC2 & RDC3, etc. 15478275SEric Cheng * The TDCs are assigned in exactly the same way. 
15488275SEric Cheng */ 15498275SEric Cheng if (rings != NULL) { 15508275SEric Cheng rings[0] = rh[(shp->index * 2) - offset].ring_handle; 15518275SEric Cheng rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle; 15528275SEric Cheng } 15538275SEric Cheng if (n_rings != NULL) { 15548275SEric Cheng *n_rings = 2; 15558275SEric Cheng } 15568275SEric Cheng } 15578275SEric Cheng 15588275SEric Cheng int 15598275SEric Cheng nxge_hio_share_add_group(mac_share_handle_t shandle, 15608275SEric Cheng mac_group_driver_t ghandle) 15618275SEric Cheng { 15628275SEric Cheng nxge_t *nxge; 15638275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 15648275SEric Cheng nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle; 15658275SEric Cheng nxge_hio_vr_t *vr; /* The Virtualization Region */ 15668275SEric Cheng nxge_grp_t *group; 15678275SEric Cheng int i; 15688275SEric Cheng 15698275SEric Cheng if (rg->sindex != 0) { 15708275SEric Cheng /* the group is already bound to a share */ 15718275SEric Cheng return (EALREADY); 15728275SEric Cheng } 15738275SEric Cheng 1574*8400SNicolas.Droux@Sun.COM /* 1575*8400SNicolas.Droux@Sun.COM * If we are adding a group 0 to a share, this 1576*8400SNicolas.Droux@Sun.COM * is not correct. 1577*8400SNicolas.Droux@Sun.COM */ 1578*8400SNicolas.Droux@Sun.COM ASSERT(rg->gindex != 0); 1579*8400SNicolas.Droux@Sun.COM 15808275SEric Cheng nxge = rg->nxgep; 15818275SEric Cheng vr = shp->vrp; 15828275SEric Cheng 15838275SEric Cheng switch (rg->type) { 15848275SEric Cheng case MAC_RING_TYPE_RX: 15858275SEric Cheng /* 15868275SEric Cheng * Make sure that the group has the right rings associated 15878275SEric Cheng * for the share. In version 1.0, we may only give a VR 15888275SEric Cheng * 2 RDCs. Not only that, but the HV has statically 15898275SEric Cheng * assigned the channels like so: 15908275SEric Cheng * VR0: RDC0 & RDC1 15918275SEric Cheng * VR1: RDC2 & RDC3, etc. 
15928275SEric Cheng */ 15938275SEric Cheng group = nxge->rx_set.group[rg->gindex]; 15948275SEric Cheng 15958275SEric Cheng if (group->count > 2) { 15968275SEric Cheng /* a share can have at most 2 rings */ 15978275SEric Cheng return (EINVAL); 15988275SEric Cheng } 15998275SEric Cheng 16008275SEric Cheng for (i = 0; i < NXGE_MAX_RDCS; i++) { 16018275SEric Cheng if (group->map & (1 << i)) { 16028275SEric Cheng if ((i != shp->index * 2) && 16038275SEric Cheng (i != (shp->index * 2 + 1))) { 16048275SEric Cheng /* 16058275SEric Cheng * A group with invalid rings was 16068275SEric Cheng * attempted to bind to this share 16078275SEric Cheng */ 16088275SEric Cheng return (EINVAL); 16098275SEric Cheng } 16108275SEric Cheng } 16118275SEric Cheng } 16128275SEric Cheng 16138275SEric Cheng rg->sindex = vr->region; 16148275SEric Cheng vr->rdc_tbl = rg->rdctbl; 16158275SEric Cheng shp->rxgroup = vr->rdc_tbl; 16168275SEric Cheng break; 16178275SEric Cheng 16188275SEric Cheng case MAC_RING_TYPE_TX: 16198275SEric Cheng /* 16208275SEric Cheng * Make sure that the group has the right rings associated 16218275SEric Cheng * for the share. In version 1.0, we may only give a VR 16228275SEric Cheng * 2 TDCs. Not only that, but the HV has statically 16238275SEric Cheng * assigned the channels like so: 16248275SEric Cheng * VR0: TDC0 & TDC1 16258275SEric Cheng * VR1: TDC2 & TDC3, etc. 
16268275SEric Cheng */ 16278275SEric Cheng group = nxge->tx_set.group[rg->gindex]; 16288275SEric Cheng 16298275SEric Cheng if (group->count > 2) { 16308275SEric Cheng /* a share can have at most 2 rings */ 16318275SEric Cheng return (EINVAL); 16328275SEric Cheng } 16338275SEric Cheng 16348275SEric Cheng for (i = 0; i < NXGE_MAX_TDCS; i++) { 16358275SEric Cheng if (group->map & (1 << i)) { 16368275SEric Cheng if ((i != shp->index * 2) && 16378275SEric Cheng (i != (shp->index * 2 + 1))) { 16388275SEric Cheng /* 16398275SEric Cheng * A group with invalid rings was 16408275SEric Cheng * attempted to bind to this share 16418275SEric Cheng */ 16428275SEric Cheng return (EINVAL); 16438275SEric Cheng } 16448275SEric Cheng } 16458275SEric Cheng } 16468275SEric Cheng 16478275SEric Cheng vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid + 16488275SEric Cheng rg->gindex; 16498275SEric Cheng rg->sindex = vr->region; 16508275SEric Cheng break; 16518275SEric Cheng } 16528275SEric Cheng return (0); 16538275SEric Cheng } 16548275SEric Cheng 16558275SEric Cheng int 16568275SEric Cheng nxge_hio_share_rem_group(mac_share_handle_t shandle, 16578275SEric Cheng mac_group_driver_t ghandle) 16588275SEric Cheng { 16598275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 16608275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle; 16618275SEric Cheng nxge_hio_vr_t *vr; /* The Virtualization Region */ 16628275SEric Cheng int rv = 0; 16638275SEric Cheng 16648275SEric Cheng vr = shp->vrp; 16658275SEric Cheng 16668275SEric Cheng switch (group->type) { 16678275SEric Cheng case MAC_RING_TYPE_RX: 16688275SEric Cheng group->sindex = 0; 16698275SEric Cheng vr->rdc_tbl = 0; 16708275SEric Cheng shp->rxgroup = 0; 16718275SEric Cheng break; 16728275SEric Cheng 16738275SEric Cheng case MAC_RING_TYPE_TX: 16748275SEric Cheng group->sindex = 0; 16758275SEric Cheng vr->tdc_tbl = 0; 16768275SEric Cheng break; 16778275SEric Cheng } 16788275SEric Cheng 16798275SEric 
Cheng return (rv); 16808275SEric Cheng } 16818275SEric Cheng 16828275SEric Cheng int 16838275SEric Cheng nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie, 16848275SEric Cheng uint64_t *rcookie) 16858275SEric Cheng { 16868275SEric Cheng nxge_t *nxge; 16878275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 16888275SEric Cheng nxge_hio_vr_t *vr; 16898275SEric Cheng uint64_t rmap, tmap, hv_rmap, hv_tmap; 16908275SEric Cheng int rv; 16918275SEric Cheng 16928275SEric Cheng nxge = shp->nxgep; 16938275SEric Cheng vr = (nxge_hio_vr_t *)shp->vrp; 16948275SEric Cheng 16958275SEric Cheng /* 16968275SEric Cheng * Add resources to the share. 16978275SEric Cheng * For each DMA channel associated with the VR, bind its resources 16988275SEric Cheng * to the VR. 16998275SEric Cheng */ 17008275SEric Cheng tmap = 0; 17018275SEric Cheng rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap); 17028275SEric Cheng if (rv != 0) { 17038275SEric Cheng return (rv); 17048275SEric Cheng } 17058275SEric Cheng 17068275SEric Cheng rmap = 0; 17078275SEric Cheng rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap); 17088275SEric Cheng if (rv != 0) { 17098275SEric Cheng nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 17108275SEric Cheng return (rv); 17118275SEric Cheng } 17128275SEric Cheng 17138275SEric Cheng /* 17148275SEric Cheng * Ask the Hypervisor to set up the VR and allocate slots for 17158275SEric Cheng * each rings associated with the VR. 
17168275SEric Cheng */ 17178275SEric Cheng hv_tmap = hv_rmap = 0; 17188275SEric Cheng if ((rv = nxge_hio_share_assign(nxge, cookie, 17198275SEric Cheng &hv_tmap, &hv_rmap, vr))) { 17208275SEric Cheng nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 17218275SEric Cheng nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap); 17228275SEric Cheng return (rv); 17238275SEric Cheng } 17248275SEric Cheng 17258275SEric Cheng shp->active = B_TRUE; 17268275SEric Cheng shp->tmap = hv_tmap; 17278275SEric Cheng shp->rmap = hv_rmap; 17288275SEric Cheng 17298275SEric Cheng /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 17308275SEric Cheng *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 17318275SEric Cheng 17328275SEric Cheng return (0); 17338275SEric Cheng } 17348275SEric Cheng 17358275SEric Cheng void 17368275SEric Cheng nxge_hio_share_unbind(mac_share_handle_t shandle) 17378275SEric Cheng { 17386495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 17396495Sspeer 17406495Sspeer /* 17416495Sspeer * First, unassign the VR (take it back), 17426495Sspeer * so we can enable interrupts again. 17436495Sspeer */ 17448275SEric Cheng nxge_hio_share_unassign(shp->vrp); 17456495Sspeer 17466495Sspeer /* 17476495Sspeer * Free Ring Resources for TX and RX 17486495Sspeer */ 17497755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 17507755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 17516495Sspeer } 17526495Sspeer 17536495Sspeer 17546495Sspeer /* 17556495Sspeer * nxge_hio_vr_share 17566495Sspeer * 17576495Sspeer * Find an unused Virtualization Region (VR). 
17586495Sspeer * 17596495Sspeer * Arguments: 17606495Sspeer * nxge 17616495Sspeer * 17626495Sspeer * Notes: 17636495Sspeer * 17646495Sspeer * Context: 17656495Sspeer * Service domain 17666495Sspeer */ 17677755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t * 17686495Sspeer nxge_hio_vr_share( 17696495Sspeer nxge_t *nxge) 17706495Sspeer { 17716495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 17726495Sspeer nxge_hio_vr_t *vr; 17736495Sspeer 17746495Sspeer int first, limit, region; 17756495Sspeer 17766495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 17776495Sspeer 17786495Sspeer MUTEX_ENTER(&nhd->lock); 17796495Sspeer 17807755SMisaki.Kataoka@Sun.COM if (nhd->vrs == 0) { 17816495Sspeer MUTEX_EXIT(&nhd->lock); 17826495Sspeer return (0); 17836495Sspeer } 17846495Sspeer 17856495Sspeer /* Find an empty virtual region (VR). */ 17866495Sspeer if (nxge->function_num == 0) { 17876495Sspeer // FUNC0_VIR0 'belongs' to NIU port 0. 17886495Sspeer first = FUNC0_VIR1; 17896495Sspeer limit = FUNC2_VIR0; 17906495Sspeer } else if (nxge->function_num == 1) { 17916495Sspeer // FUNC2_VIR0 'belongs' to NIU port 1. 
17926495Sspeer first = FUNC2_VIR1; 17936495Sspeer limit = FUNC_VIR_MAX; 17946495Sspeer } else { 17956495Sspeer cmn_err(CE_WARN, 17966495Sspeer "Shares not supported on function(%d) at this time.\n", 17976495Sspeer nxge->function_num); 17986495Sspeer } 17996495Sspeer 18006495Sspeer for (region = first; region < limit; region++) { 18016495Sspeer if (nhd->vr[region].nxge == 0) 18026495Sspeer break; 18036495Sspeer } 18046495Sspeer 18056495Sspeer if (region == limit) { 18066495Sspeer MUTEX_EXIT(&nhd->lock); 18076495Sspeer return (0); 18086495Sspeer } 18096495Sspeer 18106495Sspeer vr = &nhd->vr[region]; 18116495Sspeer vr->nxge = (uintptr_t)nxge; 18126495Sspeer vr->region = (uintptr_t)region; 18136495Sspeer 18147755SMisaki.Kataoka@Sun.COM nhd->vrs--; 18156495Sspeer 18166495Sspeer MUTEX_EXIT(&nhd->lock); 18176495Sspeer 18186495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 18196495Sspeer 18207755SMisaki.Kataoka@Sun.COM return (vr); 18216495Sspeer } 18226495Sspeer 18236495Sspeer void 18246495Sspeer nxge_hio_unshare( 18257755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr) 18266495Sspeer { 18276495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 18286495Sspeer nxge_hio_data_t *nhd; 18296495Sspeer 18306495Sspeer vr_region_t region; 18316495Sspeer 18326495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 18336495Sspeer 18346495Sspeer if (!nxge) { 18357950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: " 18366495Sspeer "vr->nxge is NULL")); 18376495Sspeer return; 18386495Sspeer } 18396495Sspeer 18406495Sspeer /* 18416495Sspeer * This function is no longer called, but I will keep it 18426495Sspeer * here in case we want to revisit this topic in the future. 18436495Sspeer * 18446495Sspeer * nxge_hio_hostinfo_uninit(nxge, vr); 18456495Sspeer */ 18468275SEric Cheng 18478275SEric Cheng /* 18488275SEric Cheng * XXX: This is done by ms_sremove? 
18498275SEric Cheng * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 18508275SEric Cheng */ 18516495Sspeer 18526495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 18536495Sspeer 18546495Sspeer MUTEX_ENTER(&nhd->lock); 18556495Sspeer 18566495Sspeer region = vr->region; 18576495Sspeer (void) memset(vr, 0, sizeof (*vr)); 18586495Sspeer vr->region = region; 18596495Sspeer 18607755SMisaki.Kataoka@Sun.COM nhd->vrs++; 18616495Sspeer 18626495Sspeer MUTEX_EXIT(&nhd->lock); 18636495Sspeer 18646495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 18656495Sspeer } 18666495Sspeer 18676495Sspeer int 18686495Sspeer nxge_hio_addres( 18697755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr, 18706495Sspeer mac_ring_type_t type, 18718275SEric Cheng uint64_t *map) 18726495Sspeer { 18738275SEric Cheng nxge_t *nxge = (nxge_t *)vr->nxge; 18748275SEric Cheng nxge_grp_t *group; 18758275SEric Cheng int groupid; 18768275SEric Cheng int i; 18778275SEric Cheng int max_dcs; 18786495Sspeer 18796495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 18806495Sspeer 18816495Sspeer if (!nxge) 18826495Sspeer return (EINVAL); 18836495Sspeer 18848275SEric Cheng /* 18858275SEric Cheng * For each ring associated with the group, add the resources 18868275SEric Cheng * to the group and bind. 18878275SEric Cheng */ 18888275SEric Cheng max_dcs = (type == MAC_RING_TYPE_TX) ? 
NXGE_MAX_TDCS : NXGE_MAX_RDCS; 18898275SEric Cheng if (type == MAC_RING_TYPE_TX) { 18908275SEric Cheng /* set->group is an array of group indexed by a port group id */ 18918275SEric Cheng groupid = vr->tdc_tbl - 18928275SEric Cheng nxge->pt_config.hw_config.def_mac_txdma_grpid; 18938275SEric Cheng group = nxge->tx_set.group[groupid]; 18948275SEric Cheng } else { 18958275SEric Cheng /* set->group is an array of group indexed by a port group id */ 18968275SEric Cheng groupid = vr->rdc_tbl - 18978275SEric Cheng nxge->pt_config.hw_config.def_mac_rxdma_grpid; 18988275SEric Cheng group = nxge->rx_set.group[groupid]; 18998275SEric Cheng } 19008275SEric Cheng 19018275SEric Cheng if (group->map == 0) { 19028275SEric Cheng NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated " 19038275SEric Cheng "with this VR")); 19048275SEric Cheng return (EINVAL); 19058275SEric Cheng } 19068275SEric Cheng 19078275SEric Cheng for (i = 0; i < max_dcs; i++) { 19088275SEric Cheng if (group->map & (1 << i)) { 19098275SEric Cheng int rv; 19108275SEric Cheng 19118275SEric Cheng if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) { 19128275SEric Cheng if (*map == 0) /* Couldn't get even one DC. 
*/ 19138275SEric Cheng return (-rv); 19148275SEric Cheng else 19158275SEric Cheng break; 19168275SEric Cheng } 19178275SEric Cheng *map |= (1 << i); 19186495Sspeer } 19196495Sspeer } 19206495Sspeer 19216495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 19226495Sspeer 19236495Sspeer return (0); 19246495Sspeer } 19256495Sspeer 19266495Sspeer /* ARGSUSED */ 19276495Sspeer void 19286495Sspeer nxge_hio_remres( 19297755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr, 19306495Sspeer mac_ring_type_t type, 19316495Sspeer res_map_t res_map) 19326495Sspeer { 19336495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 19346495Sspeer nxge_grp_t *group; 19356495Sspeer 19366495Sspeer if (!nxge) { 19377950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 19386495Sspeer "vr->nxge is NULL")); 19396495Sspeer return; 19406495Sspeer } 19416495Sspeer 19426495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 19436495Sspeer 19448275SEric Cheng /* 19458275SEric Cheng * For each ring bound to the group, remove the DMA resources 19468275SEric Cheng * from the group and unbind. 19478275SEric Cheng */ 19486495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 19496495Sspeer while (group->dc) { 19506495Sspeer nxge_hio_dc_t *dc = group->dc; 19516495Sspeer NXGE_DC_RESET(res_map, dc->page); 19526495Sspeer nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 19536495Sspeer } 19546495Sspeer 19556495Sspeer if (res_map) { 19566495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 19576495Sspeer "res_map %lx", res_map)); 19586495Sspeer } 19596495Sspeer 19606495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 19616495Sspeer } 19626495Sspeer 19636495Sspeer /* 19646495Sspeer * nxge_hio_tdc_share 19656495Sspeer * 19666495Sspeer * Share an unused TDC channel. 
19676495Sspeer * 19686495Sspeer * Arguments: 19696495Sspeer * nxge 19706495Sspeer * 19716495Sspeer * Notes: 19726495Sspeer * 19736495Sspeer * A.7.3 Reconfigure Tx DMA channel 19746495Sspeer * Disable TxDMA A.9.6.10 19756495Sspeer * [Rebind TxDMA channel to Port A.9.6.7] 19766495Sspeer * 19776495Sspeer * We don't have to Rebind the TDC to the port - it always already bound. 19786495Sspeer * 19796495Sspeer * Soft Reset TxDMA A.9.6.2 19806495Sspeer * 19816495Sspeer * This procedure will be executed by nxge_init_txdma_channel() in the 19826495Sspeer * guest domain: 19836495Sspeer * 19846495Sspeer * Re-initialize TxDMA A.9.6.8 19856495Sspeer * Reconfigure TxDMA 19866495Sspeer * Enable TxDMA A.9.6.9 19876495Sspeer * 19886495Sspeer * Context: 19896495Sspeer * Service domain 19906495Sspeer */ 19916495Sspeer int 19926495Sspeer nxge_hio_tdc_share( 19936495Sspeer nxge_t *nxge, 19946495Sspeer int channel) 19956495Sspeer { 19967812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19976495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 19986495Sspeer tx_ring_t *ring; 19996713Sspeer int count; 20006495Sspeer 20016495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 20026495Sspeer 20036495Sspeer /* 20046495Sspeer * Wait until this channel is idle. 20056495Sspeer */ 20066495Sspeer ring = nxge->tx_rings->rings[channel]; 20076713Sspeer 20086713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 20096886Sspeer if (ring->tx_ring_busy) { 20106886Sspeer /* 20116886Sspeer * Wait for 30 seconds. 
20126886Sspeer */ 20136886Sspeer for (count = 30 * 1000; count; count--) { 20146886Sspeer if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 20156886Sspeer break; 20166886Sspeer } 20176886Sspeer 20186886Sspeer drv_usecwait(1000); 20196495Sspeer } 20206713Sspeer 20216886Sspeer if (count == 0) { 20226886Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 20236886Sspeer NXGE_TX_RING_ONLINE); 20247950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20257950SMichael.Speer@Sun.COM "nxge_hio_tdc_share: " 20266886Sspeer "Tx ring %d was always BUSY", channel)); 20276886Sspeer return (-EIO); 20286886Sspeer } 20296886Sspeer } else { 20306713Sspeer (void) atomic_swap_32(&ring->tx_ring_offline, 20316886Sspeer NXGE_TX_RING_OFFLINED); 20326495Sspeer } 20336495Sspeer 20347812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 20357812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_TRUE; 20367812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 20377812SMichael.Speer@Sun.COM 20386495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 20397950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: " 20406495Sspeer "Failed to remove interrupt for TxDMA channel %d", 20416495Sspeer channel)); 20428275SEric Cheng return (-EINVAL); 20436495Sspeer } 20446495Sspeer 20456495Sspeer /* Disable TxDMA A.9.6.10 */ 20466495Sspeer (void) nxge_txdma_channel_disable(nxge, channel); 20476495Sspeer 20486495Sspeer /* The SD is sharing this channel. */ 20496495Sspeer NXGE_DC_SET(set->shared.map, channel); 20506495Sspeer set->shared.count++; 20516495Sspeer 20526602Sspeer /* Soft Reset TxDMA A.9.6.2 */ 20536602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 20546602Sspeer 20556495Sspeer /* 20566495Sspeer * Initialize the DC-specific FZC control registers. 
20576495Sspeer * ----------------------------------------------------- 20586495Sspeer */ 20596495Sspeer if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 20606495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20617950SMichael.Speer@Sun.COM "nxge_hio_tdc_share: FZC TDC failed: %d", channel)); 20626495Sspeer return (-EIO); 20636495Sspeer } 20646495Sspeer 20656495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 20666495Sspeer 20676495Sspeer return (0); 20686495Sspeer } 20696495Sspeer 20706495Sspeer /* 20716495Sspeer * nxge_hio_rdc_share 20726495Sspeer * 20736495Sspeer * Share an unused RDC channel. 20746495Sspeer * 20756495Sspeer * Arguments: 20766495Sspeer * nxge 20776495Sspeer * 20786495Sspeer * Notes: 20796495Sspeer * 20806495Sspeer * This is the latest version of the procedure to 20816495Sspeer * Reconfigure an Rx DMA channel: 20826495Sspeer * 20836495Sspeer * A.6.3 Reconfigure Rx DMA channel 20846495Sspeer * Stop RxMAC A.9.2.6 20856495Sspeer * Drain IPP Port A.9.3.6 20866495Sspeer * Stop and reset RxDMA A.9.5.3 20876495Sspeer * 20886495Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 20896495Sspeer * guest domain: 20906495Sspeer * 20916495Sspeer * Initialize RxDMA A.9.5.4 20926495Sspeer * Reconfigure RxDMA 20936495Sspeer * Enable RxDMA A.9.5.5 20946495Sspeer * 20956495Sspeer * We will do this here, since the RDC is a canalis non grata: 20966495Sspeer * Enable RxMAC A.9.2.10 20976495Sspeer * 20986495Sspeer * Context: 20996495Sspeer * Service domain 21006495Sspeer */ 21016495Sspeer int 21026495Sspeer nxge_hio_rdc_share( 21036495Sspeer nxge_t *nxge, 21046495Sspeer nxge_hio_vr_t *vr, 21056495Sspeer int channel) 21066495Sspeer { 21076495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 21086495Sspeer nxge_rdc_grp_t *rdc_grp; 21096495Sspeer 21106495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 21116495Sspeer 21126495Sspeer /* Disable interrupts. 
*/ 21136495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 21147950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21156495Sspeer "Failed to remove interrupt for RxDMA channel %d", 21166495Sspeer channel)); 21176495Sspeer return (NXGE_ERROR); 21186495Sspeer } 21196495Sspeer 21206495Sspeer /* Stop RxMAC = A.9.2.6 */ 21216495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 21226495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21236495Sspeer "Failed to disable RxMAC")); 21246495Sspeer } 21256495Sspeer 21266495Sspeer /* Drain IPP Port = A.9.3.6 */ 21276495Sspeer (void) nxge_ipp_drain(nxge); 21286495Sspeer 21296495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 21306495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 21316495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 21326495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 21336495Sspeer "Failed to disable RxDMA channel %d", channel)); 21346495Sspeer } 21356495Sspeer 21366495Sspeer /* The SD is sharing this channel. */ 21376495Sspeer NXGE_DC_SET(set->shared.map, channel); 21386495Sspeer set->shared.count++; 21396495Sspeer 21406602Sspeer // Assert RST: RXDMA_CFIG1[30] = 1 21416602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 21426602Sspeer 21436495Sspeer /* 21446495Sspeer * The guest domain will reconfigure the RDC later. 21456495Sspeer * 21466495Sspeer * But in the meantime, we must re-enable the Rx MAC so 21476495Sspeer * that we can start receiving packets again on the 21486495Sspeer * remaining RDCs: 21496495Sspeer * 21506495Sspeer * Enable RxMAC = A.9.2.10 21516495Sspeer */ 21526495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 21536495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 21547950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: Rx MAC still disabled")); 21556495Sspeer } 21566495Sspeer 21576495Sspeer /* 21586495Sspeer * Initialize the DC-specific FZC control registers. 
21596495Sspeer * ----------------------------------------------------- 21606495Sspeer */ 21616495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 21626495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 21637950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: RZC RDC failed: %ld", channel)); 21646495Sspeer return (-EIO); 21656495Sspeer } 21666495Sspeer 21676495Sspeer /* 21686495Sspeer * We have to initialize the guest's RDC table, too. 21696495Sspeer * ----------------------------------------------------- 21706495Sspeer */ 21716495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 21726495Sspeer if (rdc_grp->max_rdcs == 0) { 21736495Sspeer rdc_grp->start_rdc = (uint8_t)channel; 21746495Sspeer rdc_grp->def_rdc = (uint8_t)channel; 21756495Sspeer rdc_grp->max_rdcs = 1; 21766495Sspeer } else { 21776495Sspeer rdc_grp->max_rdcs++; 21786495Sspeer } 21796495Sspeer NXGE_DC_SET(rdc_grp->map, channel); 21806495Sspeer 21816495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 21826495Sspeer 21836495Sspeer return (0); 21846495Sspeer } 21856495Sspeer 21866495Sspeer /* 21876495Sspeer * nxge_hio_dc_share 21886495Sspeer * 21896495Sspeer * Share a DMA channel with a guest domain. 21906495Sspeer * 21916495Sspeer * Arguments: 21926495Sspeer * nxge 21936495Sspeer * vr The VR that <channel> will belong to. 21946495Sspeer * type Tx or Rx. 
 * 	channel	Channel to share
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_t *group;
	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));


	/*
	 * Share the hardware channel first; a negative result means
	 * the share failed.
	 * --------------------------------------------------
	 */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

	if (slot < 0) {
		/* Undo any partial sharing state for this channel. */
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;
	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence	= nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	/*
	 * NOTE(review): informational message emitted via the error
	 * macro with a debug category (HIO_CTL) — looks intentional,
	 * but verify against the NXGE_ERROR_MSG definition.
	 */
	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group;
	int grpid;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	/* The channel is no longer shared with a guest. */
	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	/* Map the device-level group id to this port's group array index. */
	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
	group = set->group[grpid];

	/* Re-add the channel to the service domain's group. */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
23306495Sspeer * 23316495Sspeer * Notes: 23326495Sspeer * 23336495Sspeer * Context: 23346495Sspeer * Service domain 23356495Sspeer */ 23366495Sspeer void 23376495Sspeer nxge_hio_rdc_unshare( 23386495Sspeer nxge_t *nxge, 23398275SEric Cheng int dev_grpid, 23406495Sspeer int channel) 23416495Sspeer { 23428275SEric Cheng nxge_grp_set_t *set = &nxge->rx_set; 23438275SEric Cheng nxge_grp_t *group; 23448275SEric Cheng int grpid; 23456495Sspeer 23466495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 23476495Sspeer 23486495Sspeer /* Stop RxMAC = A.9.2.6 */ 23496495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 23506495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 23516495Sspeer "Failed to disable RxMAC")); 23526495Sspeer } 23536495Sspeer 23546495Sspeer /* Drain IPP Port = A.9.3.6 */ 23556495Sspeer (void) nxge_ipp_drain(nxge); 23566495Sspeer 23576495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 23586495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 23596495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 23606495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 23616495Sspeer "Failed to disable RxDMA channel %d", channel)); 23626495Sspeer } 23636495Sspeer 23646495Sspeer NXGE_DC_RESET(set->shared.map, channel); 23656495Sspeer set->shared.count--; 23666495Sspeer 23678275SEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid; 23688275SEric Cheng group = set->group[grpid]; 23698275SEric Cheng 23706495Sspeer /* 23716495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 23726495Sspeer * 23736495Sspeer * Initialize RxDMA A.9.5.4 23746495Sspeer * Reconfigure RxDMA 23756495Sspeer * Enable RxDMA A.9.5.5 23766495Sspeer */ 23777755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 23786495Sspeer /* Be sure to re-enable the RX MAC. 
*/ 23796495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 23806495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 23818275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 23826495Sspeer } 23836495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 23846495Sspeer "Failed to initialize RxDMA channel %d", channel)); 23856495Sspeer return; 23866495Sspeer } 23876495Sspeer 23886495Sspeer /* 23896495Sspeer * Enable RxMAC = A.9.2.10 23906495Sspeer */ 23916495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 23926495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 23938275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 23946495Sspeer return; 23956495Sspeer } 23966495Sspeer 23976495Sspeer /* Re-add this interrupt. */ 23986495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 23996495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24007950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Failed to add interrupt for " 24016495Sspeer "RxDMA CHANNEL %d", channel)); 24026495Sspeer } 24036495Sspeer 24046495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 24056495Sspeer } 24066495Sspeer 24076495Sspeer /* 24086495Sspeer * nxge_hio_dc_unshare 24096495Sspeer * 24106495Sspeer * Unshare (reuse) a DMA channel. 24116495Sspeer * 24126495Sspeer * Arguments: 24136495Sspeer * nxge 24146495Sspeer * vr The VR that <channel> belongs to. 24156495Sspeer * type Tx or Rx. 24166495Sspeer * channel The DMA channel to reuse. 
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_unshare(%d) failed", channel));
		return;
	}

	/* The channel no longer belongs to any VR. */
	dc->vr = 0;
	dc->cookie = 0;

	/* Give the channel back to the service domain. */
	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}


/*
 * nxge_hio_rxdma_bind_intr():
 *
 * For the guest domain driver, need to bind the interrupt group
 * and state to the rx_rcr_ring_t.
 */

int
nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
{
	nxge_hio_dc_t *dc;
	nxge_ldgv_t *control;
	nxge_ldg_t *group;
	nxge_ldv_t *device;

	/*
	 * Find the DMA channel.
	 */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
		return (NXGE_ERROR);
	}

	/*
	 * Get the control structure.
	 */
	control = nxge->ldgvp;
	if (control == NULL) {
		return (NXGE_ERROR);
	}

	group = &control->ldgp[dc->ldg.vector];
	device = &control->ldvp[dc->ldg.ldsv];

	/* Publish the LDG/LDV binding under the ring's lock. */
	MUTEX_ENTER(&ring->lock);
	ring->ldgp = group;
	ring->ldvp = device;
	MUTEX_EXIT(&ring->lock);

	return (NXGE_OK);
}
#endif	/* if defined(sun4v) */