16495Sspeer /* 26495Sspeer * CDDL HEADER START 36495Sspeer * 46495Sspeer * The contents of this file are subject to the terms of the 56495Sspeer * Common Development and Distribution License (the "License"). 66495Sspeer * You may not use this file except in compliance with the License. 76495Sspeer * 86495Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 96495Sspeer * or http://www.opensolaris.org/os/licensing. 106495Sspeer * See the License for the specific language governing permissions 116495Sspeer * and limitations under the License. 126495Sspeer * 136495Sspeer * When distributing Covered Code, include this CDDL HEADER in each 146495Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 156495Sspeer * If applicable, add the following below this CDDL HEADER, with the 166495Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 176495Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 186495Sspeer * 196495Sspeer * CDDL HEADER END 206495Sspeer */ 216495Sspeer 226495Sspeer /* 238597SMichael.Speer@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 246495Sspeer * Use is subject to license terms. 256495Sspeer */ 266495Sspeer 276495Sspeer /* 286495Sspeer * nxge_hio.c 296495Sspeer * 306495Sspeer * This file manages the virtualization resources for Neptune 316495Sspeer * devices. That is, it implements a hybrid I/O (HIO) approach in the 326495Sspeer * Solaris kernel, whereby a guest domain on an LDOMs server may 336495Sspeer * request & use hardware resources from the service domain. 
346495Sspeer * 356495Sspeer */ 366495Sspeer 378275SEric Cheng #include <sys/mac_provider.h> 386495Sspeer #include <sys/nxge/nxge_impl.h> 396495Sspeer #include <sys/nxge/nxge_fzc.h> 406495Sspeer #include <sys/nxge/nxge_rxdma.h> 416495Sspeer #include <sys/nxge/nxge_txdma.h> 426495Sspeer #include <sys/nxge/nxge_hio.h> 436495Sspeer 446495Sspeer /* 456495Sspeer * External prototypes 466495Sspeer */ 476495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 486495Sspeer 496495Sspeer /* The following function may be found in nxge_main.c */ 508275SEric Cheng extern int nxge_m_mmac_remove(void *arg, int slot); 518275SEric Cheng extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 528275SEric Cheng boolean_t usetbl); 536495Sspeer 546495Sspeer /* The following function may be found in nxge_[t|r]xdma.c */ 556495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 566495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 576495Sspeer 586495Sspeer /* 596495Sspeer * Local prototypes 606495Sspeer */ 616495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 626495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 636495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group); 646495Sspeer 656495Sspeer /* 666495Sspeer * These functions are used by both service & guest domains to 676495Sspeer * decide whether they're running in an LDOMs/XEN environment 686495Sspeer * or not. If so, then the Hybrid I/O (HIO) module is initialized. 696495Sspeer */ 706495Sspeer 716495Sspeer /* 726495Sspeer * nxge_get_environs 736495Sspeer * 746495Sspeer * Figure out if we are in a guest domain or not. 
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Sets nxge->environs.  A guest domain is detected by the presence
 *	of the "niutype" device property with the value "n2niu"; in that
 *	case nxge->niu_type is also forced to N2_NIU.
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 * The "niutype" property is set by the LDOMs framework on the
	 * virtual device node; it is absent in a non-LDOMs boot.
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i;

	/*
	 * The HIO data structure hangs off the shared per-hardware
	 * state, so allocate it only once (first port to attach).
	 */
	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		/* -1 == no RDC table assigned yet. */
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	/* Non-sun4v builds never run under LDOMs. */
	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
1757587SMichael.Speer@Sun.COM nxge_hio_uninit(nxge_t *nxge) 1766495Sspeer { 1776495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1786495Sspeer 1796495Sspeer ASSERT(nxge->nxge_hw_p->ndevs == 0); 1806495Sspeer 1817587SMichael.Speer@Sun.COM if (nhd != NULL) { 1827587SMichael.Speer@Sun.COM MUTEX_DESTROY(&nhd->lock); 1837587SMichael.Speer@Sun.COM KMEM_FREE(nhd, sizeof (*nhd)); 1847587SMichael.Speer@Sun.COM nxge->nxge_hw_p->hio = 0; 1857587SMichael.Speer@Sun.COM } 1866495Sspeer } 1876495Sspeer 1886495Sspeer /* 1896495Sspeer * nxge_dci_map 1906495Sspeer * 1916495Sspeer * Map a DMA channel index to a channel number. 1926495Sspeer * 1936495Sspeer * Arguments: 1946495Sspeer * instance The instance number of the driver. 1956495Sspeer * type The type of channel this is: Tx or Rx. 1966495Sspeer * index The index to convert to a channel number 1976495Sspeer * 1986495Sspeer * Notes: 1996495Sspeer * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 2006495Sspeer * 2016495Sspeer * Context: 2026495Sspeer * Any domain 2036495Sspeer */ 2046495Sspeer int 2056495Sspeer nxge_dci_map( 2066495Sspeer nxge_t *nxge, 2076495Sspeer vpc_type_t type, 2086495Sspeer int index) 2096495Sspeer { 2106495Sspeer nxge_grp_set_t *set; 2116495Sspeer int dc; 2126495Sspeer 2136495Sspeer switch (type) { 2146495Sspeer case VP_BOUND_TX: 2156495Sspeer set = &nxge->tx_set; 2166495Sspeer break; 2176495Sspeer case VP_BOUND_RX: 2186495Sspeer set = &nxge->rx_set; 2196495Sspeer break; 2206495Sspeer } 2216495Sspeer 2226495Sspeer for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 2236495Sspeer if ((1 << dc) & set->owned.map) { 2246495Sspeer if (index == 0) 2256495Sspeer return (dc); 2266495Sspeer else 2276495Sspeer index--; 2286495Sspeer } 2296495Sspeer } 2306495Sspeer 2316495Sspeer return (-1); 2326495Sspeer } 2336495Sspeer 2346495Sspeer /* 2356495Sspeer * --------------------------------------------------------------------- 2366495Sspeer * These are the general-purpose DMA channel group 
functions. That is, 2376495Sspeer * these functions are used to manage groups of TDCs or RDCs in an HIO 2386495Sspeer * environment. 2396495Sspeer * 2406495Sspeer * But is also expected that in the future they will be able to manage 2416495Sspeer * Crossbow groups. 2426495Sspeer * --------------------------------------------------------------------- 2436495Sspeer */ 2446495Sspeer 2456495Sspeer /* 2467766SMichael.Speer@Sun.COM * nxge_grp_cleanup(p_nxge_t nxge) 2477766SMichael.Speer@Sun.COM * 2487766SMichael.Speer@Sun.COM * Remove all outstanding groups. 2497766SMichael.Speer@Sun.COM * 2507766SMichael.Speer@Sun.COM * Arguments: 2517766SMichael.Speer@Sun.COM * nxge 2527766SMichael.Speer@Sun.COM */ 2537766SMichael.Speer@Sun.COM void 2547766SMichael.Speer@Sun.COM nxge_grp_cleanup(p_nxge_t nxge) 2557766SMichael.Speer@Sun.COM { 2567766SMichael.Speer@Sun.COM nxge_grp_set_t *set; 2577766SMichael.Speer@Sun.COM int i; 2587766SMichael.Speer@Sun.COM 2597766SMichael.Speer@Sun.COM MUTEX_ENTER(&nxge->group_lock); 2607766SMichael.Speer@Sun.COM 2617766SMichael.Speer@Sun.COM /* 2627766SMichael.Speer@Sun.COM * Find RX groups that need to be cleaned up. 2637766SMichael.Speer@Sun.COM */ 2647766SMichael.Speer@Sun.COM set = &nxge->rx_set; 2657766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2667766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2677766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2687766SMichael.Speer@Sun.COM set->group[i] = NULL; 2697766SMichael.Speer@Sun.COM } 2707766SMichael.Speer@Sun.COM } 2717766SMichael.Speer@Sun.COM 2727766SMichael.Speer@Sun.COM /* 2737766SMichael.Speer@Sun.COM * Find TX groups that need to be cleaned up. 
2747766SMichael.Speer@Sun.COM */ 2757766SMichael.Speer@Sun.COM set = &nxge->tx_set; 2767766SMichael.Speer@Sun.COM for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2777766SMichael.Speer@Sun.COM if (set->group[i] != NULL) { 2787766SMichael.Speer@Sun.COM KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 2797766SMichael.Speer@Sun.COM set->group[i] = NULL; 2807766SMichael.Speer@Sun.COM } 2817766SMichael.Speer@Sun.COM } 2827766SMichael.Speer@Sun.COM MUTEX_EXIT(&nxge->group_lock); 2837766SMichael.Speer@Sun.COM } 2847766SMichael.Speer@Sun.COM 2857766SMichael.Speer@Sun.COM 2867766SMichael.Speer@Sun.COM /* 2876495Sspeer * nxge_grp_add 2886495Sspeer * 2896495Sspeer * Add a group to an instance of NXGE. 2906495Sspeer * 2916495Sspeer * Arguments: 2926495Sspeer * nxge 2936495Sspeer * type Tx or Rx 2946495Sspeer * 2956495Sspeer * Notes: 2966495Sspeer * 2976495Sspeer * Context: 2986495Sspeer * Any domain 2996495Sspeer */ 3007755SMisaki.Kataoka@Sun.COM nxge_grp_t * 3016495Sspeer nxge_grp_add( 3026495Sspeer nxge_t *nxge, 3036495Sspeer nxge_grp_type_t type) 3046495Sspeer { 3056495Sspeer nxge_grp_set_t *set; 3066495Sspeer nxge_grp_t *group; 3076495Sspeer int i; 3086495Sspeer 3096495Sspeer group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 3106495Sspeer group->nxge = nxge; 3116495Sspeer 3126495Sspeer MUTEX_ENTER(&nxge->group_lock); 3136495Sspeer switch (type) { 3146495Sspeer case NXGE_TRANSMIT_GROUP: 3156495Sspeer case EXT_TRANSMIT_GROUP: 3166495Sspeer set = &nxge->tx_set; 3176495Sspeer break; 3186495Sspeer default: 3196495Sspeer set = &nxge->rx_set; 3206495Sspeer break; 3216495Sspeer } 3226495Sspeer 3236495Sspeer group->type = type; 3246495Sspeer group->active = B_TRUE; 3256495Sspeer group->sequence = set->sequence++; 3266495Sspeer 3276495Sspeer /* Find an empty slot for this logical group. 
*/ 3286495Sspeer for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 3296495Sspeer if (set->group[i] == 0) { 3306495Sspeer group->index = i; 3316495Sspeer set->group[i] = group; 3326495Sspeer NXGE_DC_SET(set->lg.map, i); 3336495Sspeer set->lg.count++; 3346495Sspeer break; 3356495Sspeer } 3366495Sspeer } 3376495Sspeer MUTEX_EXIT(&nxge->group_lock); 3386495Sspeer 3396495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3406495Sspeer "nxge_grp_add: %cgroup = %d.%d", 3416495Sspeer type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3426495Sspeer nxge->mac.portnum, group->sequence)); 3436495Sspeer 3447755SMisaki.Kataoka@Sun.COM return (group); 3456495Sspeer } 3466495Sspeer 3476495Sspeer void 3486495Sspeer nxge_grp_remove( 3496495Sspeer nxge_t *nxge, 3507755SMisaki.Kataoka@Sun.COM nxge_grp_t *group) /* The group to remove. */ 3516495Sspeer { 3526495Sspeer nxge_grp_set_t *set; 3536495Sspeer vpc_type_t type; 3546495Sspeer 3556495Sspeer MUTEX_ENTER(&nxge->group_lock); 3566495Sspeer switch (group->type) { 3576495Sspeer case NXGE_TRANSMIT_GROUP: 3586495Sspeer case EXT_TRANSMIT_GROUP: 3596495Sspeer set = &nxge->tx_set; 3606495Sspeer break; 3616495Sspeer default: 3626495Sspeer set = &nxge->rx_set; 3636495Sspeer break; 3646495Sspeer } 3656495Sspeer 3666495Sspeer if (set->group[group->index] != group) { 3676495Sspeer MUTEX_EXIT(&nxge->group_lock); 3686495Sspeer return; 3696495Sspeer } 3706495Sspeer 3716495Sspeer set->group[group->index] = 0; 3726495Sspeer NXGE_DC_RESET(set->lg.map, group->index); 3736495Sspeer set->lg.count--; 3746495Sspeer 3756495Sspeer /* While inside the mutex, deactivate <group>. */ 3766495Sspeer group->active = B_FALSE; 3776495Sspeer 3786495Sspeer MUTEX_EXIT(&nxge->group_lock); 3796495Sspeer 3806495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3816495Sspeer "nxge_grp_remove(%c.%d.%d) called", 3826495Sspeer group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3836495Sspeer nxge->mac.portnum, group->sequence)); 3846495Sspeer 3856495Sspeer /* Now, remove any DCs which are still active. 
*/ 3866495Sspeer switch (group->type) { 3876495Sspeer default: 3886495Sspeer type = VP_BOUND_TX; 3896495Sspeer break; 3906495Sspeer case NXGE_RECEIVE_GROUP: 3916495Sspeer case EXT_RECEIVE_GROUP: 3926495Sspeer type = VP_BOUND_RX; 3936495Sspeer } 3946495Sspeer 3956495Sspeer while (group->dc) { 3966495Sspeer nxge_grp_dc_remove(nxge, type, group->dc->channel); 3976495Sspeer } 3986495Sspeer 3996495Sspeer KMEM_FREE(group, sizeof (*group)); 4006495Sspeer } 4016495Sspeer 4026495Sspeer /* 4037950SMichael.Speer@Sun.COM * nxge_grp_dc_add 4046495Sspeer * 4056495Sspeer * Add a DMA channel to a VR/Group. 4066495Sspeer * 4076495Sspeer * Arguments: 4086495Sspeer * nxge 4096495Sspeer * channel The channel to add. 4106495Sspeer * Notes: 4116495Sspeer * 4126495Sspeer * Context: 4136495Sspeer * Any domain 4146495Sspeer */ 4156495Sspeer /* ARGSUSED */ 4166495Sspeer int 4176495Sspeer nxge_grp_dc_add( 4186495Sspeer nxge_t *nxge, 4197755SMisaki.Kataoka@Sun.COM nxge_grp_t *group, /* The group to add <channel> to. 
*/ 4206495Sspeer vpc_type_t type, /* Rx or Tx */ 4216495Sspeer int channel) /* A physical/logical channel number */ 4226495Sspeer { 4236495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 4246495Sspeer nxge_hio_dc_t *dc; 4256495Sspeer nxge_grp_set_t *set; 4266602Sspeer nxge_status_t status = NXGE_OK; 4276495Sspeer 4286495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 4296495Sspeer 4308275SEric Cheng if (group == 0) 4316495Sspeer return (0); 4326495Sspeer 4336495Sspeer switch (type) { 4347950SMichael.Speer@Sun.COM case VP_BOUND_TX: 4356495Sspeer set = &nxge->tx_set; 4366495Sspeer if (channel > NXGE_MAX_TDCS) { 4376495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4386495Sspeer "nxge_grp_dc_add: TDC = %d", channel)); 4396495Sspeer return (NXGE_ERROR); 4406495Sspeer } 4416495Sspeer break; 4426495Sspeer case VP_BOUND_RX: 4436495Sspeer set = &nxge->rx_set; 4446495Sspeer if (channel > NXGE_MAX_RDCS) { 4456495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4466495Sspeer "nxge_grp_dc_add: RDC = %d", channel)); 4476495Sspeer return (NXGE_ERROR); 4486495Sspeer } 4496495Sspeer break; 4507950SMichael.Speer@Sun.COM 4517950SMichael.Speer@Sun.COM default: 4527950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4537950SMichael.Speer@Sun.COM "nxge_grp_dc_add: unknown type channel(%d)", channel)); 4546495Sspeer } 4556495Sspeer 4566495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 4576495Sspeer "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 4586495Sspeer type == VP_BOUND_TX ? 't' : 'r', 4596495Sspeer nxge->mac.portnum, group->sequence, group->count, channel)); 4606495Sspeer 4616495Sspeer MUTEX_ENTER(&nxge->group_lock); 4626495Sspeer if (group->active != B_TRUE) { 4636495Sspeer /* We may be in the process of removing this group. 
*/ 4646495Sspeer MUTEX_EXIT(&nxge->group_lock); 4656495Sspeer return (NXGE_ERROR); 4666495Sspeer } 4676495Sspeer MUTEX_EXIT(&nxge->group_lock); 4686495Sspeer 4696495Sspeer if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 4706495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4716495Sspeer "nxge_grp_dc_add(%d): DC FIND failed", channel)); 4726495Sspeer return (NXGE_ERROR); 4736495Sspeer } 4746495Sspeer 4756495Sspeer MUTEX_ENTER(&nhd->lock); 4766495Sspeer 4776495Sspeer if (dc->group) { 4786495Sspeer MUTEX_EXIT(&nhd->lock); 4796495Sspeer /* This channel is already in use! */ 4806495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4816495Sspeer "nxge_grp_dc_add(%d): channel already in group", channel)); 4826495Sspeer return (NXGE_ERROR); 4836495Sspeer } 4846495Sspeer 4856495Sspeer dc->next = 0; 4866495Sspeer dc->page = channel; 4876495Sspeer dc->channel = (nxge_channel_t)channel; 4886495Sspeer 4896495Sspeer dc->type = type; 4906495Sspeer if (type == VP_BOUND_RX) { 4916495Sspeer dc->init = nxge_init_rxdma_channel; 4926495Sspeer dc->uninit = nxge_uninit_rxdma_channel; 4936495Sspeer } else { 4946495Sspeer dc->init = nxge_init_txdma_channel; 4956495Sspeer dc->uninit = nxge_uninit_txdma_channel; 4966495Sspeer } 4976495Sspeer 4987755SMisaki.Kataoka@Sun.COM dc->group = group; 4996495Sspeer 5006495Sspeer if (isLDOMguest(nxge)) 5016495Sspeer (void) nxge_hio_ldsv_add(nxge, dc); 5026495Sspeer 5036495Sspeer NXGE_DC_SET(set->owned.map, channel); 5046495Sspeer set->owned.count++; 5056495Sspeer 5066495Sspeer MUTEX_EXIT(&nhd->lock); 5076495Sspeer 5086602Sspeer if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 5096602Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5106602Sspeer "nxge_grp_dc_add(%d): channel init failed", channel)); 5117950SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5127950SMichael.Speer@Sun.COM (void) memset(dc, 0, sizeof (*dc)); 5137950SMichael.Speer@Sun.COM NXGE_DC_RESET(set->owned.map, channel); 5147950SMichael.Speer@Sun.COM set->owned.count--; 
5157950SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5166603Sspeer return (NXGE_ERROR); 5176602Sspeer } 5186602Sspeer 5196495Sspeer nxge_grp_dc_append(nxge, group, dc); 5206495Sspeer 5217812SMichael.Speer@Sun.COM if (type == VP_BOUND_TX) { 5227812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 5237812SMichael.Speer@Sun.COM nxge->tdc_is_shared[channel] = B_FALSE; 5247812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 5257812SMichael.Speer@Sun.COM } 5267812SMichael.Speer@Sun.COM 5276495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 5286495Sspeer 5296602Sspeer return ((int)status); 5306495Sspeer } 5316495Sspeer 5326495Sspeer void 5336495Sspeer nxge_grp_dc_remove( 5346495Sspeer nxge_t *nxge, 5356495Sspeer vpc_type_t type, 5366495Sspeer int channel) 5376495Sspeer { 5386495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5396495Sspeer nxge_hio_dc_t *dc; 5406495Sspeer nxge_grp_set_t *set; 5416495Sspeer nxge_grp_t *group; 5426495Sspeer 5436495Sspeer dc_uninit_t uninit; 5446495Sspeer 5456495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 5466495Sspeer 5477950SMichael.Speer@Sun.COM if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) 5487950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5497950SMichael.Speer@Sun.COM 5507950SMichael.Speer@Sun.COM if ((dc->group == NULL) && (dc->next == 0) && 5517950SMichael.Speer@Sun.COM (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) { 5527950SMichael.Speer@Sun.COM goto nxge_grp_dc_remove_exit; 5536495Sspeer } 5547950SMichael.Speer@Sun.COM 5556495Sspeer group = (nxge_grp_t *)dc->group; 5566495Sspeer 5576495Sspeer if (isLDOMguest(nxge)) { 5586495Sspeer (void) nxge_hio_intr_remove(nxge, type, channel); 5596495Sspeer } 5606495Sspeer 5616495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 5626495Sspeer "DC remove: group = %d.%d.%d, %cdc %d", 5636495Sspeer nxge->mac.portnum, group->sequence, group->count, 5646495Sspeer type == VP_BOUND_TX ? 
	    't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_remove(%d) failed", channel));
		goto nxge_grp_dc_remove_exit;
	}

	/* Capture what we need before the descriptor is zeroed. */
	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	/* Uninitialize the hardware channel outside of <nhd->lock>. */
	(*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

/*
 * nxge_grp_dc_find
 *
 * Find the shared DC descriptor for <channel>.  In the service domain
 * the descriptor arrays are indexed directly by channel number; in a
 * guest domain we must scan for a descriptor whose VR belongs to this
 * nxge instance.
 */
nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *	Links <dc> onto the tail of the group's singly-linked DC list,
 *	sets its bit in the group map, and rebuilds the channel legend.
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		/* Walk to the tail of the list. */
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The channel number of the DC to unlink
 *
 * Returns:
 *	The unlinked DC descriptor, or 0 if <group> is NULL/empty or
 *	<channel> is not in the group.
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
    nxge_t *nxge,
    nxge_grp_t *group,
    int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if (group == NULL) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	/* Standard singly-linked-list removal by key. */
	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		NXGE_DC_RESET(group->map, channel);
		group->count--;
	}

	/* Rebuild the channel legend to reflect the removal. */
	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

/*
 * Register-name tables used by nxge_rx2str()/nxge_tx2str() below.
 * Each entry maps a DMC register offset to its name; the tables are
 * terminated by a NULL name with offset -1.
 */
static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1", 0 },
	{ "RXDMA_CFIG2", 8 },
	{ "RBR_CFIG_A", 0x10 },
	{ "RBR_CFIG_B", 0x18 },
	{ "RBR_KICK", 0x20 },
	{ "RBR_STAT", 0x28 },
	{ "RBR_HDH", 0x30 },
	{ "RBR_HDL", 0x38 },
	{ "RCRCFIG_A", 0x40 },
	{ "RCRCFIG_B", 0x48 },
	{ "RCRSTAT_A", 0x50 },
	{ "RCRSTAT_B", 0x58 },
	{ "RCRSTAT_C", 0x60 },
	{ "RX_DMA_ENT_MSK", 0x68 },
	{ "RX_DMA_CTL_STAT", 0x70 },
	{ "RCR_FLSH", 0x78 },
	{ "RXMISC", 0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG", 0 },
	{ "Tx_RNG_HDL", 0x10 },
	{ "Tx_RNG_KICK", 0x18 },
	{ "Tx_ENT_MASK", 0x20 },
	{ "Tx_CS", 0x28 },
	{ "TxDMA_MBH", 0x30 },
	{ "TxDMA_MBL", 0x38 },
	{ "TxDMA_PRE_ST", 0x40 },
	{ "Tx_RNG_ERR_LOGH", 0x48 },
	{ "Tx_RNG_ERR_LOGL", 0x50 },
	{ "TDMC_INTR_DBG", 0x60 },
	{ "Tx_CS_DBG", 0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
8496495Sspeer * 8506495Sspeer * Context: 8516495Sspeer * Any domain 8526495Sspeer */ 8536495Sspeer const char * 8546495Sspeer nxge_rx2str( 8556495Sspeer int offset) 8566495Sspeer { 8576495Sspeer dmc_reg_name_t *reg = &rx_names[0]; 8586495Sspeer 8596495Sspeer offset &= DMA_CSR_MASK; 8606495Sspeer 8616495Sspeer while (reg->name) { 8626495Sspeer if (offset == reg->offset) 8636495Sspeer return (reg->name); 8646495Sspeer reg++; 8656495Sspeer } 8666495Sspeer 8676495Sspeer return (0); 8686495Sspeer } 8696495Sspeer 8706495Sspeer const char * 8716495Sspeer nxge_tx2str( 8726495Sspeer int offset) 8736495Sspeer { 8746495Sspeer dmc_reg_name_t *reg = &tx_names[0]; 8756495Sspeer 8766495Sspeer offset &= DMA_CSR_MASK; 8776495Sspeer 8786495Sspeer while (reg->name) { 8796495Sspeer if (offset == reg->offset) 8806495Sspeer return (reg->name); 8816495Sspeer reg++; 8826495Sspeer } 8836495Sspeer 8846495Sspeer return (0); 8856495Sspeer } 8866495Sspeer 8876495Sspeer /* 8886495Sspeer * nxge_ddi_perror 8896495Sspeer * 8906495Sspeer * Map a DDI error number to a string. 8916495Sspeer * 8926495Sspeer * Arguments: 8936495Sspeer * ddi_error The DDI error number to map. 
8946495Sspeer * 8956495Sspeer * Notes: 8966495Sspeer * 8976495Sspeer * Context: 8986495Sspeer * Any domain 8996495Sspeer */ 9006495Sspeer const char * 9016495Sspeer nxge_ddi_perror( 9026495Sspeer int ddi_error) 9036495Sspeer { 9046495Sspeer switch (ddi_error) { 9056495Sspeer case DDI_SUCCESS: 9066495Sspeer return ("DDI_SUCCESS"); 9076495Sspeer case DDI_FAILURE: 9086495Sspeer return ("DDI_FAILURE"); 9096495Sspeer case DDI_NOT_WELL_FORMED: 9106495Sspeer return ("DDI_NOT_WELL_FORMED"); 9116495Sspeer case DDI_EAGAIN: 9126495Sspeer return ("DDI_EAGAIN"); 9136495Sspeer case DDI_EINVAL: 9146495Sspeer return ("DDI_EINVAL"); 9156495Sspeer case DDI_ENOTSUP: 9166495Sspeer return ("DDI_ENOTSUP"); 9176495Sspeer case DDI_EPENDING: 9186495Sspeer return ("DDI_EPENDING"); 9196495Sspeer case DDI_ENOMEM: 9206495Sspeer return ("DDI_ENOMEM"); 9216495Sspeer case DDI_EBUSY: 9226495Sspeer return ("DDI_EBUSY"); 9236495Sspeer case DDI_ETRANSPORT: 9246495Sspeer return ("DDI_ETRANSPORT"); 9256495Sspeer case DDI_ECONTEXT: 9266495Sspeer return ("DDI_ECONTEXT"); 9276495Sspeer default: 9286495Sspeer return ("Unknown error"); 9296495Sspeer } 9306495Sspeer } 9316495Sspeer 9326495Sspeer /* 9336495Sspeer * --------------------------------------------------------------------- 9346495Sspeer * These are Sun4v HIO function definitions 9356495Sspeer * --------------------------------------------------------------------- 9366495Sspeer */ 9376495Sspeer 9386495Sspeer #if defined(sun4v) 9396495Sspeer 9406495Sspeer /* 9416495Sspeer * Local prototypes 9426495Sspeer */ 9437755SMisaki.Kataoka@Sun.COM static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *); 9447755SMisaki.Kataoka@Sun.COM static void nxge_hio_unshare(nxge_hio_vr_t *); 9456495Sspeer 9468275SEric Cheng static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *); 9477755SMisaki.Kataoka@Sun.COM static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t); 9486495Sspeer 9498275SEric Cheng static void nxge_hio_tdc_unshare(nxge_t *nxge, 
int dev_grpid, int channel); 9508275SEric Cheng static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel); 9518275SEric Cheng static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int); 9526495Sspeer static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 9536495Sspeer mac_ring_type_t, int); 9546495Sspeer 9556495Sspeer /* 9566495Sspeer * nxge_hio_init 9576495Sspeer * 9586495Sspeer * Initialize the HIO module of the NXGE driver. 9596495Sspeer * 9606495Sspeer * Arguments: 9616495Sspeer * nxge 9626495Sspeer * 9636495Sspeer * Notes: 9646495Sspeer * 9656495Sspeer * Context: 9666495Sspeer * Any domain 9676495Sspeer */ 9686495Sspeer int 9696495Sspeer nxge_hio_init( 9706495Sspeer nxge_t *nxge) 9716495Sspeer { 9726495Sspeer nxge_hio_data_t *nhd; 9736495Sspeer int i, region; 9746495Sspeer 9756495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 9766495Sspeer if (nhd == 0) { 9776495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 9786495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 9796495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 9806495Sspeer } 9816495Sspeer 9826713Sspeer if ((nxge->environs == SOLARIS_DOMAIN) && 9836713Sspeer (nxge->niu_type == N2_NIU)) { 9846495Sspeer if (nxge->niu_hsvc_available == B_TRUE) { 9856495Sspeer hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 9866495Sspeer if (niu_hsvc->hsvc_major == 1 && 9876495Sspeer niu_hsvc->hsvc_minor == 1) 9886495Sspeer nxge->environs = SOLARIS_SERVICE_DOMAIN; 9896495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 9906495Sspeer "nxge_hio_init: hypervisor services " 9916495Sspeer "version %d.%d", 9926495Sspeer niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 9936495Sspeer } 9946495Sspeer } 9956495Sspeer 9968275SEric Cheng /* 9978275SEric Cheng * Initialize share and ring group structures. 
9988275SEric Cheng */ 9998275SEric Cheng for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) { 10008275SEric Cheng nxge->tx_hio_groups[i].ghandle = NULL; 10018275SEric Cheng nxge->tx_hio_groups[i].nxgep = nxge; 10028275SEric Cheng nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX; 10038275SEric Cheng nxge->tx_hio_groups[i].gindex = 0; 10048275SEric Cheng nxge->tx_hio_groups[i].sindex = 0; 10058275SEric Cheng } 10068275SEric Cheng 10078275SEric Cheng for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 10088275SEric Cheng nxge->rx_hio_groups[i].ghandle = NULL; 10098275SEric Cheng nxge->rx_hio_groups[i].nxgep = nxge; 10108275SEric Cheng nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX; 10118275SEric Cheng nxge->rx_hio_groups[i].gindex = 0; 10128275SEric Cheng nxge->rx_hio_groups[i].sindex = 0; 10138275SEric Cheng nxge->rx_hio_groups[i].started = B_FALSE; 10148597SMichael.Speer@Sun.COM nxge->rx_hio_groups[i].port_default_grp = B_FALSE; 10158275SEric Cheng nxge->rx_hio_groups[i].rdctbl = -1; 10168275SEric Cheng nxge->rx_hio_groups[i].n_mac_addrs = 0; 10178275SEric Cheng } 10188275SEric Cheng 10196495Sspeer if (!isLDOMs(nxge)) { 10206495Sspeer nhd->hio.ldoms = B_FALSE; 10216495Sspeer return (NXGE_OK); 10226495Sspeer } 10236495Sspeer 10246495Sspeer nhd->hio.ldoms = B_TRUE; 10256495Sspeer 10266495Sspeer /* 10276495Sspeer * Fill in what we can. 10286495Sspeer */ 10296495Sspeer for (region = 0; region < NXGE_VR_SR_MAX; region++) { 10306495Sspeer nhd->vr[region].region = region; 10316495Sspeer } 10327755SMisaki.Kataoka@Sun.COM nhd->vrs = NXGE_VR_SR_MAX - 2; 10336495Sspeer 10346495Sspeer /* 10358275SEric Cheng * Initialize the share stuctures. 
10366495Sspeer */ 10377812SMichael.Speer@Sun.COM for (i = 0; i < NXGE_MAX_TDCS; i++) 10387812SMichael.Speer@Sun.COM nxge->tdc_is_shared[i] = B_FALSE; 10397812SMichael.Speer@Sun.COM 10406495Sspeer for (i = 0; i < NXGE_VR_SR_MAX; i++) { 10416495Sspeer nxge->shares[i].nxgep = nxge; 10426495Sspeer nxge->shares[i].index = 0; 10438275SEric Cheng nxge->shares[i].vrp = NULL; 10446495Sspeer nxge->shares[i].tmap = 0; 10456495Sspeer nxge->shares[i].rmap = 0; 10466495Sspeer nxge->shares[i].rxgroup = 0; 10476495Sspeer nxge->shares[i].active = B_FALSE; 10486495Sspeer } 10496495Sspeer 10506495Sspeer /* Fill in the HV HIO function pointers. */ 10516495Sspeer nxge_hio_hv_init(nxge); 10526495Sspeer 10536495Sspeer if (isLDOMservice(nxge)) { 10546495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 10556495Sspeer "Hybrid IO-capable service domain")); 10566495Sspeer return (NXGE_OK); 10576495Sspeer } 10586495Sspeer 10596495Sspeer return (0); 10606495Sspeer } 10618275SEric Cheng #endif /* defined(sun4v) */ 10628275SEric Cheng 10638275SEric Cheng static int 10648275SEric Cheng nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g, 10658275SEric Cheng const uint8_t *macaddr) 10668275SEric Cheng { 10678275SEric Cheng int rv; 10688275SEric Cheng nxge_rdc_grp_t *group; 10698275SEric Cheng 10708275SEric Cheng mutex_enter(nxge->genlock); 10718275SEric Cheng 10728275SEric Cheng /* 10738275SEric Cheng * Initialize the NXGE RDC table data structure. 10748275SEric Cheng */ 10758275SEric Cheng group = &nxge->pt_config.rdc_grps[g->rdctbl]; 10768275SEric Cheng if (!group->flag) { 10778275SEric Cheng group->port = NXGE_GET_PORT_NUM(nxge->function_num); 10788275SEric Cheng group->config_method = RDC_TABLE_ENTRY_METHOD_REP; 10798275SEric Cheng group->flag = B_TRUE; /* This group has been configured. */ 10808275SEric Cheng } 10818275SEric Cheng 10828275SEric Cheng mutex_exit(nxge->genlock); 10838275SEric Cheng 10848275SEric Cheng /* 10858275SEric Cheng * Add the MAC address. 
10868275SEric Cheng */ 10878275SEric Cheng if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr, 10888275SEric Cheng g->rdctbl, B_TRUE)) != 0) { 10898275SEric Cheng return (rv); 10908275SEric Cheng } 10918275SEric Cheng 10928275SEric Cheng mutex_enter(nxge->genlock); 10938275SEric Cheng g->n_mac_addrs++; 10948275SEric Cheng mutex_exit(nxge->genlock); 10958275SEric Cheng return (0); 10968275SEric Cheng } 10976495Sspeer 10986495Sspeer static int 10998597SMichael.Speer@Sun.COM nxge_hio_set_unicst(void *arg, const uint8_t *macaddr) 11008597SMichael.Speer@Sun.COM { 11018597SMichael.Speer@Sun.COM p_nxge_t nxgep = (p_nxge_t)arg; 11028597SMichael.Speer@Sun.COM struct ether_addr addrp; 11038597SMichael.Speer@Sun.COM 11048597SMichael.Speer@Sun.COM bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 11058597SMichael.Speer@Sun.COM if (nxge_set_mac_addr(nxgep, &addrp)) { 11068597SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 11078597SMichael.Speer@Sun.COM "<== nxge_m_unicst: set unitcast failed")); 11088597SMichael.Speer@Sun.COM return (EINVAL); 11098597SMichael.Speer@Sun.COM } 11108597SMichael.Speer@Sun.COM 11118597SMichael.Speer@Sun.COM nxgep->primary = B_TRUE; 11128597SMichael.Speer@Sun.COM 11138597SMichael.Speer@Sun.COM return (0); 11148597SMichael.Speer@Sun.COM } 11158597SMichael.Speer@Sun.COM 11168597SMichael.Speer@Sun.COM /*ARGSUSED*/ 11178597SMichael.Speer@Sun.COM static int 11188597SMichael.Speer@Sun.COM nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr) 11198597SMichael.Speer@Sun.COM { 11208597SMichael.Speer@Sun.COM nxgep->primary = B_FALSE; 11218597SMichael.Speer@Sun.COM return (0); 11228597SMichael.Speer@Sun.COM } 11238597SMichael.Speer@Sun.COM 11248597SMichael.Speer@Sun.COM static int 11256495Sspeer nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 11266495Sspeer { 1127*10309SSriharsha.Basavapatna@Sun.COM nxge_ring_group_t *group = (nxge_ring_group_t *)arg; 1128*10309SSriharsha.Basavapatna@Sun.COM p_nxge_t nxge = group->nxgep; 
1129*10309SSriharsha.Basavapatna@Sun.COM int rv; 1130*10309SSriharsha.Basavapatna@Sun.COM nxge_hio_vr_t *vr; /* The Virtualization Region */ 11316495Sspeer 11328275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 1133*10309SSriharsha.Basavapatna@Sun.COM ASSERT(group->nxgep != NULL); 1134*10309SSriharsha.Basavapatna@Sun.COM 1135*10309SSriharsha.Basavapatna@Sun.COM if (isLDOMguest(group->nxgep)) 1136*10309SSriharsha.Basavapatna@Sun.COM return (0); 11378275SEric Cheng 11388275SEric Cheng mutex_enter(nxge->genlock); 11396495Sspeer 11408597SMichael.Speer@Sun.COM if (!nxge->primary && group->port_default_grp) { 11418597SMichael.Speer@Sun.COM rv = nxge_hio_set_unicst((void *)nxge, mac_addr); 11428597SMichael.Speer@Sun.COM mutex_exit(nxge->genlock); 11438597SMichael.Speer@Sun.COM return (rv); 11448597SMichael.Speer@Sun.COM } 11458597SMichael.Speer@Sun.COM 11466495Sspeer /* 11478275SEric Cheng * If the group is associated with a VR, then only one 11488275SEric Cheng * address may be assigned to the group. 11496495Sspeer */ 11508275SEric Cheng vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp; 11518275SEric Cheng if ((vr != NULL) && (group->n_mac_addrs)) { 11528275SEric Cheng mutex_exit(nxge->genlock); 11538275SEric Cheng return (ENOSPC); 11548275SEric Cheng } 11558275SEric Cheng 11568275SEric Cheng mutex_exit(nxge->genlock); 11578275SEric Cheng 11588275SEric Cheng /* 11598275SEric Cheng * Program the mac address for the group. 
11608275SEric Cheng */ 1161*10309SSriharsha.Basavapatna@Sun.COM if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) { 11626495Sspeer return (rv); 11636495Sspeer } 11646495Sspeer 11656495Sspeer return (0); 11666495Sspeer } 11676495Sspeer 11688275SEric Cheng static int 11698275SEric Cheng find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr) 11708275SEric Cheng { 11718275SEric Cheng int i; 11728275SEric Cheng for (i = 0; i <= mmac_info->num_mmac; i++) { 11738275SEric Cheng if (memcmp(mmac_info->mac_pool[i].addr, mac_addr, 11748275SEric Cheng ETHERADDRL) == 0) { 11758275SEric Cheng return (i); 11768275SEric Cheng } 11778275SEric Cheng } 11788275SEric Cheng return (-1); 11798275SEric Cheng } 11808275SEric Cheng 11816495Sspeer /* ARGSUSED */ 11826495Sspeer static int 11836495Sspeer nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 11846495Sspeer { 11858275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)arg; 11868597SMichael.Speer@Sun.COM struct ether_addr addrp; 11878275SEric Cheng p_nxge_t nxge = group->nxgep; 11888275SEric Cheng nxge_mmac_t *mmac_info; 11898275SEric Cheng int rv, slot; 11908275SEric Cheng 11918275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 1192*10309SSriharsha.Basavapatna@Sun.COM ASSERT(group->nxgep != NULL); 1193*10309SSriharsha.Basavapatna@Sun.COM 1194*10309SSriharsha.Basavapatna@Sun.COM if (isLDOMguest(group->nxgep)) 1195*10309SSriharsha.Basavapatna@Sun.COM return (0); 11968275SEric Cheng 11978275SEric Cheng mutex_enter(nxge->genlock); 11986495Sspeer 11998275SEric Cheng mmac_info = &nxge->nxge_mmac_info; 12008275SEric Cheng slot = find_mac_slot(mmac_info, mac_addr); 12018275SEric Cheng if (slot < 0) { 12028597SMichael.Speer@Sun.COM if (group->port_default_grp && nxge->primary) { 12038597SMichael.Speer@Sun.COM bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL); 12048597SMichael.Speer@Sun.COM if (ether_cmp(&addrp, &nxge->ouraddr) == 0) { 12058597SMichael.Speer@Sun.COM rv = nxge_hio_clear_unicst(nxge, mac_addr); 
12068597SMichael.Speer@Sun.COM mutex_exit(nxge->genlock); 12078597SMichael.Speer@Sun.COM return (rv); 12088597SMichael.Speer@Sun.COM } else { 12098597SMichael.Speer@Sun.COM mutex_exit(nxge->genlock); 12108597SMichael.Speer@Sun.COM return (EINVAL); 12118597SMichael.Speer@Sun.COM } 12128597SMichael.Speer@Sun.COM } else { 12138597SMichael.Speer@Sun.COM mutex_exit(nxge->genlock); 12148597SMichael.Speer@Sun.COM return (EINVAL); 12158597SMichael.Speer@Sun.COM } 12168275SEric Cheng } 12178275SEric Cheng 12188275SEric Cheng mutex_exit(nxge->genlock); 12196495Sspeer 12206495Sspeer /* 12218275SEric Cheng * Remove the mac address for the group 12226495Sspeer */ 12238275SEric Cheng if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) { 12248275SEric Cheng return (rv); 12258275SEric Cheng } 12268275SEric Cheng 12278275SEric Cheng mutex_enter(nxge->genlock); 12288275SEric Cheng group->n_mac_addrs--; 12298275SEric Cheng mutex_exit(nxge->genlock); 12306495Sspeer 12316495Sspeer return (0); 12326495Sspeer } 12336495Sspeer 12348275SEric Cheng static int 12358275SEric Cheng nxge_hio_group_start(mac_group_driver_t gdriver) 12368275SEric Cheng { 12378275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver; 12389047SMichael.Speer@Sun.COM nxge_rdc_grp_t *rdc_grp_p; 12398275SEric Cheng int rdctbl; 12408275SEric Cheng int dev_gindex; 12418275SEric Cheng 12428275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 1243*10309SSriharsha.Basavapatna@Sun.COM ASSERT(group->nxgep != NULL); 12448275SEric Cheng 12458275SEric Cheng ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED); 12468275SEric Cheng if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED) 12478275SEric Cheng return (ENXIO); 12488275SEric Cheng 12498275SEric Cheng mutex_enter(group->nxgep->genlock); 1250*10309SSriharsha.Basavapatna@Sun.COM if (isLDOMguest(group->nxgep)) 1251*10309SSriharsha.Basavapatna@Sun.COM goto nxge_hio_group_start_exit; 1252*10309SSriharsha.Basavapatna@Sun.COM 12538275SEric Cheng dev_gindex = 
group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid + 12548275SEric Cheng group->gindex; 12559047SMichael.Speer@Sun.COM rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex]; 12568275SEric Cheng 12578275SEric Cheng /* 12588275SEric Cheng * Get an rdc table for this group. 12598275SEric Cheng * Group ID is given by the caller, and that's the group it needs 12608275SEric Cheng * to bind to. The default group is already bound when the driver 12618275SEric Cheng * was attached. 12628275SEric Cheng * 12638275SEric Cheng * For Group 0, it's RDC table was allocated at attach time 12648275SEric Cheng * no need to allocate a new table. 12658275SEric Cheng */ 12668275SEric Cheng if (group->gindex != 0) { 12678275SEric Cheng rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep, 12688275SEric Cheng dev_gindex, B_TRUE); 12698275SEric Cheng if (rdctbl < 0) { 12708275SEric Cheng mutex_exit(group->nxgep->genlock); 12718275SEric Cheng return (rdctbl); 12728275SEric Cheng } 12738275SEric Cheng } else { 12748275SEric Cheng rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid; 12758275SEric Cheng } 12768275SEric Cheng 12778275SEric Cheng group->rdctbl = rdctbl; 12788275SEric Cheng 12799047SMichael.Speer@Sun.COM (void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl); 12808275SEric Cheng 1281*10309SSriharsha.Basavapatna@Sun.COM nxge_hio_group_start_exit: 12828275SEric Cheng group->started = B_TRUE; 12838275SEric Cheng mutex_exit(group->nxgep->genlock); 12848275SEric Cheng return (0); 12858275SEric Cheng } 12868275SEric Cheng 12878275SEric Cheng static void 12888275SEric Cheng nxge_hio_group_stop(mac_group_driver_t gdriver) 12898275SEric Cheng { 12908275SEric Cheng nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver; 12918275SEric Cheng 12928275SEric Cheng ASSERT(group->type == MAC_RING_TYPE_RX); 12938275SEric Cheng 12948275SEric Cheng mutex_enter(group->nxgep->genlock); 12958275SEric Cheng group->started = B_FALSE; 12968275SEric Cheng 
1297*10309SSriharsha.Basavapatna@Sun.COM if (isLDOMguest(group->nxgep)) 1298*10309SSriharsha.Basavapatna@Sun.COM goto nxge_hio_group_stop_exit; 1299*10309SSriharsha.Basavapatna@Sun.COM 13008275SEric Cheng /* 13018275SEric Cheng * Unbind the RDC table previously bound for this group. 13028275SEric Cheng * 13038275SEric Cheng * Since RDC table for group 0 was allocated at attach 13048275SEric Cheng * time, no need to unbind the table here. 13058275SEric Cheng */ 13068275SEric Cheng if (group->gindex != 0) 13078275SEric Cheng (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl); 13088275SEric Cheng 1309*10309SSriharsha.Basavapatna@Sun.COM nxge_hio_group_stop_exit: 13108275SEric Cheng mutex_exit(group->nxgep->genlock); 13118275SEric Cheng } 13128275SEric Cheng 13136495Sspeer /* ARGSUSED */ 13146495Sspeer void 13158275SEric Cheng nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid, 13166495Sspeer mac_group_info_t *infop, mac_group_handle_t ghdl) 13176495Sspeer { 13188275SEric Cheng p_nxge_t nxgep = (p_nxge_t)arg; 13198275SEric Cheng nxge_ring_group_t *group; 13208275SEric Cheng int dev_gindex; 13216495Sspeer 13226495Sspeer switch (type) { 13236495Sspeer case MAC_RING_TYPE_RX: 13248275SEric Cheng group = &nxgep->rx_hio_groups[groupid]; 13258275SEric Cheng group->nxgep = nxgep; 13268275SEric Cheng group->ghandle = ghdl; 13278275SEric Cheng group->gindex = groupid; 13288275SEric Cheng group->sindex = 0; /* not yet bound to a share */ 13296495Sspeer 1330*10309SSriharsha.Basavapatna@Sun.COM if (!isLDOMguest(nxgep)) { 1331*10309SSriharsha.Basavapatna@Sun.COM dev_gindex = 1332*10309SSriharsha.Basavapatna@Sun.COM nxgep->pt_config.hw_config.def_mac_rxdma_grpid + 1333*10309SSriharsha.Basavapatna@Sun.COM groupid; 13348275SEric Cheng 1335*10309SSriharsha.Basavapatna@Sun.COM if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid == 1336*10309SSriharsha.Basavapatna@Sun.COM dev_gindex) 1337*10309SSriharsha.Basavapatna@Sun.COM group->port_default_grp = B_TRUE; 
1338*10309SSriharsha.Basavapatna@Sun.COM 1339*10309SSriharsha.Basavapatna@Sun.COM infop->mgi_count = 1340*10309SSriharsha.Basavapatna@Sun.COM nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs; 1341*10309SSriharsha.Basavapatna@Sun.COM } else { 1342*10309SSriharsha.Basavapatna@Sun.COM infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS; 1343*10309SSriharsha.Basavapatna@Sun.COM } 13448597SMichael.Speer@Sun.COM 13458275SEric Cheng infop->mgi_driver = (mac_group_driver_t)group; 13468275SEric Cheng infop->mgi_start = nxge_hio_group_start; 13478275SEric Cheng infop->mgi_stop = nxge_hio_group_stop; 13488275SEric Cheng infop->mgi_addmac = nxge_hio_add_mac; 13498275SEric Cheng infop->mgi_remmac = nxge_hio_rem_mac; 13506495Sspeer break; 13516495Sspeer 13526495Sspeer case MAC_RING_TYPE_TX: 13538275SEric Cheng /* 13548275SEric Cheng * 'groupid' for TX should be incremented by one since 13558275SEric Cheng * the default group (groupid 0) is not known by the MAC layer 13568275SEric Cheng */ 13578275SEric Cheng group = &nxgep->tx_hio_groups[groupid + 1]; 13588275SEric Cheng group->nxgep = nxgep; 13598275SEric Cheng group->ghandle = ghdl; 13608275SEric Cheng group->gindex = groupid + 1; 13618275SEric Cheng group->sindex = 0; /* not yet bound to a share */ 13628275SEric Cheng 13638275SEric Cheng infop->mgi_driver = (mac_group_driver_t)group; 13648275SEric Cheng infop->mgi_start = NULL; 13658275SEric Cheng infop->mgi_stop = NULL; 13668275SEric Cheng infop->mgi_addmac = NULL; /* not needed */ 13678275SEric Cheng infop->mgi_remmac = NULL; /* not needed */ 13688275SEric Cheng /* no rings associated with group initially */ 13698275SEric Cheng infop->mgi_count = 0; 13706495Sspeer break; 13716495Sspeer } 13726495Sspeer } 13736495Sspeer 13748275SEric Cheng #if defined(sun4v) 13758275SEric Cheng 13766495Sspeer int 13776495Sspeer nxge_hio_share_assign( 13786495Sspeer nxge_t *nxge, 13796495Sspeer uint64_t cookie, 13806495Sspeer res_map_t *tmap, 13816495Sspeer res_map_t *rmap, 13826495Sspeer 
nxge_hio_vr_t *vr) 13836495Sspeer { 13846495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 13856495Sspeer uint64_t slot, hv_rv; 13866495Sspeer nxge_hio_dc_t *dc; 13876495Sspeer nxhv_vr_fp_t *fp; 13886495Sspeer int i; 13896495Sspeer 13906495Sspeer /* 13916495Sspeer * Ask the Hypervisor to set up the VR for us 13926495Sspeer */ 13936495Sspeer fp = &nhd->hio.vr; 13946495Sspeer if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 13956495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 13967950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 13976713Sspeer "vr->assign() returned %d", hv_rv)); 13986495Sspeer return (-EIO); 13996495Sspeer } 14006495Sspeer 14016495Sspeer /* 14026495Sspeer * For each shared TDC, ask the HV to find us an empty slot. 14036495Sspeer * ----------------------------------------------------- 14046495Sspeer */ 14056495Sspeer dc = vr->tx_group.dc; 14066495Sspeer for (i = 0; i < NXGE_MAX_TDCS; i++) { 14076495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 14086495Sspeer while (dc) { 14096495Sspeer hv_rv = (*tx->assign) 14106495Sspeer (vr->cookie, dc->channel, &slot); 14116495Sspeer if (hv_rv != 0) { 14126495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14137950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 14146495Sspeer "tx->assign(%x, %d) failed: %ld", 14156495Sspeer vr->cookie, dc->channel, hv_rv)); 14166495Sspeer return (-EIO); 14176495Sspeer } 14186495Sspeer 14196495Sspeer dc->cookie = vr->cookie; 14206495Sspeer dc->page = (vp_channel_t)slot; 14216495Sspeer 14226495Sspeer /* Inform the caller about the slot chosen. */ 14236495Sspeer (*tmap) |= 1 << slot; 14246495Sspeer 14256495Sspeer dc = dc->next; 14266495Sspeer } 14276495Sspeer } 14286495Sspeer 14296495Sspeer /* 14306495Sspeer * For each shared RDC, ask the HV to find us an empty slot. 
14316495Sspeer * ----------------------------------------------------- 14326495Sspeer */ 14336495Sspeer dc = vr->rx_group.dc; 14346495Sspeer for (i = 0; i < NXGE_MAX_RDCS; i++) { 14356495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 14366495Sspeer while (dc) { 14376495Sspeer hv_rv = (*rx->assign) 14386495Sspeer (vr->cookie, dc->channel, &slot); 14396495Sspeer if (hv_rv != 0) { 14406495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14417950SMichael.Speer@Sun.COM "nxge_hio_share_assign: " 14426495Sspeer "rx->assign(%x, %d) failed: %ld", 14436495Sspeer vr->cookie, dc->channel, hv_rv)); 14446495Sspeer return (-EIO); 14456495Sspeer } 14466495Sspeer 14476495Sspeer dc->cookie = vr->cookie; 14486495Sspeer dc->page = (vp_channel_t)slot; 14496495Sspeer 14506495Sspeer /* Inform the caller about the slot chosen. */ 14516495Sspeer (*rmap) |= 1 << slot; 14526495Sspeer 14536495Sspeer dc = dc->next; 14546495Sspeer } 14556495Sspeer } 14566495Sspeer 14576495Sspeer return (0); 14586495Sspeer } 14596495Sspeer 14608275SEric Cheng void 14616495Sspeer nxge_hio_share_unassign( 14626495Sspeer nxge_hio_vr_t *vr) 14636495Sspeer { 14646495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14656495Sspeer nxge_hio_data_t *nhd; 14666495Sspeer nxge_hio_dc_t *dc; 14676495Sspeer nxhv_vr_fp_t *fp; 14686495Sspeer uint64_t hv_rv; 14696495Sspeer 14706495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 14716495Sspeer 14726495Sspeer dc = vr->tx_group.dc; 14736495Sspeer while (dc) { 14746495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 14756495Sspeer hv_rv = (*tx->unassign)(vr->cookie, dc->page); 14766495Sspeer if (hv_rv != 0) { 14776495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14787950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 14796495Sspeer "tx->unassign(%x, %d) failed: %ld", 14806495Sspeer vr->cookie, dc->page, hv_rv)); 14816495Sspeer } 14826495Sspeer dc = dc->next; 14836495Sspeer } 14846495Sspeer 14856495Sspeer dc = vr->rx_group.dc; 14866495Sspeer while (dc) { 14876495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 
14886495Sspeer hv_rv = (*rx->unassign)(vr->cookie, dc->page); 14896495Sspeer if (hv_rv != 0) { 14906495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 14917950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 14926495Sspeer "rx->unassign(%x, %d) failed: %ld", 14936495Sspeer vr->cookie, dc->page, hv_rv)); 14946495Sspeer } 14956495Sspeer dc = dc->next; 14966495Sspeer } 14976495Sspeer 14986495Sspeer fp = &nhd->hio.vr; 14996495Sspeer if (fp->unassign) { 15006495Sspeer hv_rv = (*fp->unassign)(vr->cookie); 15016495Sspeer if (hv_rv != 0) { 15027950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 15037950SMichael.Speer@Sun.COM "nxge_hio_share_unassign: " 15046495Sspeer "vr->assign(%x) failed: %ld", 15056495Sspeer vr->cookie, hv_rv)); 15066495Sspeer } 15076495Sspeer } 15086495Sspeer } 15096495Sspeer 15106495Sspeer int 15118275SEric Cheng nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle) 15126495Sspeer { 15138275SEric Cheng p_nxge_t nxge = (p_nxge_t)arg; 15148275SEric Cheng nxge_share_handle_t *shp; 15158275SEric Cheng nxge_hio_vr_t *vr; /* The Virtualization Region */ 15168275SEric Cheng nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 15176495Sspeer 15186495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 15196495Sspeer 15206495Sspeer if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 15216495Sspeer nhd->hio.rx.assign == 0) { 15226495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 15236495Sspeer return (EIO); 15246495Sspeer } 15256495Sspeer 15266495Sspeer /* 15276495Sspeer * Get a VR. 
15286495Sspeer */ 15297755SMisaki.Kataoka@Sun.COM if ((vr = nxge_hio_vr_share(nxge)) == 0) 15306495Sspeer return (EAGAIN); 15316495Sspeer 15326495Sspeer shp = &nxge->shares[vr->region]; 15338275SEric Cheng shp->nxgep = nxge; 15346495Sspeer shp->index = vr->region; 15356495Sspeer shp->vrp = (void *)vr; 15368275SEric Cheng shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */ 15378275SEric Cheng shp->rxgroup = 0; /* to be assigned by ms_sadd */ 15388275SEric Cheng shp->active = B_FALSE; /* not bound yet */ 15396495Sspeer 15406495Sspeer *shandle = (mac_share_handle_t)shp; 15416495Sspeer 15426495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 15436495Sspeer return (0); 15446495Sspeer } 15456495Sspeer 15468275SEric Cheng 15476495Sspeer void 15486495Sspeer nxge_hio_share_free(mac_share_handle_t shandle) 15496495Sspeer { 15508275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 15518275SEric Cheng nxge_hio_vr_t *vr; 15528275SEric Cheng 15538275SEric Cheng /* 15548275SEric Cheng * Clear internal handle state. 15558275SEric Cheng */ 15568275SEric Cheng vr = shp->vrp; 15578275SEric Cheng shp->vrp = (void *)NULL; 15588275SEric Cheng shp->index = 0; 15598275SEric Cheng shp->tmap = 0; 15608275SEric Cheng shp->rmap = 0; 15618275SEric Cheng shp->rxgroup = 0; 15628275SEric Cheng shp->active = B_FALSE; 15638275SEric Cheng 15648275SEric Cheng /* 15658275SEric Cheng * Free VR resource. 
15668275SEric Cheng */ 15678275SEric Cheng nxge_hio_unshare(vr); 15688275SEric Cheng } 15698275SEric Cheng 15708275SEric Cheng 15718275SEric Cheng void 15728275SEric Cheng nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 15738275SEric Cheng mac_ring_handle_t *rings, uint_t *n_rings) 15748275SEric Cheng { 15758275SEric Cheng nxge_t *nxge; 15768275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 15778275SEric Cheng nxge_ring_handle_t *rh; 15788275SEric Cheng uint32_t offset; 15798275SEric Cheng 15808275SEric Cheng nxge = shp->nxgep; 15818275SEric Cheng 15828275SEric Cheng switch (type) { 15838275SEric Cheng case MAC_RING_TYPE_RX: 15848275SEric Cheng rh = nxge->rx_ring_handles; 15858275SEric Cheng offset = nxge->pt_config.hw_config.start_rdc; 15868275SEric Cheng break; 15878275SEric Cheng 15888275SEric Cheng case MAC_RING_TYPE_TX: 15898275SEric Cheng rh = nxge->tx_ring_handles; 15908275SEric Cheng offset = nxge->pt_config.hw_config.tdc.start; 15918275SEric Cheng break; 15928275SEric Cheng } 15938275SEric Cheng 15948275SEric Cheng /* 15958275SEric Cheng * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that, 15968275SEric Cheng * but the HV has statically assigned the channels like so: 15978275SEric Cheng * VR0: RDC0 & RDC1 15988275SEric Cheng * VR1: RDC2 & RDC3, etc. 15998275SEric Cheng * The TDCs are assigned in exactly the same way. 
16008275SEric Cheng */ 16018275SEric Cheng if (rings != NULL) { 16028275SEric Cheng rings[0] = rh[(shp->index * 2) - offset].ring_handle; 16038275SEric Cheng rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle; 16048275SEric Cheng } 16058275SEric Cheng if (n_rings != NULL) { 16068275SEric Cheng *n_rings = 2; 16078275SEric Cheng } 16088275SEric Cheng } 16098275SEric Cheng 16108275SEric Cheng int 16118275SEric Cheng nxge_hio_share_add_group(mac_share_handle_t shandle, 16128275SEric Cheng mac_group_driver_t ghandle) 16138275SEric Cheng { 16148275SEric Cheng nxge_t *nxge; 16158275SEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 16168275SEric Cheng nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle; 16178275SEric Cheng nxge_hio_vr_t *vr; /* The Virtualization Region */ 16188275SEric Cheng nxge_grp_t *group; 16198275SEric Cheng int i; 16208275SEric Cheng 16218275SEric Cheng if (rg->sindex != 0) { 16228275SEric Cheng /* the group is already bound to a share */ 16238275SEric Cheng return (EALREADY); 16248275SEric Cheng } 16258275SEric Cheng 16268400SNicolas.Droux@Sun.COM /* 16278400SNicolas.Droux@Sun.COM * If we are adding a group 0 to a share, this 16288400SNicolas.Droux@Sun.COM * is not correct. 16298400SNicolas.Droux@Sun.COM */ 16308400SNicolas.Droux@Sun.COM ASSERT(rg->gindex != 0); 16318400SNicolas.Droux@Sun.COM 16328275SEric Cheng nxge = rg->nxgep; 16338275SEric Cheng vr = shp->vrp; 16348275SEric Cheng 16358275SEric Cheng switch (rg->type) { 16368275SEric Cheng case MAC_RING_TYPE_RX: 16378275SEric Cheng /* 16388275SEric Cheng * Make sure that the group has the right rings associated 16398275SEric Cheng * for the share. In version 1.0, we may only give a VR 16408275SEric Cheng * 2 RDCs. Not only that, but the HV has statically 16418275SEric Cheng * assigned the channels like so: 16428275SEric Cheng * VR0: RDC0 & RDC1 16438275SEric Cheng * VR1: RDC2 & RDC3, etc. 
16448275SEric Cheng */ 16458275SEric Cheng group = nxge->rx_set.group[rg->gindex]; 16468275SEric Cheng 16478275SEric Cheng if (group->count > 2) { 16488275SEric Cheng /* a share can have at most 2 rings */ 16498275SEric Cheng return (EINVAL); 16508275SEric Cheng } 16518275SEric Cheng 16528275SEric Cheng for (i = 0; i < NXGE_MAX_RDCS; i++) { 16538275SEric Cheng if (group->map & (1 << i)) { 16548275SEric Cheng if ((i != shp->index * 2) && 16558275SEric Cheng (i != (shp->index * 2 + 1))) { 16568275SEric Cheng /* 16578275SEric Cheng * A group with invalid rings was 16588275SEric Cheng * attempted to bind to this share 16598275SEric Cheng */ 16608275SEric Cheng return (EINVAL); 16618275SEric Cheng } 16628275SEric Cheng } 16638275SEric Cheng } 16648275SEric Cheng 16658275SEric Cheng rg->sindex = vr->region; 16668275SEric Cheng vr->rdc_tbl = rg->rdctbl; 16678275SEric Cheng shp->rxgroup = vr->rdc_tbl; 16688275SEric Cheng break; 16698275SEric Cheng 16708275SEric Cheng case MAC_RING_TYPE_TX: 16718275SEric Cheng /* 16728275SEric Cheng * Make sure that the group has the right rings associated 16738275SEric Cheng * for the share. In version 1.0, we may only give a VR 16748275SEric Cheng * 2 TDCs. Not only that, but the HV has statically 16758275SEric Cheng * assigned the channels like so: 16768275SEric Cheng * VR0: TDC0 & TDC1 16778275SEric Cheng * VR1: TDC2 & TDC3, etc. 
	 */
	group = nxge->tx_set.group[rg->gindex];

	if (group->count > 2) {
		/* a share can have at most 2 rings */
		return (EINVAL);
	}

	/*
	 * Each share owns a fixed pair of TDCs (2*index, 2*index+1);
	 * reject a group whose map includes any other channel.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		if (group->map & (1 << i)) {
			if ((i != shp->index * 2) &&
			    (i != (shp->index * 2 + 1))) {
				/*
				 * A group with invalid rings was
				 * attempted to bind to this share
				 */
				return (EINVAL);
			}
		}
	}

	vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
	    rg->gindex;
	rg->sindex = vr->region;
	break;
	}
	return (0);
}

/*
 * nxge_hio_share_rem_group
 *
 * Disassociate a MAC ring group from a share: clear the group's
 * share index and the VR's reference to the group's DMA table
 * (RDC or TDC, depending on the group type).
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_rem_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	int rv = 0;

	vr = shp->vrp;

	switch (group->type) {
	case MAC_RING_TYPE_RX:
		group->sindex = 0;
		vr->rdc_tbl = 0;
		shp->rxgroup = 0;
		break;

	case MAC_RING_TYPE_TX:
		group->sindex = 0;
		vr->tdc_tbl = 0;
		break;
	}

	return (rv);
}

/*
 * nxge_hio_share_bind
 *
 * Bind a share's DMA resources to its VR: add the TX channels,
 * then the RX channels (backing out TX on RX failure), then ask
 * the hypervisor to assign the VR.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
    uint64_t *rcookie)
{
	nxge_t *nxge;
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t *vr;
	uint64_t rmap, tmap, hv_rmap, hv_tmap;
	int rv;

	nxge = shp->nxgep;
	vr = (nxge_hio_vr_t *)shp->vrp;

	/*
	 * Add resources to the share.
	 * For each DMA channel associated with the VR, bind its resources
	 * to the VR.
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
	if (rv != 0) {
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
	if (rv != 0) {
		/* Undo the TX additions before failing. */
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		return (rv);
	}

	/*
	 * Ask the Hypervisor to set up the VR and allocate slots for
	 * each rings associated with the VR.
17688275SEric Cheng */ 17698275SEric Cheng hv_tmap = hv_rmap = 0; 17708275SEric Cheng if ((rv = nxge_hio_share_assign(nxge, cookie, 17718275SEric Cheng &hv_tmap, &hv_rmap, vr))) { 17728275SEric Cheng nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 17738275SEric Cheng nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap); 17748275SEric Cheng return (rv); 17758275SEric Cheng } 17768275SEric Cheng 17778275SEric Cheng shp->active = B_TRUE; 17788275SEric Cheng shp->tmap = hv_tmap; 17798275SEric Cheng shp->rmap = hv_rmap; 17808275SEric Cheng 17818275SEric Cheng /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 17828275SEric Cheng *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 17838275SEric Cheng 17848275SEric Cheng return (0); 17858275SEric Cheng } 17868275SEric Cheng 17878275SEric Cheng void 17888275SEric Cheng nxge_hio_share_unbind(mac_share_handle_t shandle) 17898275SEric Cheng { 17906495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 17916495Sspeer 17926495Sspeer /* 17936495Sspeer * First, unassign the VR (take it back), 17946495Sspeer * so we can enable interrupts again. 17956495Sspeer */ 17968275SEric Cheng nxge_hio_share_unassign(shp->vrp); 17976495Sspeer 17986495Sspeer /* 17996495Sspeer * Free Ring Resources for TX and RX 18006495Sspeer */ 18017755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 18027755SMisaki.Kataoka@Sun.COM nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 18036495Sspeer } 18046495Sspeer 18056495Sspeer 18066495Sspeer /* 18076495Sspeer * nxge_hio_vr_share 18086495Sspeer * 18096495Sspeer * Find an unused Virtualization Region (VR). 
18106495Sspeer * 18116495Sspeer * Arguments: 18126495Sspeer * nxge 18136495Sspeer * 18146495Sspeer * Notes: 18156495Sspeer * 18166495Sspeer * Context: 18176495Sspeer * Service domain 18186495Sspeer */ 18197755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t * 18206495Sspeer nxge_hio_vr_share( 18216495Sspeer nxge_t *nxge) 18226495Sspeer { 18236495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 18246495Sspeer nxge_hio_vr_t *vr; 18256495Sspeer 18266495Sspeer int first, limit, region; 18276495Sspeer 18286495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 18296495Sspeer 18306495Sspeer MUTEX_ENTER(&nhd->lock); 18316495Sspeer 18327755SMisaki.Kataoka@Sun.COM if (nhd->vrs == 0) { 18336495Sspeer MUTEX_EXIT(&nhd->lock); 18346495Sspeer return (0); 18356495Sspeer } 18366495Sspeer 18376495Sspeer /* Find an empty virtual region (VR). */ 18386495Sspeer if (nxge->function_num == 0) { 18396495Sspeer // FUNC0_VIR0 'belongs' to NIU port 0. 18406495Sspeer first = FUNC0_VIR1; 18416495Sspeer limit = FUNC2_VIR0; 18426495Sspeer } else if (nxge->function_num == 1) { 18436495Sspeer // FUNC2_VIR0 'belongs' to NIU port 1. 
18446495Sspeer first = FUNC2_VIR1; 18456495Sspeer limit = FUNC_VIR_MAX; 18466495Sspeer } else { 18476495Sspeer cmn_err(CE_WARN, 18486495Sspeer "Shares not supported on function(%d) at this time.\n", 18496495Sspeer nxge->function_num); 18506495Sspeer } 18516495Sspeer 18526495Sspeer for (region = first; region < limit; region++) { 18536495Sspeer if (nhd->vr[region].nxge == 0) 18546495Sspeer break; 18556495Sspeer } 18566495Sspeer 18576495Sspeer if (region == limit) { 18586495Sspeer MUTEX_EXIT(&nhd->lock); 18596495Sspeer return (0); 18606495Sspeer } 18616495Sspeer 18626495Sspeer vr = &nhd->vr[region]; 18636495Sspeer vr->nxge = (uintptr_t)nxge; 18646495Sspeer vr->region = (uintptr_t)region; 18656495Sspeer 18667755SMisaki.Kataoka@Sun.COM nhd->vrs--; 18676495Sspeer 18686495Sspeer MUTEX_EXIT(&nhd->lock); 18696495Sspeer 18706495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 18716495Sspeer 18727755SMisaki.Kataoka@Sun.COM return (vr); 18736495Sspeer } 18746495Sspeer 18756495Sspeer void 18766495Sspeer nxge_hio_unshare( 18777755SMisaki.Kataoka@Sun.COM nxge_hio_vr_t *vr) 18786495Sspeer { 18796495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 18806495Sspeer nxge_hio_data_t *nhd; 18816495Sspeer 18826495Sspeer vr_region_t region; 18836495Sspeer 18846495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 18856495Sspeer 18866495Sspeer if (!nxge) { 18877950SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: " 18886495Sspeer "vr->nxge is NULL")); 18896495Sspeer return; 18906495Sspeer } 18916495Sspeer 18926495Sspeer /* 18936495Sspeer * This function is no longer called, but I will keep it 18946495Sspeer * here in case we want to revisit this topic in the future. 18956495Sspeer * 18966495Sspeer * nxge_hio_hostinfo_uninit(nxge, vr); 18976495Sspeer */ 18988275SEric Cheng 18998275SEric Cheng /* 19008275SEric Cheng * XXX: This is done by ms_sremove? 
	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
	 */

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	/* Wipe the VR but preserve its region id, then return it to the pool. */
	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

/*
 * nxge_hio_addres
 *
 * Add each DMA channel of the VR's ring group to the VR.
 * On return, <map> holds the bitmap of channels that were
 * successfully shared.
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	uint64_t *map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;
	int groupid;
	int i;
	int max_dcs;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	/*
	 * For each ring associated with the group, add the resources
	 * to the group and bind.
	 */
	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
	if (type == MAC_RING_TYPE_TX) {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->tdc_tbl -
		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
		group = nxge->tx_set.group[groupid];
	} else {
		/* set->group is an array of group indexed by a port group id */
		groupid = vr->rdc_tbl -
		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
		group = nxge->rx_set.group[groupid];
	}

	if (group->map == 0) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated "
		    "with this VR"));
		return (EINVAL);
	}

	for (i = 0; i < max_dcs; i++) {
		if (group->map & (1 << i)) {
			int rv;

			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
				if (*map == 0) /* Couldn't get even one DC. */
					return (-rv);
				else
					break;
			}
			*map |= (1 << i);
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/*
 * nxge_hio_remres
 *
 * Remove (unshare) every DMA channel on the VR's group list,
 * clearing each channel's bit from <res_map> as it goes; a
 * non-zero residue on exit indicates a channel that was mapped
 * but not found on the group list.
 */
/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	/*
	 * For each ring bound to the group, remove the DMA resources
	 * from the group and unbind.
	 */
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA		A.9.6.10
 *	[Rebind TxDMA channel to Port A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it always already bound.
 *
 *	Soft Reset TxDMA	A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA	A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA		A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];
	ASSERT(ring != NULL);

	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			/* Timed out: put the ring back online and fail. */
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		/* Ring was idle: mark it offlined immediately. */
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	/* Record that this TDC is now shared out. */
	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (-EINVAL);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}

/*
 * nxge_hio_rdc_share
 *
 * Share an unused RDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *	Stop RxMAC		A.9.2.6
 *	Drain IPP Port		A.9.3.6
 *	Stop and reset RxDMA	A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *	Initialize RxDMA	A.9.5.4
 *	Reconfigure RxDMA
 *	Enable RxDMA		A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *	Enable RxMAC		A.9.2.10
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_rdc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	int channel)
{
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_rdc_grp_t *rdc_grp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

	/* Disable interrupts. */
	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to remove interrupt for RxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Stop RxMAC = A.9.2.6 */
	/* NOTE: RxMAC disable failure is logged but not treated as fatal. */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	// Assert RST: RXDMA_CFIG1[30] = 1
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * The guest domain will reconfigure the RDC later.
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 *	Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
22126495Sspeer * ----------------------------------------------------- 22136495Sspeer */ 22146495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 22156495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 22167950SMichael.Speer@Sun.COM "nxge_hio_rdc_share: RZC RDC failed: %ld", channel)); 22176495Sspeer return (-EIO); 22186495Sspeer } 22196495Sspeer 22206495Sspeer /* 22219047SMichael.Speer@Sun.COM * Update the RDC group. 22226495Sspeer */ 22236495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 22246495Sspeer NXGE_DC_SET(rdc_grp->map, channel); 22256495Sspeer 22266495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 22276495Sspeer 22286495Sspeer return (0); 22296495Sspeer } 22306495Sspeer 22316495Sspeer /* 22326495Sspeer * nxge_hio_dc_share 22336495Sspeer * 22346495Sspeer * Share a DMA channel with a guest domain. 22356495Sspeer * 22366495Sspeer * Arguments: 22376495Sspeer * nxge 22386495Sspeer * vr The VR that <channel> will belong to. 22396495Sspeer * type Tx or Rx. 22408275SEric Cheng * channel Channel to share 22416495Sspeer * 22426495Sspeer * Notes: 22436495Sspeer * 22446495Sspeer * Context: 22456495Sspeer * Service domain 22466495Sspeer */ 22476495Sspeer int 22486495Sspeer nxge_hio_dc_share( 22496495Sspeer nxge_t *nxge, 22506495Sspeer nxge_hio_vr_t *vr, 22518275SEric Cheng mac_ring_type_t type, 22528275SEric Cheng int channel) 22536495Sspeer { 22546495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 22556495Sspeer nxge_hio_dc_t *dc; 22566495Sspeer nxge_grp_t *group; 22576495Sspeer int slot; 22586495Sspeer 22596495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 22606495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 22616495Sspeer 22626495Sspeer 22636495Sspeer /* -------------------------------------------------- */ 22646495Sspeer slot = (type == MAC_RING_TYPE_TX) ? 
22656495Sspeer nxge_hio_tdc_share(nxge, channel) : 22666495Sspeer nxge_hio_rdc_share(nxge, vr, channel); 22676495Sspeer 22686495Sspeer if (slot < 0) { 22696495Sspeer if (type == MAC_RING_TYPE_RX) { 22708275SEric Cheng nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel); 22716495Sspeer } else { 22728275SEric Cheng nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel); 22736495Sspeer } 22746495Sspeer return (slot); 22756495Sspeer } 22766495Sspeer 22776495Sspeer MUTEX_ENTER(&nhd->lock); 22786495Sspeer 22796495Sspeer /* 22806495Sspeer * Tag this channel. 22816495Sspeer * -------------------------------------------------- 22826495Sspeer */ 22836495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 22846495Sspeer 22856495Sspeer dc->vr = vr; 22866495Sspeer dc->channel = (nxge_channel_t)channel; 22876495Sspeer 22886495Sspeer MUTEX_EXIT(&nhd->lock); 22896495Sspeer 22906495Sspeer /* 22916495Sspeer * vr->[t|r]x_group is used by the service domain to 22926495Sspeer * keep track of its shared DMA channels. 22936495Sspeer */ 22946495Sspeer MUTEX_ENTER(&nxge->group_lock); 22956495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 22966495Sspeer 22977755SMisaki.Kataoka@Sun.COM dc->group = group; 22986495Sspeer /* Initialize <group>, if necessary */ 22996495Sspeer if (group->count == 0) { 23006495Sspeer group->nxge = nxge; 23016495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 23026495Sspeer VP_BOUND_TX : VP_BOUND_RX; 23036495Sspeer group->sequence = nhd->sequence++; 23046495Sspeer group->active = B_TRUE; 23056495Sspeer } 23066495Sspeer 23076495Sspeer MUTEX_EXIT(&nxge->group_lock); 23086495Sspeer 23096495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 23106495Sspeer "DC share: %cDC %d was assigned to slot %d", 23116495Sspeer type == MAC_RING_TYPE_TX ? 
'T' : 'R', channel, slot)); 23126495Sspeer 23136495Sspeer nxge_grp_dc_append(nxge, group, dc); 23146495Sspeer 23156495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 23166495Sspeer 23176495Sspeer return (0); 23186495Sspeer } 23196495Sspeer 23206495Sspeer /* 23216495Sspeer * nxge_hio_tdc_unshare 23226495Sspeer * 23236495Sspeer * Unshare a TDC. 23246495Sspeer * 23256495Sspeer * Arguments: 23266495Sspeer * nxge 23276495Sspeer * channel The channel to unshare (add again). 23286495Sspeer * 23296495Sspeer * Notes: 23306495Sspeer * 23316495Sspeer * Context: 23326495Sspeer * Service domain 23336495Sspeer */ 23346495Sspeer void 23356495Sspeer nxge_hio_tdc_unshare( 23366495Sspeer nxge_t *nxge, 23378275SEric Cheng int dev_grpid, 23386495Sspeer int channel) 23396495Sspeer { 23406495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 23418275SEric Cheng nxge_grp_t *group; 23428275SEric Cheng int grpid; 23436495Sspeer 23446495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 23456495Sspeer 23466495Sspeer NXGE_DC_RESET(set->shared.map, channel); 23476495Sspeer set->shared.count--; 23486495Sspeer 23498275SEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid; 23508275SEric Cheng group = set->group[grpid]; 23518275SEric Cheng 23527755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 23536495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 23546495Sspeer "Failed to initialize TxDMA channel %d", channel)); 23556495Sspeer return; 23566495Sspeer } 23576495Sspeer 23586495Sspeer /* Re-add this interrupt. 
*/ 23596495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 23606495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 23616495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 23626495Sspeer } 23636495Sspeer 23646495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 23656495Sspeer } 23666495Sspeer 23676495Sspeer /* 23686495Sspeer * nxge_hio_rdc_unshare 23696495Sspeer * 23706495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 23716495Sspeer * 23726495Sspeer * Arguments: 23736495Sspeer * nxge 23746495Sspeer * channel The channel to unshare (add again). 23756495Sspeer * 23766495Sspeer * Notes: 23776495Sspeer * 23786495Sspeer * Context: 23796495Sspeer * Service domain 23806495Sspeer */ 23816495Sspeer void 23826495Sspeer nxge_hio_rdc_unshare( 23836495Sspeer nxge_t *nxge, 23848275SEric Cheng int dev_grpid, 23856495Sspeer int channel) 23866495Sspeer { 23878275SEric Cheng nxge_grp_set_t *set = &nxge->rx_set; 23888275SEric Cheng nxge_grp_t *group; 23898275SEric Cheng int grpid; 23906495Sspeer 23916495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 23926495Sspeer 23936495Sspeer /* Stop RxMAC = A.9.2.6 */ 23946495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 23956495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 23966495Sspeer "Failed to disable RxMAC")); 23976495Sspeer } 23986495Sspeer 23996495Sspeer /* Drain IPP Port = A.9.3.6 */ 24006495Sspeer (void) nxge_ipp_drain(nxge); 24016495Sspeer 24026495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 24036495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 24046495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 24056495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 24066495Sspeer "Failed to disable RxDMA channel %d", channel)); 24076495Sspeer } 24086495Sspeer 24096495Sspeer NXGE_DC_RESET(set->shared.map, channel); 24106495Sspeer set->shared.count--; 
24116495Sspeer 24128275SEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid; 24138275SEric Cheng group = set->group[grpid]; 24148275SEric Cheng 24156495Sspeer /* 24166495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 24176495Sspeer * 24186495Sspeer * Initialize RxDMA A.9.5.4 24196495Sspeer * Reconfigure RxDMA 24206495Sspeer * Enable RxDMA A.9.5.5 24216495Sspeer */ 24227755SMisaki.Kataoka@Sun.COM if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 24236495Sspeer /* Be sure to re-enable the RX MAC. */ 24246495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 24256495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24268275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 24276495Sspeer } 24286495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 24296495Sspeer "Failed to initialize RxDMA channel %d", channel)); 24306495Sspeer return; 24316495Sspeer } 24326495Sspeer 24336495Sspeer /* 24346495Sspeer * Enable RxMAC = A.9.2.10 24356495Sspeer */ 24366495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 24376495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24388275SEric Cheng "nxge_hio_rdc_share: Rx MAC still disabled")); 24396495Sspeer return; 24406495Sspeer } 24416495Sspeer 24426495Sspeer /* Re-add this interrupt. */ 24436495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 24446495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 24457950SMichael.Speer@Sun.COM "nxge_hio_rdc_unshare: Failed to add interrupt for " 24466495Sspeer "RxDMA CHANNEL %d", channel)); 24476495Sspeer } 24486495Sspeer 24496495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 24506495Sspeer } 24516495Sspeer 24526495Sspeer /* 24536495Sspeer * nxge_hio_dc_unshare 24546495Sspeer * 24556495Sspeer * Unshare (reuse) a DMA channel. 24566495Sspeer * 24576495Sspeer * Arguments: 24586495Sspeer * nxge 24596495Sspeer * vr The VR that <channel> belongs to. 24606495Sspeer * type Tx or Rx. 24616495Sspeer * channel The DMA channel to reuse. 
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_unshare(%d) failed", channel));
		return;
	}

	/* The channel no longer belongs to any VR. */
	dc->vr = 0;
	dc->cookie = 0;

	/* Return the channel to the service domain's pool. */
	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}


/*
 * nxge_hio_rxdma_bind_intr():
 *
 * For the guest domain driver, need to bind the interrupt group
 * and state to the rx_rcr_ring_t.
 */

int
nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
{
	nxge_hio_dc_t *dc;
	nxge_ldgv_t *control;
	nxge_ldg_t *group;
	nxge_ldv_t *device;

	/*
	 * Find the DMA channel.
	 */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
		return (NXGE_ERROR);
	}

	/*
	 * Get the control structure.
	 */
	control = nxge->ldgvp;
	if (control == NULL) {
		return (NXGE_ERROR);
	}

	/* Look up the logical device group and device for this DC. */
	group = &control->ldgp[dc->ldg.vector];
	device = &control->ldvp[dc->ldg.ldsv];

	/* Publish the group/device pointers under the ring lock. */
	MUTEX_ENTER(&ring->lock);
	ring->ldgp = group;
	ring->ldvp = device;
	MUTEX_EXIT(&ring->lock);

	return (NXGE_OK);
}
#endif	/* if defined(sun4v) */