/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define	NXGE_HIO_SHARE_MIN_CHANNELS	2
#define	NXGE_HIO_SHARE_MAX_CHANNELS	2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 * Figure out if we are in a guest domain or not.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	if (nhd != NULL) {
		MUTEX_DESTROY(&nhd->lock);
		KMEM_FREE(nhd, sizeof (*nhd));
		nxge->nxge_hw_p->hio = 0;
	}
}

/*
 * nxge_dci_map
 *
 * Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	instance	The instance number of the driver.
 * 	type		The type of channel this is: Tx or Rx.
 * 	index		The index to convert to a channel number
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}
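
/*
 * Example: nxge_dci_map() returns the index'th channel that this
 * instance owns, counting from the least significant bit of
 * set->owned.map.  If, say, owned.map were 0x0c (channels 2 and 3
 * owned), then index 0 would map to channel 2, index 1 to channel 3,
 * and any larger index would return -1.  The map value here is only
 * illustrative.
 */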

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions. That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_add
 *
 * Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
vr_handle_t
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return ((vr_handle_t)group);
}

void
nxge_grp_remove(
	nxge_t *nxge,
	vr_handle_t handle)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	vpc_type_t type;

	group = (nxge_grp_t *)handle;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
	}

	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}
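
/*
 * Usage sketch (illustrative only): a caller would typically create a
 * group, populate it with DMA channels, and later tear it down.  The
 * channel number 3 below is arbitrary, and nxge_grp_remove() itself
 * removes any channels still attached to the group.
 *
 *	vr_handle_t grp = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *	if (nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, 3) != 0)
 *		... handle the error ...
 *	...
 *	nxge_grp_remove(nxge, grp);
 */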

/*
 * nxge_grp_dc_add
 *
 * Add a DMA channel to a VR/Group.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to add.
 * Notes:
 *
 * Context:
 *	Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
	nxge_t *nxge,
	vr_handle_t handle,	/* The group to add <channel> to. */
	vpc_type_t type,	/* Rx or Tx */
	int channel)		/* A physical/logical channel number */
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

	if (handle == 0)
		return (0);

	switch (type) {
	default:
		set = &nxge->tx_set;
		if (channel > NXGE_MAX_TDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: TDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		if (channel > NXGE_MAX_RDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: RDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	}

	group = (nxge_grp_t *)handle;
	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
	    type == VP_BOUND_TX ? 't' : 'r',
	    nxge->mac.portnum, group->sequence, group->count, channel));

	MUTEX_ENTER(&nxge->group_lock);
	if (group->active != B_TRUE) {
		/* We may be in the process of removing this group. */
		MUTEX_EXIT(&nxge->group_lock);
		return (NXGE_ERROR);
	}
	MUTEX_EXIT(&nxge->group_lock);

	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
		return (NXGE_ERROR);
	}

	MUTEX_ENTER(&nhd->lock);

	if (dc->group) {
		MUTEX_EXIT(&nhd->lock);
		/* This channel is already in use! */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = handle;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove: find(%d) failed", channel));
		return;
	}
	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
	if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
		NXGE_DC_RESET(group->map, channel);
	}

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove(%d) failed", channel));
		return;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	dc	The DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
	nxge_t *nxge,
	nxge_grp_t *group,
	int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}
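
/*
 * Example (illustrative): for a group holding channels 4 and 5, in that
 * order, group->map would be 0x30 (bits 4 and 5 set), group->count
 * would be 2, and after nxge_grp_dc_map() runs group->legend would
 * begin { 4, 5, 0, ... }, with the remainder of the array zeroed.
 */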

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1",	0 },
	{ "RXDMA_CFIG2",	8 },
	{ "RBR_CFIG_A",		0x10 },
	{ "RBR_CFIG_B",		0x18 },
	{ "RBR_KICK",		0x20 },
	{ "RBR_STAT",		0x28 },
	{ "RBR_HDH",		0x30 },
	{ "RBR_HDL",		0x38 },
	{ "RCRCFIG_A",		0x40 },
	{ "RCRCFIG_B",		0x48 },
	{ "RCRSTAT_A",		0x50 },
	{ "RCRSTAT_B",		0x58 },
	{ "RCRSTAT_C",		0x60 },
	{ "RX_DMA_ENT_MSK",	0x68 },
	{ "RX_DMA_CTL_STAT",	0x70 },
	{ "RCR_FLSH",		0x78 },
	{ "RXMISC",		0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG",	0 },
	{ "Tx_RNG_HDL",		0x10 },
	{ "Tx_RNG_KICK",	0x18 },
	{ "Tx_ENT_MASK",	0x20 },
	{ "Tx_CS",		0x28 },
	{ "TxDMA_MBH",		0x30 },
	{ "TxDMA_MBL",		0x38 },
	{ "TxDMA_PRE_ST",	0x40 },
	{ "Tx_RNG_ERR_LOGH",	0x48 },
	{ "Tx_RNG_ERR_LOGL",	0x50 },
	{ "TDMC_INTR_DBG",	0x60 },
	{ "Tx_CS_DBG",		0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
 *
 * Context:
 *	Any domain
 */
const char *
nxge_rx2str(
	int offset)
{
	dmc_reg_name_t *reg = &rx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

const char *
nxge_tx2str(
	int offset)
{
	dmc_reg_name_t *reg = &tx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}
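
/*
 * Usage sketch (illustrative): a debugging path that has a raw RXDMA
 * register offset in hand could log it symbolically, for example
 *
 *	cmn_err(CE_NOTE, "RXDMA reg %s (+0x%x)", nxge_rx2str(offset), offset);
 *
 * Both lookup functions return 0 (NULL) for an offset that is not in
 * the table, so a real caller should check for that before printing.
 */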

/*
 * nxge_ddi_perror
 *
 * Map a DDI error number to a string.
 *
 * Arguments:
 * 	ddi_error	The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static vr_handle_t nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(vr_handle_t);

static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int);
static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			if (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "nxge_hio_init: hypervisor services "
			    "version %d.%d",
			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
		}
	}

	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	nhd->available.vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = (void *)NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int rv, sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Program the mac address for the group/share.
	 */
	if ((rv = nxge_hio_hostinfo_init(nxge, vr,
	    (ether_addr_t *)mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Remove the mac address for the group/share.
	 */
	nxge_hio_hostinfo_uninit(nxge, vr);

	return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
	mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rxgroup = &nxgep->rx_hio_groups[group];
		rxgroup->gindex = group;

		infop->mrg_driver = (mac_group_driver_t)rxgroup;
		infop->mrg_start = NULL;
		infop->mrg_stop = NULL;
		infop->mrg_addmac = nxge_hio_add_mac;
		infop->mrg_remmac = nxge_hio_rem_mac;
		infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		break;

	case MAC_RING_TYPE_TX:
		break;
	}
}

int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
		NXGE_ERROR_MSG((nxge, HIO_CTL,
		    "nx_hio_share_assign: "
		    "vr->assign() returned %d", hv_rv));
		nxge_hio_unshare((vr_handle_t)vr);
		return (-EIO);
	}

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nx_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nx_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}

int
nxge_hio_share_unassign(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	uint64_t hv_rv;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	dc = vr->tx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nx_hio_dc_unshare: "
			    "tx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	dc = vr->rx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nx_hio_dc_unshare: "
			    "rx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	fp = &nhd->hio.vr;
	if (fp->unassign) {
		hv_rv = (*fp->unassign)(vr->cookie);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
			    "vr->unassign(%x) failed: %ld",
			    vr->cookie, hv_rv));
		}
	}

	return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
	mac_share_handle_t *shandle)
{
	p_nxge_t nxge = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;
	nxge_share_handle_t *shp;

	vr_handle_t shared;	/* The VR being shared */
	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	uint64_t rmap, tmap;
	int rv;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((shared = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);
	vr = (nxge_hio_vr_t *)shared;

	/*
	 * Get an RDC group for us to use.
	 */
	if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
		nxge_hio_unshare(shared);
		return (EBUSY);
	}

	/*
	 * Add resources to the share.
	 */
	tmap = 0;
	rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_unshare(shared);
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(shared);
		return (rv);
	}

	if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
		nxge_hio_remres(shared, MAC_RING_TYPE_RX, tmap);
		nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(shared);
		return (rv);
	}

	rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
	rxgroup->gindex = vr->rdc_tbl;
	rxgroup->sindex = vr->region;

	shp = &nxge->shares[vr->region];
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = tmap;
	shp->rmap = rmap;
	shp->rxgroup = vr->rdc_tbl;
	shp->active = B_TRUE;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	(void) nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

	/*
	 * Free VR resource.
	 */
	nxge_hio_unshare((vr_handle_t)shp->vrp);

	/*
	 * Clear internal handle state.
	 */
	shp->index = 0;
	shp->vrp = (void *)NULL;
	shp->tmap = 0;
	shp->rmap = 0;
	shp->rxgroup = 0;
	shp->active = B_FALSE;
}
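
/*
 * Usage sketch (illustrative): the MAC layer drives the share life
 * cycle through the entry points above, roughly as follows.  The
 * variable names are hypothetical.
 *
 *	mac_share_handle_t sh;
 *	uint64_t hv_cookie;
 *
 *	if (nxge_hio_share_alloc(nxge, guest_cookie, &hv_cookie, &sh) == 0) {
 *		... the guest domain uses the shared TDCs/RDCs ...
 *		nxge_hio_share_free(sh);
 *	}
 */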

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
	uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	switch (type) {
	case MAC_RING_TYPE_RX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->rmap;
		*gnum = shp->rxgroup;
		break;

	case MAC_RING_TYPE_TX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->tmap;
		*gnum = 0;
		break;
	}
}

/*
 * nxge_hio_vr_share
 *
 * Find an unused Virtualization Region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
vr_handle_t
nxge_hio_vr_share(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;

	int first, limit, region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

	MUTEX_ENTER(&nhd->lock);

	if (nhd->available.vrs == 0) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	/* Find an empty virtual region (VR). */
	if (nxge->function_num == 0) {
		// FUNC0_VIR0 'belongs' to NIU port 0.
		first = FUNC0_VIR1;
		limit = FUNC2_VIR0;
	} else if (nxge->function_num == 1) {
		// FUNC2_VIR0 'belongs' to NIU port 1.
		first = FUNC2_VIR1;
		limit = FUNC_VIR_MAX;
	} else {
		cmn_err(CE_WARN,
		    "Shares not supported on function(%d) at this time.\n",
		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	for (region = first; region < limit; region++) {
		if (nhd->vr[region].nxge == 0)
			break;
	}

	if (region == limit) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	vr = &nhd->vr[region];
	vr->nxge = (uintptr_t)nxge;
	vr->region = (uintptr_t)region;

	nhd->available.vrs--;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

	return ((vr_handle_t)vr);
}

void
nxge_hio_unshare(
	vr_handle_t shared)
{
	nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */
	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->available.vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
	vr_handle_t shared,
	mac_ring_type_t type,
	int count)
{
	nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
	nxge_t *nxge = (nxge_t *)vr->nxge;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	for (i = 0; i < count; i++) {
		int rv;
		if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
			if (i == 0)	/* Couldn't get even one DC. */
				return (-rv);
			else
				break;
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	vr_handle_t shared,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *	[Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *	Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA		A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA			A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];

	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}
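
/*
 * Note (illustrative): the tx_ring_offline handshake above moves the
 * ring through three states.  NXGE_TX_RING_OFFLINING is set here to ask
 * for the ring, NXGE_TX_RING_OFFLINED indicates the ring has quiesced
 * and may be taken away, and NXGE_TX_RING_ONLINE is restored if the
 * hand-off does not complete within the 30-second window.
 */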

/*
 * nxge_hio_rdc_share
 *
 * Share an unused RDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *	Stop RxMAC		A.9.2.6
 *	Drain IPP Port		A.9.3.6
 *	Stop and reset RxDMA	A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *	Initialize RxDMA	A.9.5.4
 *	Reconfigure RxDMA
 *	Enable RxDMA		A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *	Enable RxMAC		A.9.2.10
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_rdc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_rdc_grp_t *rdc_grp;

	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

	/* Disable interrupts. */
	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: "
		    "Failed to remove interrupt for RxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	// Assert RST: RXDMA_CFIG1[30] = 1
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			rdc_grp = &nxge->pt_config.rdc_grps[current];
			rdc_grp->map = set->owned.map;
			rdc_grp->max_rdcs--;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * The guest domain will reconfigure the RDC later.
16936495Sspeer 	 *
16946495Sspeer 	 * But in the meantime, we must re-enable the Rx MAC so
16956495Sspeer 	 * that we can start receiving packets again on the
16966495Sspeer 	 * remaining RDCs:
16976495Sspeer 	 *
16986495Sspeer 	 * Enable RxMAC = A.9.2.10
16996495Sspeer 	 */
17006495Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
17016495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
17026495Sspeer 		    "nx_hio_rdc_share: Rx MAC still disabled"));
17036495Sspeer 	}
17046495Sspeer 
17056495Sspeer 	/*
17066495Sspeer 	 * Initialize the DC-specific FZC control registers.
17076495Sspeer 	 * -----------------------------------------------------
17086495Sspeer 	 */
17096495Sspeer 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
17106495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
17116495Sspeer 		    "nx_hio_rdc_share: FZC RDC failed: %d", channel));
17126495Sspeer 		return (-EIO);
17136495Sspeer 	}
17146495Sspeer 
17156495Sspeer 	/*
17166495Sspeer 	 * We have to initialize the guest's RDC table, too.
17176495Sspeer 	 * -----------------------------------------------------
17186495Sspeer 	 */
17196495Sspeer 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
17206495Sspeer 	if (rdc_grp->max_rdcs == 0) {
17216495Sspeer 		rdc_grp->start_rdc = (uint8_t)channel;
17226495Sspeer 		rdc_grp->def_rdc = (uint8_t)channel;
17236495Sspeer 		rdc_grp->max_rdcs = 1;
17246495Sspeer 	} else {
17256495Sspeer 		rdc_grp->max_rdcs++;
17266495Sspeer 	}
17276495Sspeer 	NXGE_DC_SET(rdc_grp->map, channel);
17286495Sspeer 
17296495Sspeer 	if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
17306495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
17316495Sspeer 		    "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
17326495Sspeer 		return (-EIO);
17336495Sspeer 	}
17346495Sspeer 
17356495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
17366495Sspeer 
17376495Sspeer 	return (0);
17386495Sspeer }
17396495Sspeer 
17406495Sspeer /*
17416495Sspeer  * nxge_hio_dc_share
17426495Sspeer  *
17436495Sspeer  * Share a DMA channel with a guest domain.
17446495Sspeer  *
17456495Sspeer  * Arguments:
17466495Sspeer  * 	nxge
17476495Sspeer  * 	vr	The VR that <channel> will belong to.
17486495Sspeer  * 	type	Tx or Rx.
17516495Sspeer  *
17526495Sspeer  * Notes:
17536495Sspeer  *
17546495Sspeer  * Context:
17556495Sspeer  *	Service domain
17566495Sspeer  */
17576495Sspeer int
17586495Sspeer nxge_hio_dc_share(
17596495Sspeer 	nxge_t *nxge,
17606495Sspeer 	nxge_hio_vr_t *vr,
17616495Sspeer 	mac_ring_type_t type)
17626495Sspeer {
17636495Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
17646495Sspeer 	nxge_hw_pt_cfg_t *hardware;
17656495Sspeer 	nxge_hio_dc_t *dc;
17666495Sspeer 	int channel, limit;
17676495Sspeer 
17686495Sspeer 	nxge_grp_set_t *set;
17696495Sspeer 	nxge_grp_t *group;
17706495Sspeer 
17716495Sspeer 	int slot;
17726495Sspeer 
17736495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc)",
17746495Sspeer 	    type == MAC_RING_TYPE_TX ? 't' : 'r'));
17756495Sspeer 
17766495Sspeer 	/*
17776495Sspeer 	 * In version 1.0, we may only give a VR 2 RDCs or TDCs.
17786495Sspeer 	 * Not only that, but the HV has statically assigned the
17796495Sspeer 	 * channels like so:
17806495Sspeer 	 * VR0: RDC0 & RDC1
17816495Sspeer 	 * VR1: RDC2 & RDC3, etc.
17826495Sspeer 	 * The TDCs are assigned in exactly the same way.
17836495Sspeer * 17846495Sspeer * So, for example 17856495Sspeer * hardware->start_rdc + vr->region * 2; 17866495Sspeer * VR1: hardware->start_rdc + 1 * 2; 17876495Sspeer * VR3: hardware->start_rdc + 3 * 2; 17886495Sspeer * If start_rdc is 0, we end up with 2 or 6. 17896495Sspeer * If start_rdc is 8, we end up with 10 or 14. 17906495Sspeer */ 17916495Sspeer 17926495Sspeer set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 17936495Sspeer hardware = &nxge->pt_config.hw_config; 17946495Sspeer 17956495Sspeer // This code is still NIU-specific (assuming only 2 ports) 17966495Sspeer channel = hardware->start_rdc + (vr->region % 4) * 2; 17976495Sspeer limit = channel + 2; 17986495Sspeer 17996495Sspeer MUTEX_ENTER(&nhd->lock); 18006495Sspeer for (; channel < limit; channel++) { 18016495Sspeer if ((1 << channel) & set->owned.map) { 18026495Sspeer break; 18036495Sspeer } 18046495Sspeer } 18056495Sspeer 18066495Sspeer if (channel == limit) { 18076495Sspeer MUTEX_EXIT(&nhd->lock); 18086495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 18096495Sspeer "nx_hio_dc_share: there are no channels to share")); 18106495Sspeer return (-EIO); 18116495Sspeer } 18126495Sspeer 18136495Sspeer MUTEX_EXIT(&nhd->lock); 18146495Sspeer 18156495Sspeer /* -------------------------------------------------- */ 18166495Sspeer slot = (type == MAC_RING_TYPE_TX) ? 18176495Sspeer nxge_hio_tdc_share(nxge, channel) : 18186495Sspeer nxge_hio_rdc_share(nxge, vr, channel); 18196495Sspeer 18206495Sspeer if (slot < 0) { 18216495Sspeer if (type == MAC_RING_TYPE_RX) { 18226495Sspeer nxge_hio_rdc_unshare(nxge, channel); 18236495Sspeer } else { 18246495Sspeer nxge_hio_tdc_unshare(nxge, channel); 18256495Sspeer } 18266495Sspeer return (slot); 18276495Sspeer } 18286495Sspeer 18296495Sspeer MUTEX_ENTER(&nhd->lock); 18306495Sspeer 18316495Sspeer /* 18326495Sspeer * Tag this channel. 18336495Sspeer * -------------------------------------------------- 18346495Sspeer */ 18356495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 18366495Sspeer 18376495Sspeer dc->vr = vr; 18386495Sspeer dc->channel = (nxge_channel_t)channel; 18396495Sspeer 18406495Sspeer MUTEX_EXIT(&nhd->lock); 18416495Sspeer 18426495Sspeer /* 18436495Sspeer * vr->[t|r]x_group is used by the service domain to 18446495Sspeer * keep track of its shared DMA channels. 18456495Sspeer */ 18466495Sspeer MUTEX_ENTER(&nxge->group_lock); 18476495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 18486495Sspeer 18496495Sspeer dc->group = (vr_handle_t)group; 18506495Sspeer 18516495Sspeer /* Initialize <group>, if necessary */ 18526495Sspeer if (group->count == 0) { 18536495Sspeer group->nxge = nxge; 18546495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 18556495Sspeer VP_BOUND_TX : VP_BOUND_RX; 18566495Sspeer group->sequence = nhd->sequence++; 18576495Sspeer group->active = B_TRUE; 18586495Sspeer } 18596495Sspeer 18606495Sspeer MUTEX_EXIT(&nxge->group_lock); 18616495Sspeer 18626495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 18636495Sspeer "DC share: %cDC %d was assigned to slot %d", 18646495Sspeer type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 18656495Sspeer 18666495Sspeer nxge_grp_dc_append(nxge, group, dc); 18676495Sspeer 18686495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 18696495Sspeer 18706495Sspeer return (0); 18716495Sspeer } 18726495Sspeer 18736495Sspeer /* 18746495Sspeer * nxge_hio_tdc_unshare 18756495Sspeer * 18766495Sspeer * Unshare a TDC. 
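 * That is, return the channel to the service domain: clear its bit
 * in the shared map, re-add it to the SD's default Tx group via
 * nxge_grp_dc_add(), and re-add its interrupt.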
18776495Sspeer * 18786495Sspeer * Arguments: 18796495Sspeer * nxge 18806495Sspeer * channel The channel to unshare (add again). 18816495Sspeer * 18826495Sspeer * Notes: 18836495Sspeer * 18846495Sspeer * Context: 18856495Sspeer * Service domain 18866495Sspeer */ 18876495Sspeer void 18886495Sspeer nxge_hio_tdc_unshare( 18896495Sspeer nxge_t *nxge, 18906495Sspeer int channel) 18916495Sspeer { 18926495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 18936495Sspeer vr_handle_t handle = (vr_handle_t)set->group[0]; 18946495Sspeer 18956495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 18966495Sspeer 18976495Sspeer NXGE_DC_RESET(set->shared.map, channel); 18986495Sspeer set->shared.count--; 18996495Sspeer 19006495Sspeer if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) { 19016495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19026495Sspeer "Failed to initialize TxDMA channel %d", channel)); 19036495Sspeer return; 19046495Sspeer } 19056495Sspeer 19066495Sspeer /* Re-add this interrupt. */ 19076495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 19086495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19096495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 19106495Sspeer } 19116495Sspeer 19126495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 19136495Sspeer } 19146495Sspeer 19156495Sspeer /* 19166495Sspeer * nxge_hio_rdc_unshare 19176495Sspeer * 19186495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 19196495Sspeer * 19206495Sspeer * Arguments: 19216495Sspeer * nxge 19226495Sspeer * channel The channel to unshare (add again). 19236495Sspeer * 19246495Sspeer * Notes: 19256495Sspeer * 19266495Sspeer * Context: 19276495Sspeer * Service domain 19286495Sspeer */ 19296495Sspeer void 19306495Sspeer nxge_hio_rdc_unshare( 19316495Sspeer nxge_t *nxge, 19326495Sspeer int channel) 19336495Sspeer { 19346495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19356495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 19366495Sspeer 19376495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 19386495Sspeer vr_handle_t handle = (vr_handle_t)set->group[0]; 19396495Sspeer int current, last; 19406495Sspeer 19416495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 19426495Sspeer 19436495Sspeer /* Stop RxMAC = A.9.2.6 */ 19446495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 19456495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19466495Sspeer "Failed to disable RxMAC")); 19476495Sspeer } 19486495Sspeer 19496495Sspeer /* Drain IPP Port = A.9.3.6 */ 19506495Sspeer (void) nxge_ipp_drain(nxge); 19516495Sspeer 19526495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 19536495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 19546495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 19556495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19566495Sspeer "Failed to disable RxDMA channel %d", channel)); 19576495Sspeer } 19586495Sspeer 19596495Sspeer NXGE_DC_RESET(set->shared.map, channel); 19606495Sspeer set->shared.count--; 19616495Sspeer 19626495Sspeer /* 19636495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 19646495Sspeer * 19656495Sspeer * Initialize RxDMA A.9.5.4 19666495Sspeer * Reconfigure RxDMA 19676495Sspeer * Enable RxDMA A.9.5.5 19686495Sspeer */ 19696495Sspeer if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) { 19706495Sspeer /* Be sure to re-enable the RX MAC. 
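		 * before returning, so that the remaining RDCs can keep
		 * receiving packets (cf. the same step in nxge_hio_rdc_share()).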
		 */
19716495Sspeer 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
19726495Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
19736495Sspeer 			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
19746495Sspeer 		}
19756495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
19766495Sspeer 		    "Failed to initialize RxDMA channel %d", channel));
19776495Sspeer 		return;
19786495Sspeer 	}
19796495Sspeer 
19806495Sspeer 	/*
19816495Sspeer 	 * We have to reconfigure the RDC table(s)
19826495Sspeer 	 * to which this channel once again belongs.
19836495Sspeer 	 */
19846495Sspeer 	current = hardware->def_mac_rxdma_grpid;
19856495Sspeer 	last = current + hardware->max_rdc_grpids;
19866495Sspeer 	for (; current < last; current++) {
19876495Sspeer 		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
19886495Sspeer 			nxge_rdc_grp_t *group;
19896495Sspeer 			group = &nxge->pt_config.rdc_grps[current];
19906495Sspeer 			group->map = set->owned.map;
19916495Sspeer 			group->max_rdcs++;
19926495Sspeer 			(void) nxge_init_fzc_rdc_tbl(nxge, current);
19936495Sspeer 		}
19946495Sspeer 	}
19956495Sspeer 
19966495Sspeer 	/*
19976495Sspeer 	 * Enable RxMAC = A.9.2.10
19986495Sspeer 	 */
19996495Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
20006495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
20016495Sspeer 		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
20026495Sspeer 		return;
20036495Sspeer 	}
20046495Sspeer 
20056495Sspeer 	/* Re-add this interrupt. */
20066495Sspeer 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
20076495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
20086495Sspeer 		    "nx_hio_rdc_unshare: Failed to add interrupt for "
20096495Sspeer 		    "RxDMA channel %d", channel));
20106495Sspeer 	}
20116495Sspeer 
20126495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
20136495Sspeer }
20146495Sspeer 
20156495Sspeer /*
20166495Sspeer  * nxge_hio_dc_unshare
20176495Sspeer  *
20186495Sspeer  * Unshare (reuse) a DMA channel.
20196495Sspeer  *
20206495Sspeer  * Arguments:
20216495Sspeer  * 	nxge
20226495Sspeer  * 	vr	The VR that <channel> belongs to.
20236495Sspeer  * 	type	Tx or Rx.
20246495Sspeer  * 	channel	The DMA channel to reuse.
20256495Sspeer  *
20266495Sspeer  * Notes:
20276495Sspeer  *
20286495Sspeer  * Context:
20296495Sspeer  *	Service domain
20306495Sspeer  */
20316495Sspeer void
20326495Sspeer nxge_hio_dc_unshare(
20336495Sspeer 	nxge_t *nxge,
20346495Sspeer 	nxge_hio_vr_t *vr,
20356495Sspeer 	mac_ring_type_t type,
20366495Sspeer 	int channel)
20376495Sspeer {
20386495Sspeer 	nxge_grp_t *group;
20396495Sspeer 	nxge_hio_dc_t *dc;
20406495Sspeer 
20416495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
20426495Sspeer 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
20436495Sspeer 
20446495Sspeer 	/* Unlink the channel from its group. */
20456495Sspeer 	/* -------------------------------------------------- */
20466495Sspeer 	group = (type == MAC_RING_TYPE_TX) ?
	    &vr->tx_group : &vr->rx_group;
20476602Sspeer 	NXGE_DC_RESET(group->map, channel);
20486495Sspeer 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
20496495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
20506495Sspeer 		    "nx_hio_dc_unshare(%d) failed", channel));
20516495Sspeer 		return;
20526495Sspeer 	}
20536495Sspeer 
20546495Sspeer 	dc->vr = 0;
20556495Sspeer 	dc->cookie = 0;
20566495Sspeer 
20576495Sspeer 	if (type == MAC_RING_TYPE_RX) {
20586495Sspeer 		nxge_hio_rdc_unshare(nxge, channel);
20596495Sspeer 	} else {
20606495Sspeer 		nxge_hio_tdc_unshare(nxge, channel);
20616495Sspeer 	}
20626495Sspeer 
20636495Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
20646495Sspeer }
20656495Sspeer 
20666495Sspeer #endif	/* if defined(sun4v) */
2067