16495Sspeer /* 26495Sspeer * CDDL HEADER START 36495Sspeer * 46495Sspeer * The contents of this file are subject to the terms of the 56495Sspeer * Common Development and Distribution License (the "License"). 66495Sspeer * You may not use this file except in compliance with the License. 76495Sspeer * 86495Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 96495Sspeer * or http://www.opensolaris.org/os/licensing. 106495Sspeer * See the License for the specific language governing permissions 116495Sspeer * and limitations under the License. 126495Sspeer * 136495Sspeer * When distributing Covered Code, include this CDDL HEADER in each 146495Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 156495Sspeer * If applicable, add the following below this CDDL HEADER, with the 166495Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 176495Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 186495Sspeer * 196495Sspeer * CDDL HEADER END 206495Sspeer */ 216495Sspeer 226495Sspeer /* 236495Sspeer * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 246495Sspeer * Use is subject to license terms. 256495Sspeer */ 266495Sspeer 276495Sspeer #pragma ident "%Z%%M% %I% %E% SMI" 286495Sspeer 296495Sspeer /* 306495Sspeer * nxge_hio.c 316495Sspeer * 326495Sspeer * This file manages the virtualization resources for Neptune 336495Sspeer * devices. That is, it implements a hybrid I/O (HIO) approach in the 346495Sspeer * Solaris kernel, whereby a guest domain on an LDOMs server may 356495Sspeer * request & use hardware resources from the service domain. 
366495Sspeer * 376495Sspeer */ 386495Sspeer 396495Sspeer #include <sys/nxge/nxge_impl.h> 406495Sspeer #include <sys/nxge/nxge_fzc.h> 416495Sspeer #include <sys/nxge/nxge_rxdma.h> 426495Sspeer #include <sys/nxge/nxge_txdma.h> 436495Sspeer #include <sys/nxge/nxge_hio.h> 446495Sspeer 456495Sspeer #define NXGE_HIO_SHARE_MIN_CHANNELS 2 466495Sspeer #define NXGE_HIO_SHARE_MAX_CHANNELS 2 476495Sspeer 486495Sspeer /* 496495Sspeer * External prototypes 506495Sspeer */ 516495Sspeer extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 526495Sspeer 536495Sspeer /* The following function may be found in nxge_main.c */ 546495Sspeer extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 556495Sspeer 566495Sspeer /* The following function may be found in nxge_[t|r]xdma.c */ 576495Sspeer extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 586495Sspeer extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 596495Sspeer 606495Sspeer /* 616495Sspeer * Local prototypes 626495Sspeer */ 636495Sspeer static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 646495Sspeer static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 656495Sspeer static void nxge_grp_dc_map(nxge_grp_t *group); 666495Sspeer 676495Sspeer /* 686495Sspeer * These functions are used by both service & guest domains to 696495Sspeer * decide whether they're running in an LDOMs/XEN environment 706495Sspeer * or not. If so, then the Hybrid I/O (HIO) module is initialized. 716495Sspeer */ 726495Sspeer 736495Sspeer /* 746495Sspeer * nxge_get_environs 756495Sspeer * 766495Sspeer * Figure out if we are in a guest domain or not. 
776495Sspeer * 786495Sspeer * Arguments: 796495Sspeer * nxge 806495Sspeer * 816495Sspeer * Notes: 826495Sspeer * 836495Sspeer * Context: 846495Sspeer * Any domain 856495Sspeer */ 866495Sspeer void 876495Sspeer nxge_get_environs( 886495Sspeer nxge_t *nxge) 896495Sspeer { 906495Sspeer char *string; 916495Sspeer 926495Sspeer /* 936495Sspeer * In the beginning, assume that we are running sans LDOMs/XEN. 946495Sspeer */ 956495Sspeer nxge->environs = SOLARIS_DOMAIN; 966495Sspeer 976495Sspeer /* 986495Sspeer * Are we a hybrid I/O (HIO) guest domain driver? 996495Sspeer */ 1006495Sspeer if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 1016495Sspeer DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 1026495Sspeer "niutype", &string)) == DDI_PROP_SUCCESS) { 1036495Sspeer if (strcmp(string, "n2niu") == 0) { 1046495Sspeer nxge->environs = SOLARIS_GUEST_DOMAIN; 1056495Sspeer /* So we can allocate properly-aligned memory. */ 1066495Sspeer nxge->niu_type = N2_NIU; 1076495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 1086495Sspeer "Hybrid IO-capable guest domain")); 1096495Sspeer } 1106495Sspeer ddi_prop_free(string); 1116495Sspeer } 1126495Sspeer } 1136495Sspeer 1146495Sspeer #if !defined(sun4v) 1156495Sspeer 1166495Sspeer /* 1176495Sspeer * nxge_hio_init 1186495Sspeer * 1196495Sspeer * Initialize the HIO module of the NXGE driver. 1206495Sspeer * 1216495Sspeer * Arguments: 1226495Sspeer * nxge 1236495Sspeer * 1246495Sspeer * Notes: 1256495Sspeer * This is the non-hybrid I/O version of this function. 
1266495Sspeer * 1276495Sspeer * Context: 1286495Sspeer * Any domain 1296495Sspeer */ 1306495Sspeer int 1316495Sspeer nxge_hio_init( 1326495Sspeer nxge_t *nxge) 1336495Sspeer { 1346495Sspeer nxge_hio_data_t *nhd; 1356495Sspeer 1366495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1376495Sspeer if (nhd == 0) { 1386495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 1396495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 1406495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 1416495Sspeer } 1426495Sspeer 1436495Sspeer nhd->hio.ldoms = B_FALSE; 1446495Sspeer 1456495Sspeer return (NXGE_OK); 1466495Sspeer } 1476495Sspeer 1486495Sspeer #endif 1496495Sspeer 1506495Sspeer void 1516495Sspeer nxge_hio_uninit( 1526495Sspeer nxge_t *nxge) 1536495Sspeer { 1546495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1556495Sspeer 1566495Sspeer ASSERT(nhd != NULL); 1576495Sspeer ASSERT(nxge->nxge_hw_p->ndevs == 0); 1586495Sspeer 1596495Sspeer MUTEX_DESTROY(&nhd->lock); 1606495Sspeer 1616495Sspeer KMEM_FREE(nhd, sizeof (*nhd)); 1626495Sspeer 1636495Sspeer nxge->nxge_hw_p->hio = 0; 1646495Sspeer } 1656495Sspeer 1666495Sspeer /* 1676495Sspeer * nxge_dci_map 1686495Sspeer * 1696495Sspeer * Map a DMA channel index to a channel number. 1706495Sspeer * 1716495Sspeer * Arguments: 1726495Sspeer * instance The instance number of the driver. 1736495Sspeer * type The type of channel this is: Tx or Rx. 
1746495Sspeer * index The index to convert to a channel number 1756495Sspeer * 1766495Sspeer * Notes: 1776495Sspeer * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 1786495Sspeer * 1796495Sspeer * Context: 1806495Sspeer * Any domain 1816495Sspeer */ 1826495Sspeer int 1836495Sspeer nxge_dci_map( 1846495Sspeer nxge_t *nxge, 1856495Sspeer vpc_type_t type, 1866495Sspeer int index) 1876495Sspeer { 1886495Sspeer nxge_grp_set_t *set; 1896495Sspeer int dc; 1906495Sspeer 1916495Sspeer switch (type) { 1926495Sspeer case VP_BOUND_TX: 1936495Sspeer set = &nxge->tx_set; 1946495Sspeer break; 1956495Sspeer case VP_BOUND_RX: 1966495Sspeer set = &nxge->rx_set; 1976495Sspeer break; 1986495Sspeer } 1996495Sspeer 2006495Sspeer for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 2016495Sspeer if ((1 << dc) & set->owned.map) { 2026495Sspeer if (index == 0) 2036495Sspeer return (dc); 2046495Sspeer else 2056495Sspeer index--; 2066495Sspeer } 2076495Sspeer } 2086495Sspeer 2096495Sspeer return (-1); 2106495Sspeer } 2116495Sspeer 2126495Sspeer /* 2136495Sspeer * --------------------------------------------------------------------- 2146495Sspeer * These are the general-purpose DMA channel group functions. That is, 2156495Sspeer * these functions are used to manage groups of TDCs or RDCs in an HIO 2166495Sspeer * environment. 2176495Sspeer * 2186495Sspeer * But is also expected that in the future they will be able to manage 2196495Sspeer * Crossbow groups. 2206495Sspeer * --------------------------------------------------------------------- 2216495Sspeer */ 2226495Sspeer 2236495Sspeer /* 2246495Sspeer * nxge_grp_add 2256495Sspeer * 2266495Sspeer * Add a group to an instance of NXGE. 
2276495Sspeer * 2286495Sspeer * Arguments: 2296495Sspeer * nxge 2306495Sspeer * type Tx or Rx 2316495Sspeer * 2326495Sspeer * Notes: 2336495Sspeer * 2346495Sspeer * Context: 2356495Sspeer * Any domain 2366495Sspeer */ 2376495Sspeer vr_handle_t 2386495Sspeer nxge_grp_add( 2396495Sspeer nxge_t *nxge, 2406495Sspeer nxge_grp_type_t type) 2416495Sspeer { 2426495Sspeer nxge_grp_set_t *set; 2436495Sspeer nxge_grp_t *group; 2446495Sspeer int i; 2456495Sspeer 2466495Sspeer group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 2476495Sspeer group->nxge = nxge; 2486495Sspeer 2496495Sspeer MUTEX_ENTER(&nxge->group_lock); 2506495Sspeer switch (type) { 2516495Sspeer case NXGE_TRANSMIT_GROUP: 2526495Sspeer case EXT_TRANSMIT_GROUP: 2536495Sspeer set = &nxge->tx_set; 2546495Sspeer break; 2556495Sspeer default: 2566495Sspeer set = &nxge->rx_set; 2576495Sspeer break; 2586495Sspeer } 2596495Sspeer 2606495Sspeer group->type = type; 2616495Sspeer group->active = B_TRUE; 2626495Sspeer group->sequence = set->sequence++; 2636495Sspeer 2646495Sspeer /* Find an empty slot for this logical group. */ 2656495Sspeer for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 2666495Sspeer if (set->group[i] == 0) { 2676495Sspeer group->index = i; 2686495Sspeer set->group[i] = group; 2696495Sspeer NXGE_DC_SET(set->lg.map, i); 2706495Sspeer set->lg.count++; 2716495Sspeer break; 2726495Sspeer } 2736495Sspeer } 2746495Sspeer MUTEX_EXIT(&nxge->group_lock); 2756495Sspeer 2766495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 2776495Sspeer "nxge_grp_add: %cgroup = %d.%d", 2786495Sspeer type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 2796495Sspeer nxge->mac.portnum, group->sequence)); 2806495Sspeer 2816495Sspeer return ((vr_handle_t)group); 2826495Sspeer } 2836495Sspeer 2846495Sspeer void 2856495Sspeer nxge_grp_remove( 2866495Sspeer nxge_t *nxge, 2876495Sspeer vr_handle_t handle) /* The group to remove. 
*/ 2886495Sspeer { 2896495Sspeer nxge_grp_set_t *set; 2906495Sspeer nxge_grp_t *group; 2916495Sspeer vpc_type_t type; 2926495Sspeer 2936495Sspeer group = (nxge_grp_t *)handle; 2946495Sspeer 2956495Sspeer MUTEX_ENTER(&nxge->group_lock); 2966495Sspeer switch (group->type) { 2976495Sspeer case NXGE_TRANSMIT_GROUP: 2986495Sspeer case EXT_TRANSMIT_GROUP: 2996495Sspeer set = &nxge->tx_set; 3006495Sspeer break; 3016495Sspeer default: 3026495Sspeer set = &nxge->rx_set; 3036495Sspeer break; 3046495Sspeer } 3056495Sspeer 3066495Sspeer if (set->group[group->index] != group) { 3076495Sspeer MUTEX_EXIT(&nxge->group_lock); 3086495Sspeer return; 3096495Sspeer } 3106495Sspeer 3116495Sspeer set->group[group->index] = 0; 3126495Sspeer NXGE_DC_RESET(set->lg.map, group->index); 3136495Sspeer set->lg.count--; 3146495Sspeer 3156495Sspeer /* While inside the mutex, deactivate <group>. */ 3166495Sspeer group->active = B_FALSE; 3176495Sspeer 3186495Sspeer MUTEX_EXIT(&nxge->group_lock); 3196495Sspeer 3206495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3216495Sspeer "nxge_grp_remove(%c.%d.%d) called", 3226495Sspeer group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 3236495Sspeer nxge->mac.portnum, group->sequence)); 3246495Sspeer 3256495Sspeer /* Now, remove any DCs which are still active. */ 3266495Sspeer switch (group->type) { 3276495Sspeer default: 3286495Sspeer type = VP_BOUND_TX; 3296495Sspeer break; 3306495Sspeer case NXGE_RECEIVE_GROUP: 3316495Sspeer case EXT_RECEIVE_GROUP: 3326495Sspeer type = VP_BOUND_RX; 3336495Sspeer } 3346495Sspeer 3356495Sspeer while (group->dc) { 3366495Sspeer nxge_grp_dc_remove(nxge, type, group->dc->channel); 3376495Sspeer } 3386495Sspeer 3396495Sspeer KMEM_FREE(group, sizeof (*group)); 3406495Sspeer } 3416495Sspeer 3426495Sspeer /* 3436495Sspeer * nx_hio_dc_add 3446495Sspeer * 3456495Sspeer * Add a DMA channel to a VR/Group. 3466495Sspeer * 3476495Sspeer * Arguments: 3486495Sspeer * nxge 3496495Sspeer * channel The channel to add. 
3506495Sspeer * Notes: 3516495Sspeer * 3526495Sspeer * Context: 3536495Sspeer * Any domain 3546495Sspeer */ 3556495Sspeer /* ARGSUSED */ 3566495Sspeer int 3576495Sspeer nxge_grp_dc_add( 3586495Sspeer nxge_t *nxge, 3596495Sspeer vr_handle_t handle, /* The group to add <channel> to. */ 3606495Sspeer vpc_type_t type, /* Rx or Tx */ 3616495Sspeer int channel) /* A physical/logical channel number */ 3626495Sspeer { 3636495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 3646495Sspeer nxge_hio_dc_t *dc; 3656495Sspeer nxge_grp_set_t *set; 3666495Sspeer nxge_grp_t *group; 3676602Sspeer nxge_status_t status = NXGE_OK; 3686495Sspeer 3696495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 3706495Sspeer 3716495Sspeer if (handle == 0) 3726495Sspeer return (0); 3736495Sspeer 3746495Sspeer switch (type) { 3756495Sspeer default: 3766495Sspeer set = &nxge->tx_set; 3776495Sspeer if (channel > NXGE_MAX_TDCS) { 3786495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 3796495Sspeer "nxge_grp_dc_add: TDC = %d", channel)); 3806495Sspeer return (NXGE_ERROR); 3816495Sspeer } 3826495Sspeer break; 3836495Sspeer case VP_BOUND_RX: 3846495Sspeer set = &nxge->rx_set; 3856495Sspeer if (channel > NXGE_MAX_RDCS) { 3866495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 3876495Sspeer "nxge_grp_dc_add: RDC = %d", channel)); 3886495Sspeer return (NXGE_ERROR); 3896495Sspeer } 3906495Sspeer break; 3916495Sspeer } 3926495Sspeer 3936495Sspeer group = (nxge_grp_t *)handle; 3946495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 3956495Sspeer "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 3966495Sspeer type == VP_BOUND_TX ? 't' : 'r', 3976495Sspeer nxge->mac.portnum, group->sequence, group->count, channel)); 3986495Sspeer 3996495Sspeer MUTEX_ENTER(&nxge->group_lock); 4006495Sspeer if (group->active != B_TRUE) { 4016495Sspeer /* We may be in the process of removing this group. 
*/ 4026495Sspeer MUTEX_EXIT(&nxge->group_lock); 4036495Sspeer return (NXGE_ERROR); 4046495Sspeer } 4056495Sspeer MUTEX_EXIT(&nxge->group_lock); 4066495Sspeer 4076495Sspeer if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 4086495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4096495Sspeer "nxge_grp_dc_add(%d): DC FIND failed", channel)); 4106495Sspeer return (NXGE_ERROR); 4116495Sspeer } 4126495Sspeer 4136495Sspeer MUTEX_ENTER(&nhd->lock); 4146495Sspeer 4156495Sspeer if (dc->group) { 4166495Sspeer MUTEX_EXIT(&nhd->lock); 4176495Sspeer /* This channel is already in use! */ 4186495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4196495Sspeer "nxge_grp_dc_add(%d): channel already in group", channel)); 4206495Sspeer return (NXGE_ERROR); 4216495Sspeer } 4226495Sspeer 4236495Sspeer dc->next = 0; 4246495Sspeer dc->page = channel; 4256495Sspeer dc->channel = (nxge_channel_t)channel; 4266495Sspeer 4276495Sspeer dc->type = type; 4286495Sspeer if (type == VP_BOUND_RX) { 4296495Sspeer dc->init = nxge_init_rxdma_channel; 4306495Sspeer dc->uninit = nxge_uninit_rxdma_channel; 4316495Sspeer } else { 4326495Sspeer dc->init = nxge_init_txdma_channel; 4336495Sspeer dc->uninit = nxge_uninit_txdma_channel; 4346495Sspeer } 4356495Sspeer 4366495Sspeer dc->group = handle; 4376495Sspeer 4386495Sspeer if (isLDOMguest(nxge)) 4396495Sspeer (void) nxge_hio_ldsv_add(nxge, dc); 4406495Sspeer 4416495Sspeer NXGE_DC_SET(set->owned.map, channel); 4426495Sspeer set->owned.count++; 4436495Sspeer 4446495Sspeer MUTEX_EXIT(&nhd->lock); 4456495Sspeer 4466602Sspeer if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 4476602Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4486602Sspeer "nxge_grp_dc_add(%d): channel init failed", channel)); 449*6603Sspeer return (NXGE_ERROR); 4506602Sspeer } 4516602Sspeer 4526495Sspeer nxge_grp_dc_append(nxge, group, dc); 4536495Sspeer 4546495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 4556495Sspeer 4566602Sspeer return ((int)status); 4576495Sspeer } 4586495Sspeer 
4596495Sspeer void 4606495Sspeer nxge_grp_dc_remove( 4616495Sspeer nxge_t *nxge, 4626495Sspeer vpc_type_t type, 4636495Sspeer int channel) 4646495Sspeer { 4656495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 4666495Sspeer nxge_hio_dc_t *dc; 4676495Sspeer nxge_grp_set_t *set; 4686495Sspeer nxge_grp_t *group; 4696495Sspeer 4706495Sspeer dc_uninit_t uninit; 4716495Sspeer 4726495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 4736495Sspeer 4746495Sspeer if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) { 4756495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 4766495Sspeer "nx_hio_dc_remove: find(%d) failed", channel)); 4776495Sspeer return; 4786495Sspeer } 4796495Sspeer group = (nxge_grp_t *)dc->group; 4806495Sspeer 4816495Sspeer if (isLDOMguest(nxge)) { 4826495Sspeer (void) nxge_hio_intr_remove(nxge, type, channel); 4836495Sspeer } 4846495Sspeer 4856495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 4866495Sspeer "DC remove: group = %d.%d.%d, %cdc %d", 4876495Sspeer nxge->mac.portnum, group->sequence, group->count, 4886495Sspeer type == VP_BOUND_TX ? 't' : 'r', dc->channel)); 4896495Sspeer 4906495Sspeer MUTEX_ENTER(&nhd->lock); 4916495Sspeer 4926602Sspeer set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 4936602Sspeer if (isLDOMs(nxge) && ((1 << channel) && set->shared.map)) { 4946602Sspeer NXGE_DC_RESET(group->map, channel); 4956602Sspeer } 4966602Sspeer 4976495Sspeer /* Remove the DC from its group. 
*/ 4986495Sspeer if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 4996495Sspeer MUTEX_EXIT(&nhd->lock); 5006495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5016495Sspeer "nx_hio_dc_remove(%d) failed", channel)); 5026495Sspeer return; 5036495Sspeer } 5046495Sspeer 5056495Sspeer uninit = dc->uninit; 5066495Sspeer channel = dc->channel; 5076495Sspeer 5086495Sspeer NXGE_DC_RESET(set->owned.map, channel); 5096495Sspeer set->owned.count--; 5106495Sspeer 5116495Sspeer (void) memset(dc, 0, sizeof (*dc)); 5126495Sspeer 5136495Sspeer MUTEX_EXIT(&nhd->lock); 5146495Sspeer 5156495Sspeer (*uninit)(nxge, channel); 5166495Sspeer 5176495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 5186495Sspeer } 5196495Sspeer 5206495Sspeer nxge_hio_dc_t * 5216495Sspeer nxge_grp_dc_find( 5226495Sspeer nxge_t *nxge, 5236495Sspeer vpc_type_t type, /* Rx or Tx */ 5246495Sspeer int channel) 5256495Sspeer { 5266495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 5276495Sspeer nxge_hio_dc_t *current; 5286495Sspeer 5296495Sspeer current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 5306495Sspeer 5316495Sspeer if (!isLDOMguest(nxge)) { 5326495Sspeer return (¤t[channel]); 5336495Sspeer } else { 5346495Sspeer /* We're in a guest domain. */ 5356495Sspeer int i, limit = (type == VP_BOUND_TX) ? 5366495Sspeer NXGE_MAX_TDCS : NXGE_MAX_RDCS; 5376495Sspeer 5386495Sspeer MUTEX_ENTER(&nhd->lock); 5396495Sspeer for (i = 0; i < limit; i++, current++) { 5406495Sspeer if (current->channel == channel) { 5416495Sspeer if (current->vr && current->vr->nxge == 5426495Sspeer (uintptr_t)nxge) { 5436495Sspeer MUTEX_EXIT(&nhd->lock); 5446495Sspeer return (current); 5456495Sspeer } 5466495Sspeer } 5476495Sspeer } 5486495Sspeer MUTEX_EXIT(&nhd->lock); 5496495Sspeer } 5506495Sspeer 5516495Sspeer return (0); 5526495Sspeer } 5536495Sspeer 5546495Sspeer /* 5556495Sspeer * nxge_grp_dc_append 5566495Sspeer * 5576495Sspeer * Append a DMA channel to a group. 
5586495Sspeer * 5596495Sspeer * Arguments: 5606495Sspeer * nxge 5616495Sspeer * group The group to append to 5626495Sspeer * dc The DMA channel to append 5636495Sspeer * 5646495Sspeer * Notes: 5656495Sspeer * 5666495Sspeer * Context: 5676495Sspeer * Any domain 5686495Sspeer */ 5696495Sspeer static 5706495Sspeer void 5716495Sspeer nxge_grp_dc_append( 5726495Sspeer nxge_t *nxge, 5736495Sspeer nxge_grp_t *group, 5746495Sspeer nxge_hio_dc_t *dc) 5756495Sspeer { 5766495Sspeer MUTEX_ENTER(&nxge->group_lock); 5776495Sspeer 5786495Sspeer if (group->dc == 0) { 5796495Sspeer group->dc = dc; 5806495Sspeer } else { 5816495Sspeer nxge_hio_dc_t *current = group->dc; 5826495Sspeer do { 5836495Sspeer if (current->next == 0) { 5846495Sspeer current->next = dc; 5856495Sspeer break; 5866495Sspeer } 5876495Sspeer current = current->next; 5886495Sspeer } while (current); 5896495Sspeer } 5906495Sspeer 5916495Sspeer NXGE_DC_SET(group->map, dc->channel); 5926495Sspeer 5936495Sspeer nxge_grp_dc_map(group); 5946602Sspeer group->count++; 5956495Sspeer 5966495Sspeer MUTEX_EXIT(&nxge->group_lock); 5976495Sspeer } 5986495Sspeer 5996495Sspeer /* 6006495Sspeer * nxge_grp_dc_unlink 6016495Sspeer * 6026495Sspeer * Unlink a DMA channel fromits linked list (group). 
6036495Sspeer * 6046495Sspeer * Arguments: 6056495Sspeer * nxge 6066495Sspeer * group The group (linked list) to unlink from 6076495Sspeer * dc The DMA channel to append 6086495Sspeer * 6096495Sspeer * Notes: 6106495Sspeer * 6116495Sspeer * Context: 6126495Sspeer * Any domain 6136495Sspeer */ 6146495Sspeer nxge_hio_dc_t * 6156495Sspeer nxge_grp_dc_unlink( 6166495Sspeer nxge_t *nxge, 6176495Sspeer nxge_grp_t *group, 6186495Sspeer int channel) 6196495Sspeer { 6206495Sspeer nxge_hio_dc_t *current, *previous; 6216495Sspeer 6226495Sspeer MUTEX_ENTER(&nxge->group_lock); 6236495Sspeer 6246495Sspeer if ((current = group->dc) == 0) { 6256495Sspeer MUTEX_EXIT(&nxge->group_lock); 6266495Sspeer return (0); 6276495Sspeer } 6286495Sspeer 6296495Sspeer previous = 0; 6306495Sspeer do { 6316495Sspeer if (current->channel == channel) { 6326495Sspeer if (previous) 6336495Sspeer previous->next = current->next; 6346495Sspeer else 6356495Sspeer group->dc = current->next; 6366495Sspeer break; 6376495Sspeer } 6386495Sspeer previous = current; 6396495Sspeer current = current->next; 6406495Sspeer } while (current); 6416495Sspeer 6426495Sspeer if (current == 0) { 6436495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 6446495Sspeer "DC unlink: DC %d not found", channel)); 6456495Sspeer } else { 6466495Sspeer current->next = 0; 6476495Sspeer current->group = 0; 6486495Sspeer 6496495Sspeer group->count--; 6506495Sspeer } 6516495Sspeer 6526495Sspeer nxge_grp_dc_map(group); 6536495Sspeer 6546495Sspeer MUTEX_EXIT(&nxge->group_lock); 6556495Sspeer 6566495Sspeer return (current); 6576495Sspeer } 6586495Sspeer 6596495Sspeer /* 6606495Sspeer * nxge_grp_dc_map 6616495Sspeer * 6626495Sspeer * Map a linked list to an array of channel numbers. 6636495Sspeer * 6646495Sspeer * Arguments: 6656495Sspeer * nxge 6666495Sspeer * group The group to remap. 6676495Sspeer * 6686495Sspeer * Notes: 6696495Sspeer * It is expected that the caller will hold the correct mutex. 
6706495Sspeer * 6716495Sspeer * Context: 6726495Sspeer * Service domain 6736495Sspeer */ 6746495Sspeer void 6756495Sspeer nxge_grp_dc_map( 6766495Sspeer nxge_grp_t *group) 6776495Sspeer { 6786495Sspeer nxge_channel_t *legend; 6796495Sspeer nxge_hio_dc_t *dc; 6806495Sspeer 6816495Sspeer (void) memset(group->legend, 0, sizeof (group->legend)); 6826495Sspeer 6836495Sspeer legend = group->legend; 6846495Sspeer dc = group->dc; 6856495Sspeer while (dc) { 6866495Sspeer *legend = dc->channel; 6876495Sspeer legend++; 6886495Sspeer dc = dc->next; 6896495Sspeer } 6906495Sspeer } 6916495Sspeer 6926495Sspeer /* 6936495Sspeer * --------------------------------------------------------------------- 6946495Sspeer * These are HIO debugging functions. 6956495Sspeer * --------------------------------------------------------------------- 6966495Sspeer */ 6976495Sspeer 6986495Sspeer /* 6996495Sspeer * nxge_delay 7006495Sspeer * 7016495Sspeer * Delay <seconds> number of seconds. 7026495Sspeer * 7036495Sspeer * Arguments: 7046495Sspeer * nxge 7056495Sspeer * group The group to append to 7066495Sspeer * dc The DMA channel to append 7076495Sspeer * 7086495Sspeer * Notes: 7096495Sspeer * This is a developer-only function. 
7106495Sspeer * 7116495Sspeer * Context: 7126495Sspeer * Any domain 7136495Sspeer */ 7146495Sspeer void 7156495Sspeer nxge_delay( 7166495Sspeer int seconds) 7176495Sspeer { 7186495Sspeer delay(drv_usectohz(seconds * 1000000)); 7196495Sspeer } 7206495Sspeer 7216495Sspeer static dmc_reg_name_t rx_names[] = { 7226495Sspeer { "RXDMA_CFIG1", 0 }, 7236495Sspeer { "RXDMA_CFIG2", 8 }, 7246495Sspeer { "RBR_CFIG_A", 0x10 }, 7256495Sspeer { "RBR_CFIG_B", 0x18 }, 7266495Sspeer { "RBR_KICK", 0x20 }, 7276495Sspeer { "RBR_STAT", 0x28 }, 7286495Sspeer { "RBR_HDH", 0x30 }, 7296495Sspeer { "RBR_HDL", 0x38 }, 7306495Sspeer { "RCRCFIG_A", 0x40 }, 7316495Sspeer { "RCRCFIG_B", 0x48 }, 7326495Sspeer { "RCRSTAT_A", 0x50 }, 7336495Sspeer { "RCRSTAT_B", 0x58 }, 7346495Sspeer { "RCRSTAT_C", 0x60 }, 7356495Sspeer { "RX_DMA_ENT_MSK", 0x68 }, 7366495Sspeer { "RX_DMA_CTL_STAT", 0x70 }, 7376495Sspeer { "RCR_FLSH", 0x78 }, 7386495Sspeer { "RXMISC", 0x90 }, 7396495Sspeer { "RX_DMA_CTL_STAT_DBG", 0x98 }, 7406495Sspeer { 0, -1 } 7416495Sspeer }; 7426495Sspeer 7436495Sspeer static dmc_reg_name_t tx_names[] = { 7446495Sspeer { "Tx_RNG_CFIG", 0 }, 7456495Sspeer { "Tx_RNG_HDL", 0x10 }, 7466495Sspeer { "Tx_RNG_KICK", 0x18 }, 7476495Sspeer { "Tx_ENT_MASK", 0x20 }, 7486495Sspeer { "Tx_CS", 0x28 }, 7496495Sspeer { "TxDMA_MBH", 0x30 }, 7506495Sspeer { "TxDMA_MBL", 0x38 }, 7516495Sspeer { "TxDMA_PRE_ST", 0x40 }, 7526495Sspeer { "Tx_RNG_ERR_LOGH", 0x48 }, 7536495Sspeer { "Tx_RNG_ERR_LOGL", 0x50 }, 7546495Sspeer { "TDMC_INTR_DBG", 0x60 }, 7556495Sspeer { "Tx_CS_DBG", 0x68 }, 7566495Sspeer { 0, -1 } 7576495Sspeer }; 7586495Sspeer 7596495Sspeer /* 7606495Sspeer * nxge_xx2str 7616495Sspeer * 7626495Sspeer * Translate a register address into a string. 7636495Sspeer * 7646495Sspeer * Arguments: 7656495Sspeer * offset The address of the register to translate. 7666495Sspeer * 7676495Sspeer * Notes: 7686495Sspeer * These are developer-only function. 
7696495Sspeer * 7706495Sspeer * Context: 7716495Sspeer * Any domain 7726495Sspeer */ 7736495Sspeer const char * 7746495Sspeer nxge_rx2str( 7756495Sspeer int offset) 7766495Sspeer { 7776495Sspeer dmc_reg_name_t *reg = &rx_names[0]; 7786495Sspeer 7796495Sspeer offset &= DMA_CSR_MASK; 7806495Sspeer 7816495Sspeer while (reg->name) { 7826495Sspeer if (offset == reg->offset) 7836495Sspeer return (reg->name); 7846495Sspeer reg++; 7856495Sspeer } 7866495Sspeer 7876495Sspeer return (0); 7886495Sspeer } 7896495Sspeer 7906495Sspeer const char * 7916495Sspeer nxge_tx2str( 7926495Sspeer int offset) 7936495Sspeer { 7946495Sspeer dmc_reg_name_t *reg = &tx_names[0]; 7956495Sspeer 7966495Sspeer offset &= DMA_CSR_MASK; 7976495Sspeer 7986495Sspeer while (reg->name) { 7996495Sspeer if (offset == reg->offset) 8006495Sspeer return (reg->name); 8016495Sspeer reg++; 8026495Sspeer } 8036495Sspeer 8046495Sspeer return (0); 8056495Sspeer } 8066495Sspeer 8076495Sspeer /* 8086495Sspeer * nxge_ddi_perror 8096495Sspeer * 8106495Sspeer * Map a DDI error number to a string. 8116495Sspeer * 8126495Sspeer * Arguments: 8136495Sspeer * ddi_error The DDI error number to map. 
8146495Sspeer * 8156495Sspeer * Notes: 8166495Sspeer * 8176495Sspeer * Context: 8186495Sspeer * Any domain 8196495Sspeer */ 8206495Sspeer const char * 8216495Sspeer nxge_ddi_perror( 8226495Sspeer int ddi_error) 8236495Sspeer { 8246495Sspeer switch (ddi_error) { 8256495Sspeer case DDI_SUCCESS: 8266495Sspeer return ("DDI_SUCCESS"); 8276495Sspeer case DDI_FAILURE: 8286495Sspeer return ("DDI_FAILURE"); 8296495Sspeer case DDI_NOT_WELL_FORMED: 8306495Sspeer return ("DDI_NOT_WELL_FORMED"); 8316495Sspeer case DDI_EAGAIN: 8326495Sspeer return ("DDI_EAGAIN"); 8336495Sspeer case DDI_EINVAL: 8346495Sspeer return ("DDI_EINVAL"); 8356495Sspeer case DDI_ENOTSUP: 8366495Sspeer return ("DDI_ENOTSUP"); 8376495Sspeer case DDI_EPENDING: 8386495Sspeer return ("DDI_EPENDING"); 8396495Sspeer case DDI_ENOMEM: 8406495Sspeer return ("DDI_ENOMEM"); 8416495Sspeer case DDI_EBUSY: 8426495Sspeer return ("DDI_EBUSY"); 8436495Sspeer case DDI_ETRANSPORT: 8446495Sspeer return ("DDI_ETRANSPORT"); 8456495Sspeer case DDI_ECONTEXT: 8466495Sspeer return ("DDI_ECONTEXT"); 8476495Sspeer default: 8486495Sspeer return ("Unknown error"); 8496495Sspeer } 8506495Sspeer } 8516495Sspeer 8526495Sspeer /* 8536495Sspeer * --------------------------------------------------------------------- 8546495Sspeer * These are Sun4v HIO function definitions 8556495Sspeer * --------------------------------------------------------------------- 8566495Sspeer */ 8576495Sspeer 8586495Sspeer #if defined(sun4v) 8596495Sspeer 8606495Sspeer /* 8616495Sspeer * Local prototypes 8626495Sspeer */ 8636495Sspeer static vr_handle_t nxge_hio_vr_share(nxge_t *); 8646495Sspeer 8656495Sspeer static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 8666495Sspeer static void nxge_hio_unshare(vr_handle_t); 8676495Sspeer 8686495Sspeer static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int); 8696495Sspeer static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t); 8706495Sspeer 8716495Sspeer static void 
nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 8726495Sspeer static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 8736495Sspeer static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 8746495Sspeer mac_ring_type_t, int); 8756495Sspeer 8766495Sspeer /* 8776495Sspeer * nxge_hio_init 8786495Sspeer * 8796495Sspeer * Initialize the HIO module of the NXGE driver. 8806495Sspeer * 8816495Sspeer * Arguments: 8826495Sspeer * nxge 8836495Sspeer * 8846495Sspeer * Notes: 8856495Sspeer * 8866495Sspeer * Context: 8876495Sspeer * Any domain 8886495Sspeer */ 8896495Sspeer int 8906495Sspeer nxge_hio_init( 8916495Sspeer nxge_t *nxge) 8926495Sspeer { 8936495Sspeer nxge_hio_data_t *nhd; 8946495Sspeer int i, region; 8956495Sspeer 8966495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 8976495Sspeer if (nhd == 0) { 8986495Sspeer nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 8996495Sspeer MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 9006495Sspeer nxge->nxge_hw_p->hio = (uintptr_t)nhd; 9016495Sspeer } 9026495Sspeer 9036495Sspeer if (nxge->environs == SOLARIS_DOMAIN) { 9046495Sspeer if (nxge->niu_hsvc_available == B_TRUE) { 9056495Sspeer hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 9066495Sspeer if (niu_hsvc->hsvc_major == 1 && 9076495Sspeer niu_hsvc->hsvc_minor == 1) 9086495Sspeer nxge->environs = SOLARIS_SERVICE_DOMAIN; 9096495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, 9106495Sspeer "nxge_hio_init: hypervisor services " 9116495Sspeer "version %d.%d", 9126495Sspeer niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 9136495Sspeer } 9146495Sspeer } 9156495Sspeer 9166495Sspeer if (!isLDOMs(nxge)) { 9176495Sspeer nhd->hio.ldoms = B_FALSE; 9186495Sspeer return (NXGE_OK); 9196495Sspeer } 9206495Sspeer 9216495Sspeer nhd->hio.ldoms = B_TRUE; 9226495Sspeer 9236495Sspeer /* 9246495Sspeer * Fill in what we can. 
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	/* Two of the regions are reserved -- TODO confirm which two. */
	nhd->available.vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = (void *)NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		/*
		 * Look up the vio_net registration entry points by
		 * symbol; both must be present for guest operation.
		 */
		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}

/*
 * nxge_hio_add_mac
 *
 * MAC-layer callback (installed as mrg_addmac): program <mac_addr>
 * into the hostinfo entry of the VR backing this RX ring group.
 *
 * Returns 0 on success, or the nonzero result of
 * nxge_hio_hostinfo_init() on failure.
 */
static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int rv, sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	/* Map ring group -> share -> VR. */
	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Program the mac address for the group/share.
	 */
	if ((rv = nxge_hio_hostinfo_init(nxge, vr,
	    (ether_addr_t *)mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

/*
 * nxge_hio_rem_mac
 *
 * MAC-layer callback (installed as mrg_remmac): remove the hostinfo
 * (MAC address) entry of the VR backing this RX ring group.
 * Always returns 0.
 */
/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	/* Map ring group -> share -> VR. */
	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Remove the mac address for the group/share.
	 */
	nxge_hio_hostinfo_uninit(nxge, vr);

	return (0);
}

/*
 * nxge_hio_group_get
 *
 * Fill in <infop> for ring group <group>.  Only the RX case carries
 * any state; the TX case is intentionally empty.
 */
/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
	mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rxgroup = &nxgep->rx_hio_groups[group];
		rxgroup->gindex = group;

		infop->mrg_driver = (mac_group_driver_t)rxgroup;
		infop->mrg_start = NULL;
		infop->mrg_stop = NULL;
		infop->mrg_addmac = nxge_hio_add_mac;
		infop->mrg_remmac = nxge_hio_rem_mac;
		infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		break;

	case MAC_RING_TYPE_TX:
		break;
	}
}

int
10516495Sspeer nxge_hio_share_assign( 10526495Sspeer nxge_t *nxge, 10536495Sspeer uint64_t cookie, 10546495Sspeer res_map_t *tmap, 10556495Sspeer res_map_t *rmap, 10566495Sspeer nxge_hio_vr_t *vr) 10576495Sspeer { 10586495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 10596495Sspeer uint64_t slot, hv_rv; 10606495Sspeer nxge_hio_dc_t *dc; 10616495Sspeer nxhv_vr_fp_t *fp; 10626495Sspeer int i; 10636495Sspeer 10646495Sspeer /* 10656495Sspeer * Ask the Hypervisor to set up the VR for us 10666495Sspeer */ 10676495Sspeer fp = &nhd->hio.vr; 10686495Sspeer if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 10696495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 10706495Sspeer "nx_hio_share_assign: " 10716495Sspeer "vr->assign() returned %d", hv_rv)); 10726495Sspeer nxge_hio_unshare((vr_handle_t)vr); 10736495Sspeer return (-EIO); 10746495Sspeer } 10756495Sspeer 10766495Sspeer /* 10776495Sspeer * For each shared TDC, ask the HV to find us an empty slot. 10786495Sspeer * ----------------------------------------------------- 10796495Sspeer */ 10806495Sspeer dc = vr->tx_group.dc; 10816495Sspeer for (i = 0; i < NXGE_MAX_TDCS; i++) { 10826495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 10836495Sspeer while (dc) { 10846495Sspeer hv_rv = (*tx->assign) 10856495Sspeer (vr->cookie, dc->channel, &slot); 10866495Sspeer cmn_err(CE_CONT, "tx->assign(%d, %d)", dc->channel, dc->page); 10876495Sspeer if (hv_rv != 0) { 10886495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 10896495Sspeer "nx_hio_share_assign: " 10906495Sspeer "tx->assign(%x, %d) failed: %ld", 10916495Sspeer vr->cookie, dc->channel, hv_rv)); 10926495Sspeer return (-EIO); 10936495Sspeer } 10946495Sspeer 10956495Sspeer dc->cookie = vr->cookie; 10966495Sspeer dc->page = (vp_channel_t)slot; 10976495Sspeer 10986495Sspeer /* Inform the caller about the slot chosen. 
*/ 10996495Sspeer (*tmap) |= 1 << slot; 11006495Sspeer 11016495Sspeer dc = dc->next; 11026495Sspeer } 11036495Sspeer } 11046495Sspeer 11056495Sspeer /* 11066495Sspeer * For each shared RDC, ask the HV to find us an empty slot. 11076495Sspeer * ----------------------------------------------------- 11086495Sspeer */ 11096495Sspeer dc = vr->rx_group.dc; 11106495Sspeer for (i = 0; i < NXGE_MAX_RDCS; i++) { 11116495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 11126495Sspeer while (dc) { 11136495Sspeer hv_rv = (*rx->assign) 11146495Sspeer (vr->cookie, dc->channel, &slot); 11156495Sspeer cmn_err(CE_CONT, "rx->assign(%d, %d)", dc->channel, dc->page); 11166495Sspeer if (hv_rv != 0) { 11176495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11186495Sspeer "nx_hio_share_assign: " 11196495Sspeer "rx->assign(%x, %d) failed: %ld", 11206495Sspeer vr->cookie, dc->channel, hv_rv)); 11216495Sspeer return (-EIO); 11226495Sspeer } 11236495Sspeer 11246495Sspeer dc->cookie = vr->cookie; 11256495Sspeer dc->page = (vp_channel_t)slot; 11266495Sspeer 11276495Sspeer /* Inform the caller about the slot chosen. 
*/ 11286495Sspeer (*rmap) |= 1 << slot; 11296495Sspeer 11306495Sspeer dc = dc->next; 11316495Sspeer } 11326495Sspeer } 11336495Sspeer 11346495Sspeer cmn_err(CE_CONT, "tmap %lx, rmap %lx", *tmap, *rmap); 11356495Sspeer return (0); 11366495Sspeer } 11376495Sspeer 11386495Sspeer int 11396495Sspeer nxge_hio_share_unassign( 11406495Sspeer nxge_hio_vr_t *vr) 11416495Sspeer { 11426495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 11436495Sspeer nxge_hio_data_t *nhd; 11446495Sspeer nxge_hio_dc_t *dc; 11456495Sspeer nxhv_vr_fp_t *fp; 11466495Sspeer uint64_t hv_rv; 11476495Sspeer 11486495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 11496495Sspeer 11506495Sspeer dc = vr->tx_group.dc; 11516495Sspeer while (dc) { 11526495Sspeer nxhv_dc_fp_t *tx = &nhd->hio.tx; 11536495Sspeer hv_rv = (*tx->unassign)(vr->cookie, dc->page); 11546495Sspeer if (hv_rv != 0) { 11556495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11566495Sspeer "nx_hio_dc_unshare: " 11576495Sspeer "tx->unassign(%x, %d) failed: %ld", 11586495Sspeer vr->cookie, dc->page, hv_rv)); 11596495Sspeer } 11606495Sspeer dc = dc->next; 11616495Sspeer } 11626495Sspeer 11636495Sspeer dc = vr->rx_group.dc; 11646495Sspeer while (dc) { 11656495Sspeer nxhv_dc_fp_t *rx = &nhd->hio.rx; 11666495Sspeer hv_rv = (*rx->unassign)(vr->cookie, dc->page); 11676495Sspeer if (hv_rv != 0) { 11686495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 11696495Sspeer "nx_hio_dc_unshare: " 11706495Sspeer "rx->unassign(%x, %d) failed: %ld", 11716495Sspeer vr->cookie, dc->page, hv_rv)); 11726495Sspeer } 11736495Sspeer dc = dc->next; 11746495Sspeer } 11756495Sspeer 11766495Sspeer fp = &nhd->hio.vr; 11776495Sspeer if (fp->unassign) { 11786495Sspeer hv_rv = (*fp->unassign)(vr->cookie); 11796495Sspeer if (hv_rv != 0) { 11806495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 11816495Sspeer "vr->assign(%x) failed: %ld", 11826495Sspeer vr->cookie, hv_rv)); 11836495Sspeer } 11846495Sspeer } 11856495Sspeer 11866495Sspeer return (0); 11876495Sspeer } 
11886495Sspeer 11896495Sspeer int 11906495Sspeer nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 11916495Sspeer mac_share_handle_t *shandle) 11926495Sspeer { 11936495Sspeer p_nxge_t nxge = (p_nxge_t)arg; 11946495Sspeer nxge_rx_ring_group_t *rxgroup; 11956495Sspeer nxge_share_handle_t *shp; 11966495Sspeer 11976495Sspeer vr_handle_t shared; /* The VR being shared */ 11986495Sspeer nxge_hio_vr_t *vr; /* The Virtualization Region */ 11996495Sspeer uint64_t rmap, tmap; 12006495Sspeer int rv; 12016495Sspeer 12026495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 12036495Sspeer 12046495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 12056495Sspeer 12066495Sspeer if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 12076495Sspeer nhd->hio.rx.assign == 0) { 12086495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 12096495Sspeer return (EIO); 12106495Sspeer } 12116495Sspeer 12126495Sspeer /* 12136495Sspeer * Get a VR. 12146495Sspeer */ 12156495Sspeer if ((shared = nxge_hio_vr_share(nxge)) == 0) 12166495Sspeer return (EAGAIN); 12176495Sspeer vr = (nxge_hio_vr_t *)shared; 12186495Sspeer 12196495Sspeer /* 12206495Sspeer * Get an RDC group for us to use. 12216495Sspeer */ 12226495Sspeer if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 12236495Sspeer nxge_hio_unshare(shared); 12246495Sspeer return (EBUSY); 12256495Sspeer } 12266495Sspeer 12276495Sspeer /* 12286495Sspeer * Add resources to the share. 
12296495Sspeer */ 12306495Sspeer tmap = 0; 12316495Sspeer rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX, 12326495Sspeer NXGE_HIO_SHARE_MAX_CHANNELS); 12336495Sspeer if (rv != 0) { 12346495Sspeer nxge_hio_unshare(shared); 12356495Sspeer return (rv); 12366495Sspeer } 12376495Sspeer 12386495Sspeer rmap = 0; 12396495Sspeer rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX, 12406495Sspeer NXGE_HIO_SHARE_MAX_CHANNELS); 12416495Sspeer if (rv != 0) { 12426495Sspeer nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 12436495Sspeer nxge_hio_unshare(shared); 12446495Sspeer return (rv); 12456495Sspeer } 12466495Sspeer 12476495Sspeer if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 12486495Sspeer nxge_hio_remres(shared, MAC_RING_TYPE_RX, tmap); 12496495Sspeer nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 12506495Sspeer nxge_hio_unshare(shared); 12516495Sspeer return (rv); 12526495Sspeer } 12536495Sspeer 12546495Sspeer rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 12556495Sspeer rxgroup->gindex = vr->rdc_tbl; 12566495Sspeer rxgroup->sindex = vr->region; 12576495Sspeer 12586495Sspeer shp = &nxge->shares[vr->region]; 12596495Sspeer shp->index = vr->region; 12606495Sspeer shp->vrp = (void *)vr; 12616495Sspeer shp->tmap = tmap; 12626495Sspeer shp->rmap = rmap; 12636495Sspeer shp->rxgroup = vr->rdc_tbl; 12646495Sspeer shp->active = B_TRUE; 12656495Sspeer 12666495Sspeer /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 12676495Sspeer *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 12686495Sspeer 12696495Sspeer *shandle = (mac_share_handle_t)shp; 12706495Sspeer 12716495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 12726495Sspeer return (0); 12736495Sspeer } 12746495Sspeer 12756495Sspeer void 12766495Sspeer nxge_hio_share_free(mac_share_handle_t shandle) 12776495Sspeer { 12786495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 12796495Sspeer 12806495Sspeer /* 12816495Sspeer * First, unassign the VR (take it back), 
12826495Sspeer * so we can enable interrupts again. 12836495Sspeer */ 12846498Sspeer (void) nxge_hio_share_unassign(shp->vrp); 12856495Sspeer 12866495Sspeer /* 12876495Sspeer * Free Ring Resources for TX and RX 12886495Sspeer */ 12896495Sspeer nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 12906495Sspeer nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 12916495Sspeer 12926495Sspeer /* 12936495Sspeer * Free VR resource. 12946495Sspeer */ 12956495Sspeer nxge_hio_unshare((vr_handle_t)shp->vrp); 12966495Sspeer 12976495Sspeer /* 12986495Sspeer * Clear internal handle state. 12996495Sspeer */ 13006495Sspeer shp->index = 0; 13016495Sspeer shp->vrp = (void *)NULL; 13026495Sspeer shp->tmap = 0; 13036495Sspeer shp->rmap = 0; 13046495Sspeer shp->rxgroup = 0; 13056495Sspeer shp->active = B_FALSE; 13066495Sspeer } 13076495Sspeer 13086495Sspeer void 13096495Sspeer nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 13106495Sspeer uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 13116495Sspeer { 13126495Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 13136495Sspeer 13146495Sspeer switch (type) { 13156495Sspeer case MAC_RING_TYPE_RX: 13166495Sspeer *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 13176495Sspeer *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 13186495Sspeer *rmap = shp->rmap; 13196495Sspeer *gnum = shp->rxgroup; 13206495Sspeer break; 13216495Sspeer 13226495Sspeer case MAC_RING_TYPE_TX: 13236495Sspeer *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 13246495Sspeer *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 13256495Sspeer *rmap = shp->tmap; 13266495Sspeer *gnum = 0; 13276495Sspeer break; 13286495Sspeer } 13296495Sspeer } 13306495Sspeer 13316495Sspeer /* 13326495Sspeer * nxge_hio_vr_share 13336495Sspeer * 13346495Sspeer * Find an unused Virtualization Region (VR). 
13356495Sspeer * 13366495Sspeer * Arguments: 13376495Sspeer * nxge 13386495Sspeer * 13396495Sspeer * Notes: 13406495Sspeer * 13416495Sspeer * Context: 13426495Sspeer * Service domain 13436495Sspeer */ 13446495Sspeer vr_handle_t 13456495Sspeer nxge_hio_vr_share( 13466495Sspeer nxge_t *nxge) 13476495Sspeer { 13486495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 13496495Sspeer nxge_hio_vr_t *vr; 13506495Sspeer 13516495Sspeer int first, limit, region; 13526495Sspeer 13536495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 13546495Sspeer 13556495Sspeer MUTEX_ENTER(&nhd->lock); 13566495Sspeer 13576495Sspeer if (nhd->available.vrs == 0) { 13586495Sspeer MUTEX_EXIT(&nhd->lock); 13596495Sspeer return (0); 13606495Sspeer } 13616495Sspeer 13626495Sspeer /* Find an empty virtual region (VR). */ 13636495Sspeer if (nxge->function_num == 0) { 13646495Sspeer // FUNC0_VIR0 'belongs' to NIU port 0. 13656495Sspeer first = FUNC0_VIR1; 13666495Sspeer limit = FUNC2_VIR0; 13676495Sspeer } else if (nxge->function_num == 1) { 13686495Sspeer // FUNC2_VIR0 'belongs' to NIU port 1. 
13696495Sspeer first = FUNC2_VIR1; 13706495Sspeer limit = FUNC_VIR_MAX; 13716495Sspeer } else { 13726495Sspeer cmn_err(CE_WARN, 13736495Sspeer "Shares not supported on function(%d) at this time.\n", 13746495Sspeer nxge->function_num); 13756495Sspeer } 13766495Sspeer 13776495Sspeer for (region = first; region < limit; region++) { 13786495Sspeer if (nhd->vr[region].nxge == 0) 13796495Sspeer break; 13806495Sspeer } 13816495Sspeer 13826495Sspeer if (region == limit) { 13836495Sspeer MUTEX_EXIT(&nhd->lock); 13846495Sspeer return (0); 13856495Sspeer } 13866495Sspeer 13876495Sspeer vr = &nhd->vr[region]; 13886495Sspeer vr->nxge = (uintptr_t)nxge; 13896495Sspeer vr->region = (uintptr_t)region; 13906495Sspeer 13916495Sspeer nhd->available.vrs--; 13926495Sspeer 13936495Sspeer MUTEX_EXIT(&nhd->lock); 13946495Sspeer 13956495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 13966495Sspeer 13976495Sspeer return ((vr_handle_t)vr); 13986495Sspeer } 13996495Sspeer 14006495Sspeer void 14016495Sspeer nxge_hio_unshare( 14026495Sspeer vr_handle_t shared) 14036495Sspeer { 14046495Sspeer nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 14056495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14066495Sspeer nxge_hio_data_t *nhd; 14076495Sspeer 14086495Sspeer vr_region_t region; 14096495Sspeer 14106495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 14116495Sspeer 14126495Sspeer if (!nxge) { 14136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 14146495Sspeer "vr->nxge is NULL")); 14156495Sspeer return; 14166495Sspeer } 14176495Sspeer 14186495Sspeer /* 14196495Sspeer * This function is no longer called, but I will keep it 14206495Sspeer * here in case we want to revisit this topic in the future. 
14216495Sspeer * 14226495Sspeer * nxge_hio_hostinfo_uninit(nxge, vr); 14236495Sspeer */ 14246495Sspeer (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 14256495Sspeer 14266495Sspeer nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 14276495Sspeer 14286495Sspeer MUTEX_ENTER(&nhd->lock); 14296495Sspeer 14306495Sspeer region = vr->region; 14316495Sspeer (void) memset(vr, 0, sizeof (*vr)); 14326495Sspeer vr->region = region; 14336495Sspeer 14346495Sspeer nhd->available.vrs++; 14356495Sspeer 14366495Sspeer MUTEX_EXIT(&nhd->lock); 14376495Sspeer 14386495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 14396495Sspeer } 14406495Sspeer 14416495Sspeer int 14426495Sspeer nxge_hio_addres( 14436495Sspeer vr_handle_t shared, 14446495Sspeer mac_ring_type_t type, 14456495Sspeer int count) 14466495Sspeer { 14476495Sspeer nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 14486495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14496495Sspeer int i; 14506495Sspeer 14516495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 14526495Sspeer 14536495Sspeer if (!nxge) 14546495Sspeer return (EINVAL); 14556495Sspeer 14566495Sspeer for (i = 0; i < count; i++) { 14576495Sspeer int rv; 14586495Sspeer if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) { 14596495Sspeer if (i == 0) /* Couldn't get even one DC. 
*/ 14606495Sspeer return (-rv); 14616495Sspeer else 14626495Sspeer break; 14636495Sspeer } 14646495Sspeer } 14656495Sspeer 14666495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 14676495Sspeer 14686495Sspeer return (0); 14696495Sspeer } 14706495Sspeer 14716495Sspeer /* ARGSUSED */ 14726495Sspeer void 14736495Sspeer nxge_hio_remres( 14746495Sspeer vr_handle_t shared, 14756495Sspeer mac_ring_type_t type, 14766495Sspeer res_map_t res_map) 14776495Sspeer { 14786495Sspeer nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 14796495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 14806495Sspeer nxge_grp_t *group; 14816495Sspeer 14826495Sspeer if (!nxge) { 14836495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: " 14846495Sspeer "vr->nxge is NULL")); 14856495Sspeer return; 14866495Sspeer } 14876495Sspeer 14886495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 14896495Sspeer 14906495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 14916495Sspeer while (group->dc) { 14926495Sspeer nxge_hio_dc_t *dc = group->dc; 14936495Sspeer NXGE_DC_RESET(res_map, dc->page); 14946495Sspeer nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 14956495Sspeer } 14966495Sspeer 14976495Sspeer if (res_map) { 14986495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 14996495Sspeer "res_map %lx", res_map)); 15006495Sspeer } 15016495Sspeer 15026495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 15036495Sspeer } 15046495Sspeer 15056495Sspeer /* 15066495Sspeer * nxge_hio_tdc_share 15076495Sspeer * 15086495Sspeer * Share an unused TDC channel. 
15096495Sspeer * 15106495Sspeer * Arguments: 15116495Sspeer * nxge 15126495Sspeer * 15136495Sspeer * Notes: 15146495Sspeer * 15156495Sspeer * A.7.3 Reconfigure Tx DMA channel 15166495Sspeer * Disable TxDMA A.9.6.10 15176495Sspeer * [Rebind TxDMA channel to Port A.9.6.7] 15186495Sspeer * 15196495Sspeer * We don't have to Rebind the TDC to the port - it always already bound. 15206495Sspeer * 15216495Sspeer * Soft Reset TxDMA A.9.6.2 15226495Sspeer * 15236495Sspeer * This procedure will be executed by nxge_init_txdma_channel() in the 15246495Sspeer * guest domain: 15256495Sspeer * 15266495Sspeer * Re-initialize TxDMA A.9.6.8 15276495Sspeer * Reconfigure TxDMA 15286495Sspeer * Enable TxDMA A.9.6.9 15296495Sspeer * 15306495Sspeer * Context: 15316495Sspeer * Service domain 15326495Sspeer */ 15336495Sspeer int 15346495Sspeer nxge_hio_tdc_share( 15356495Sspeer nxge_t *nxge, 15366495Sspeer int channel) 15376495Sspeer { 15386495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 15396495Sspeer tx_ring_t *ring; 15406495Sspeer 15416495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 15426495Sspeer 15436495Sspeer /* 15446495Sspeer * Wait until this channel is idle. 
15456495Sspeer */ 15466495Sspeer ring = nxge->tx_rings->rings[channel]; 15476495Sspeer MUTEX_ENTER(&ring->lock); 15486495Sspeer switch (ring->tx_ring_state) { 15496495Sspeer int count; 15506495Sspeer case TX_RING_STATE_OFFLINE: 15516495Sspeer break; 15526495Sspeer case TX_RING_STATE_IDLE: 15536495Sspeer ring->tx_ring_state = TX_RING_STATE_OFFLINE; 15546495Sspeer break; 15556495Sspeer case TX_RING_STATE_BUSY: 15566495Sspeer /* 30 seconds */ 15576495Sspeer for (count = 30 * 1000; count; count--) { 15586495Sspeer MUTEX_EXIT(&ring->lock); 15596495Sspeer drv_usecwait(1000); /* 1 millisecond */ 15606495Sspeer MUTEX_ENTER(&ring->lock); 15616495Sspeer if (ring->tx_ring_state == TX_RING_STATE_IDLE) { 15626495Sspeer ring->tx_ring_state = TX_RING_STATE_OFFLINE; 15636495Sspeer break; 15646495Sspeer } 15656495Sspeer } 15666495Sspeer if (count == 0) { 15676495Sspeer MUTEX_EXIT(&ring->lock); 15686495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 15696495Sspeer "Tx ring %d was always BUSY", channel)); 15706495Sspeer return (-EIO); 15716495Sspeer } 15726495Sspeer break; 15736602Sspeer default: 15746602Sspeer MUTEX_EXIT(&ring->lock); 15756602Sspeer return (-EIO); 15766495Sspeer } 15776602Sspeer MUTEX_EXIT(&ring->lock); 15786495Sspeer 15796495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 15806495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 15816495Sspeer "Failed to remove interrupt for TxDMA channel %d", 15826495Sspeer channel)); 15836495Sspeer return (NXGE_ERROR); 15846495Sspeer } 15856495Sspeer 15866495Sspeer /* Disable TxDMA A.9.6.10 */ 15876495Sspeer (void) nxge_txdma_channel_disable(nxge, channel); 15886495Sspeer 15896495Sspeer /* The SD is sharing this channel. 
*/ 15906495Sspeer NXGE_DC_SET(set->shared.map, channel); 15916495Sspeer set->shared.count++; 15926495Sspeer 15936602Sspeer /* Soft Reset TxDMA A.9.6.2 */ 15946602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 15956602Sspeer 15966495Sspeer /* 15976495Sspeer * Initialize the DC-specific FZC control registers. 15986495Sspeer * ----------------------------------------------------- 15996495Sspeer */ 16006495Sspeer if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 16016495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 16026495Sspeer "nx_hio_dc_share: FZC TDC failed: %d", channel)); 16036495Sspeer return (-EIO); 16046495Sspeer } 16056495Sspeer 16066495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 16076495Sspeer 16086495Sspeer return (0); 16096495Sspeer } 16106495Sspeer 16116495Sspeer /* 16126495Sspeer * nxge_hio_rdc_share 16136495Sspeer * 16146495Sspeer * Share an unused RDC channel. 16156495Sspeer * 16166495Sspeer * Arguments: 16176495Sspeer * nxge 16186495Sspeer * 16196495Sspeer * Notes: 16206495Sspeer * 16216495Sspeer * This is the latest version of the procedure to 16226495Sspeer * Reconfigure an Rx DMA channel: 16236495Sspeer * 16246495Sspeer * A.6.3 Reconfigure Rx DMA channel 16256495Sspeer * Stop RxMAC A.9.2.6 16266495Sspeer * Drain IPP Port A.9.3.6 16276495Sspeer * Stop and reset RxDMA A.9.5.3 16286495Sspeer * 16296495Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 16306495Sspeer * guest domain: 16316495Sspeer * 16326495Sspeer * Initialize RxDMA A.9.5.4 16336495Sspeer * Reconfigure RxDMA 16346495Sspeer * Enable RxDMA A.9.5.5 16356495Sspeer * 16366495Sspeer * We will do this here, since the RDC is a canalis non grata: 16376495Sspeer * Enable RxMAC A.9.2.10 16386495Sspeer * 16396495Sspeer * Context: 16406495Sspeer * Service domain 16416495Sspeer */ 16426495Sspeer int 16436495Sspeer nxge_hio_rdc_share( 16446495Sspeer nxge_t *nxge, 16456495Sspeer nxge_hio_vr_t *vr, 16466495Sspeer int channel) 16476495Sspeer { 
16486495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 16496495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 16506495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 16516495Sspeer nxge_rdc_grp_t *rdc_grp; 16526495Sspeer 16536495Sspeer int current, last; 16546495Sspeer 16556495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 16566495Sspeer 16576495Sspeer /* Disable interrupts. */ 16586495Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 16596495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 16606495Sspeer "Failed to remove interrupt for RxDMA channel %d", 16616495Sspeer channel)); 16626495Sspeer return (NXGE_ERROR); 16636495Sspeer } 16646495Sspeer 16656495Sspeer /* Stop RxMAC = A.9.2.6 */ 16666495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 16676495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 16686495Sspeer "Failed to disable RxMAC")); 16696495Sspeer } 16706495Sspeer 16716495Sspeer /* Drain IPP Port = A.9.3.6 */ 16726495Sspeer (void) nxge_ipp_drain(nxge); 16736495Sspeer 16746495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 16756495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 16766495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 16776495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 16786495Sspeer "Failed to disable RxDMA channel %d", channel)); 16796495Sspeer } 16806495Sspeer 16816495Sspeer /* The SD is sharing this channel. */ 16826495Sspeer NXGE_DC_SET(set->shared.map, channel); 16836495Sspeer set->shared.count++; 16846495Sspeer 16856602Sspeer // Assert RST: RXDMA_CFIG1[30] = 1 16866602Sspeer nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 16876602Sspeer 16886495Sspeer /* 16896495Sspeer * We have to reconfigure the RDC table(s) 16906495Sspeer * to which this channel belongs. 
16916495Sspeer */ 16926495Sspeer current = hardware->def_mac_rxdma_grpid; 16936495Sspeer last = current + hardware->max_rdc_grpids; 16946495Sspeer for (; current < last; current++) { 16956495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 16966495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[current]; 16976495Sspeer rdc_grp->map = set->owned.map; 16986495Sspeer rdc_grp->max_rdcs--; 16996495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current); 17006495Sspeer } 17016495Sspeer } 17026495Sspeer 17036495Sspeer /* 17046495Sspeer * The guest domain will reconfigure the RDC later. 17056495Sspeer * 17066495Sspeer * But in the meantime, we must re-enable the Rx MAC so 17076495Sspeer * that we can start receiving packets again on the 17086495Sspeer * remaining RDCs: 17096495Sspeer * 17106495Sspeer * Enable RxMAC = A.9.2.10 17116495Sspeer */ 17126495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 17136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17146495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 17156495Sspeer } 17166495Sspeer 17176495Sspeer /* 17186495Sspeer * Initialize the DC-specific FZC control registers. 17196495Sspeer * ----------------------------------------------------- 17206495Sspeer */ 17216495Sspeer if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 17226495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17236495Sspeer "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 17246495Sspeer return (-EIO); 17256495Sspeer } 17266495Sspeer 17276495Sspeer /* 17286495Sspeer * We have to initialize the guest's RDC table, too. 
17296495Sspeer * ----------------------------------------------------- 17306495Sspeer */ 17316495Sspeer rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 17326495Sspeer if (rdc_grp->max_rdcs == 0) { 17336495Sspeer rdc_grp->start_rdc = (uint8_t)channel; 17346495Sspeer rdc_grp->def_rdc = (uint8_t)channel; 17356495Sspeer rdc_grp->max_rdcs = 1; 17366495Sspeer } else { 17376495Sspeer rdc_grp->max_rdcs++; 17386495Sspeer } 17396495Sspeer NXGE_DC_SET(rdc_grp->map, channel); 17406495Sspeer 17416495Sspeer if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 17426495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 17436495Sspeer "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 17446495Sspeer return (-EIO); 17456495Sspeer } 17466495Sspeer 17476495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 17486495Sspeer 17496495Sspeer return (0); 17506495Sspeer } 17516495Sspeer 17526495Sspeer /* 17536495Sspeer * nxge_hio_dc_share 17546495Sspeer * 17556495Sspeer * Share a DMA channel with a guest domain. 17566495Sspeer * 17576495Sspeer * Arguments: 17586495Sspeer * nxge 17596495Sspeer * vr The VR that <channel> will belong to. 17606495Sspeer * type Tx or Rx. 17616495Sspeer * res_map The resource map used by the caller, which we will 17626495Sspeer * update if successful. 
17636495Sspeer * 17646495Sspeer * Notes: 17656495Sspeer * 17666495Sspeer * Context: 17676495Sspeer * Service domain 17686495Sspeer */ 17696495Sspeer int 17706495Sspeer nxge_hio_dc_share( 17716495Sspeer nxge_t *nxge, 17726495Sspeer nxge_hio_vr_t *vr, 17736495Sspeer mac_ring_type_t type) 17746495Sspeer { 17756495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 17766495Sspeer nxge_hw_pt_cfg_t *hardware; 17776495Sspeer nxge_hio_dc_t *dc; 17786495Sspeer int channel, limit; 17796495Sspeer 17806495Sspeer nxge_grp_set_t *set; 17816495Sspeer nxge_grp_t *group; 17826495Sspeer 17836495Sspeer int slot; 17846495Sspeer 17856495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 17866495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 17876495Sspeer 17886495Sspeer /* 17896495Sspeer * In version 1.0, we may only give a VR 2 RDCs or TDCs. 17906495Sspeer * Not only that, but the HV has statically assigned the 17916495Sspeer * channels like so: 17926495Sspeer * VR0: RDC0 & RDC1 17936495Sspeer * VR1: RDC2 & RDC3, etc. 17946495Sspeer * The TDCs are assigned in exactly the same way. 17956495Sspeer * 17966495Sspeer * So, for example 17976495Sspeer * hardware->start_rdc + vr->region * 2; 17986495Sspeer * VR1: hardware->start_rdc + 1 * 2; 17996495Sspeer * VR3: hardware->start_rdc + 3 * 2; 18006495Sspeer * If start_rdc is 0, we end up with 2 or 6. 18016495Sspeer * If start_rdc is 8, we end up with 10 or 14. 18026495Sspeer */ 18036495Sspeer 18046495Sspeer set = (type == MAC_RING_TYPE_TX ? 
&nxge->tx_set : &nxge->rx_set); 18056495Sspeer hardware = &nxge->pt_config.hw_config; 18066495Sspeer 18076495Sspeer // This code is still NIU-specific (assuming only 2 ports) 18086495Sspeer channel = hardware->start_rdc + (vr->region % 4) * 2; 18096495Sspeer limit = channel + 2; 18106495Sspeer 18116495Sspeer MUTEX_ENTER(&nhd->lock); 18126495Sspeer for (; channel < limit; channel++) { 18136495Sspeer if ((1 << channel) & set->owned.map) { 18146495Sspeer break; 18156495Sspeer } 18166495Sspeer } 18176495Sspeer 18186495Sspeer if (channel == limit) { 18196495Sspeer MUTEX_EXIT(&nhd->lock); 18206495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 18216495Sspeer "nx_hio_dc_share: there are no channels to share")); 18226495Sspeer return (-EIO); 18236495Sspeer } 18246495Sspeer 18256495Sspeer MUTEX_EXIT(&nhd->lock); 18266495Sspeer 18276495Sspeer /* -------------------------------------------------- */ 18286495Sspeer slot = (type == MAC_RING_TYPE_TX) ? 18296495Sspeer nxge_hio_tdc_share(nxge, channel) : 18306495Sspeer nxge_hio_rdc_share(nxge, vr, channel); 18316495Sspeer 18326495Sspeer if (slot < 0) { 18336495Sspeer if (type == MAC_RING_TYPE_RX) { 18346495Sspeer nxge_hio_rdc_unshare(nxge, channel); 18356495Sspeer } else { 18366495Sspeer nxge_hio_tdc_unshare(nxge, channel); 18376495Sspeer } 18386495Sspeer return (slot); 18396495Sspeer } 18406495Sspeer 18416495Sspeer MUTEX_ENTER(&nhd->lock); 18426495Sspeer 18436495Sspeer /* 18446495Sspeer * Tag this channel. 18456495Sspeer * -------------------------------------------------- 18466495Sspeer */ 18476495Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 18486495Sspeer 18496495Sspeer dc->vr = vr; 18506495Sspeer dc->channel = (nxge_channel_t)channel; 18516495Sspeer 18526495Sspeer MUTEX_EXIT(&nhd->lock); 18536495Sspeer 18546495Sspeer /* 18556495Sspeer * vr->[t|r]x_group is used by the service domain to 18566495Sspeer * keep track of its shared DMA channels. 
18576495Sspeer */ 18586495Sspeer MUTEX_ENTER(&nxge->group_lock); 18596495Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 18606495Sspeer 18616495Sspeer dc->group = (vr_handle_t)group; 18626495Sspeer 18636495Sspeer /* Initialize <group>, if necessary */ 18646495Sspeer if (group->count == 0) { 18656495Sspeer group->nxge = nxge; 18666495Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 18676495Sspeer VP_BOUND_TX : VP_BOUND_RX; 18686495Sspeer group->sequence = nhd->sequence++; 18696495Sspeer group->active = B_TRUE; 18706495Sspeer } 18716495Sspeer 18726495Sspeer MUTEX_EXIT(&nxge->group_lock); 18736495Sspeer 18746495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 18756495Sspeer "DC share: %cDC %d was assigned to slot %d", 18766495Sspeer type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 18776495Sspeer 18786495Sspeer nxge_grp_dc_append(nxge, group, dc); 18796495Sspeer 18806495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 18816495Sspeer 18826495Sspeer return (0); 18836495Sspeer } 18846495Sspeer 18856495Sspeer /* 18866495Sspeer * nxge_hio_tdc_unshare 18876495Sspeer * 18886495Sspeer * Unshare a TDC. 18896495Sspeer * 18906495Sspeer * Arguments: 18916495Sspeer * nxge 18926495Sspeer * channel The channel to unshare (add again). 
18936495Sspeer * 18946495Sspeer * Notes: 18956495Sspeer * 18966495Sspeer * Context: 18976495Sspeer * Service domain 18986495Sspeer */ 18996495Sspeer void 19006495Sspeer nxge_hio_tdc_unshare( 19016495Sspeer nxge_t *nxge, 19026495Sspeer int channel) 19036495Sspeer { 19046495Sspeer nxge_grp_set_t *set = &nxge->tx_set; 19056495Sspeer vr_handle_t handle = (vr_handle_t)set->group[0]; 19066495Sspeer 19076495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 19086495Sspeer 19096495Sspeer NXGE_DC_RESET(set->shared.map, channel); 19106495Sspeer set->shared.count--; 19116495Sspeer 19126495Sspeer if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) { 19136495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19146495Sspeer "Failed to initialize TxDMA channel %d", channel)); 19156495Sspeer return; 19166495Sspeer } 19176495Sspeer 19186495Sspeer /* Re-add this interrupt. */ 19196495Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 19206495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 19216495Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 19226495Sspeer } 19236495Sspeer 19246495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 19256495Sspeer } 19266495Sspeer 19276495Sspeer /* 19286495Sspeer * nxge_hio_rdc_unshare 19296495Sspeer * 19306495Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 19316495Sspeer * 19326495Sspeer * Arguments: 19336495Sspeer * nxge 19346495Sspeer * channel The channel to unshare (add again). 
19356495Sspeer * 19366495Sspeer * Notes: 19376495Sspeer * 19386495Sspeer * Context: 19396495Sspeer * Service domain 19406495Sspeer */ 19416495Sspeer void 19426495Sspeer nxge_hio_rdc_unshare( 19436495Sspeer nxge_t *nxge, 19446495Sspeer int channel) 19456495Sspeer { 19466495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 19476495Sspeer nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 19486495Sspeer 19496495Sspeer nxge_grp_set_t *set = &nxge->rx_set; 19506495Sspeer vr_handle_t handle = (vr_handle_t)set->group[0]; 19516495Sspeer int current, last; 19526495Sspeer 19536495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 19546495Sspeer 19556495Sspeer /* Stop RxMAC = A.9.2.6 */ 19566495Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 19576495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19586495Sspeer "Failed to disable RxMAC")); 19596495Sspeer } 19606495Sspeer 19616495Sspeer /* Drain IPP Port = A.9.3.6 */ 19626495Sspeer (void) nxge_ipp_drain(nxge); 19636495Sspeer 19646495Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 19656495Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 19666495Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 19676495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19686495Sspeer "Failed to disable RxDMA channel %d", channel)); 19696495Sspeer } 19706495Sspeer 19716495Sspeer NXGE_DC_RESET(set->shared.map, channel); 19726495Sspeer set->shared.count--; 19736495Sspeer 19746495Sspeer /* 19756495Sspeer * Assert RST: RXDMA_CFIG1[30] = 1 19766495Sspeer * 19776495Sspeer * Initialize RxDMA A.9.5.4 19786495Sspeer * Reconfigure RxDMA 19796495Sspeer * Enable RxDMA A.9.5.5 19806495Sspeer */ 19816495Sspeer if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) { 19826495Sspeer /* Be sure to re-enable the RX MAC. 
*/ 19836495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 19846495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 19856495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 19866495Sspeer } 19876495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 19886495Sspeer "Failed to initialize RxDMA channel %d", channel)); 19896495Sspeer return; 19906495Sspeer } 19916495Sspeer 19926495Sspeer /* 19936495Sspeer * We have to reconfigure the RDC table(s) 19946495Sspeer * to which this channel once again belongs. 19956495Sspeer */ 19966495Sspeer current = hardware->def_mac_rxdma_grpid; 19976495Sspeer last = current + hardware->max_rdc_grpids; 19986495Sspeer for (; current < last; current++) { 19996495Sspeer if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 20006495Sspeer nxge_rdc_grp_t *group; 20016495Sspeer group = &nxge->pt_config.rdc_grps[current]; 20026495Sspeer group->map = set->owned.map; 20036495Sspeer group->max_rdcs++; 20046495Sspeer (void) nxge_init_fzc_rdc_tbl(nxge, current); 20056495Sspeer } 20066495Sspeer } 20076495Sspeer 20086495Sspeer /* 20096495Sspeer * Enable RxMAC = A.9.2.10 20106495Sspeer */ 20116495Sspeer if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 20126495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20136495Sspeer "nx_hio_rdc_share: Rx MAC still disabled")); 20146495Sspeer return; 20156495Sspeer } 20166495Sspeer 20176495Sspeer /* Re-add this interrupt. */ 20186495Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 20196495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20206495Sspeer "nx_hio_rdc_unshare: Failed to add interrupt for " 20216495Sspeer "RxDMA CHANNEL %d", channel)); 20226495Sspeer } 20236495Sspeer 20246495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 20256495Sspeer } 20266495Sspeer 20276495Sspeer /* 20286495Sspeer * nxge_hio_dc_unshare 20296495Sspeer * 20306495Sspeer * Unshare (reuse) a DMA channel. 
20316495Sspeer * 20326495Sspeer * Arguments: 20336495Sspeer * nxge 20346495Sspeer * vr The VR that <channel> belongs to. 20356495Sspeer * type Tx or Rx. 20366495Sspeer * channel The DMA channel to reuse. 20376495Sspeer * 20386495Sspeer * Notes: 20396495Sspeer * 20406495Sspeer * Context: 20416495Sspeer * Service domain 20426495Sspeer */ 20436495Sspeer void 20446495Sspeer nxge_hio_dc_unshare( 20456495Sspeer nxge_t *nxge, 20466495Sspeer nxge_hio_vr_t *vr, 20476495Sspeer mac_ring_type_t type, 20486495Sspeer int channel) 20496495Sspeer { 20506495Sspeer nxge_grp_t *group; 20516495Sspeer nxge_hio_dc_t *dc; 20526495Sspeer 20536495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 20546495Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 20556495Sspeer 20566495Sspeer /* Unlink the channel from its group. */ 20576495Sspeer /* -------------------------------------------------- */ 20586495Sspeer group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 20596602Sspeer NXGE_DC_RESET(group->map, channel); 20606495Sspeer if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 20616495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 20626495Sspeer "nx_hio_dc_unshare(%d) failed", channel)); 20636495Sspeer return; 20646495Sspeer } 20656495Sspeer 20666495Sspeer dc->vr = 0; 20676495Sspeer dc->cookie = 0; 20686495Sspeer 20696495Sspeer if (type == MAC_RING_TYPE_RX) { 20706495Sspeer nxge_hio_rdc_unshare(nxge, channel); 20716495Sspeer } else { 20726495Sspeer nxge_hio_tdc_unshare(nxge, channel); 20736495Sspeer } 20746495Sspeer 20756495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 20766495Sspeer } 20776495Sspeer 20786495Sspeer #endif /* if defined(sun4v) */ 2079