16495Sspeer /*
26495Sspeer * CDDL HEADER START
36495Sspeer *
46495Sspeer * The contents of this file are subject to the terms of the
56495Sspeer * Common Development and Distribution License (the "License").
66495Sspeer * You may not use this file except in compliance with the License.
76495Sspeer *
86495Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
96495Sspeer * or http://www.opensolaris.org/os/licensing.
106495Sspeer * See the License for the specific language governing permissions
116495Sspeer * and limitations under the License.
126495Sspeer *
136495Sspeer * When distributing Covered Code, include this CDDL HEADER in each
146495Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
156495Sspeer * If applicable, add the following below this CDDL HEADER, with the
166495Sspeer * fields enclosed by brackets "[]" replaced with your own identifying
176495Sspeer * information: Portions Copyright [yyyy] [name of copyright owner]
186495Sspeer *
196495Sspeer * CDDL HEADER END
206495Sspeer */
216495Sspeer
226495Sspeer /*
23*11878SVenu.Iyer@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
246495Sspeer * Use is subject to license terms.
256495Sspeer */
266495Sspeer
276495Sspeer /*
286495Sspeer * nxge_hio_guest.c
296495Sspeer *
306495Sspeer * This file manages the virtualization resources for a guest domain.
316495Sspeer *
326495Sspeer */
336495Sspeer
346495Sspeer #include <sys/nxge/nxge_impl.h>
356495Sspeer #include <sys/nxge/nxge_fzc.h>
366495Sspeer #include <sys/nxge/nxge_rxdma.h>
376495Sspeer #include <sys/nxge/nxge_txdma.h>
386495Sspeer #include <sys/nxge/nxge_hio.h>
396495Sspeer
406495Sspeer /*
416495Sspeer * nxge_guest_regs_map
426495Sspeer *
436495Sspeer * Map in a guest domain's register set(s).
446495Sspeer *
456495Sspeer * Arguments:
466495Sspeer * nxge
476495Sspeer *
486495Sspeer * Notes:
496495Sspeer * Note that we set <is_vraddr> to TRUE.
506495Sspeer *
516495Sspeer * Context:
526495Sspeer * Guest domain
536495Sspeer */
/*
 * DDI access attributes used when mapping the guest's register space:
 * little-endian structural access with strict store ordering.
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
596495Sspeer
606495Sspeer int
nxge_guest_regs_map(nxge_t * nxge)6110309SSriharsha.Basavapatna@Sun.COM nxge_guest_regs_map(nxge_t *nxge)
626495Sspeer {
636495Sspeer dev_regs_t *regs;
646495Sspeer off_t regsize;
656495Sspeer int rv;
666495Sspeer
676495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));
686495Sspeer
696495Sspeer /* So we can allocate properly-aligned memory. */
706495Sspeer nxge->niu_type = N2_NIU; /* Version 1.0 only */
716495Sspeer nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */
726495Sspeer
736495Sspeer nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
746495Sspeer regs = nxge->dev_regs;
756495Sspeer
766495Sspeer if ((rv = ddi_dev_regsize(nxge->dip, 0, ®size)) != DDI_SUCCESS) {
776495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
786495Sspeer return (NXGE_ERROR);
796495Sspeer }
806495Sspeer
816495Sspeer rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)®s->nxge_regp, 0, 0,
826495Sspeer &nxge_guest_register_access_attributes, ®s->nxge_regh);
836495Sspeer
846495Sspeer if (rv != DDI_SUCCESS) {
856495Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
866495Sspeer return (NXGE_ERROR);
876495Sspeer }
886495Sspeer
896495Sspeer nxge->npi_handle.regh = regs->nxge_regh;
906495Sspeer nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
916495Sspeer nxge->npi_handle.is_vraddr = B_TRUE;
926495Sspeer nxge->npi_handle.function.instance = nxge->instance;
936495Sspeer nxge->npi_handle.function.function = nxge->function_num;
946495Sspeer nxge->npi_handle.nxgep = (void *)nxge;
956495Sspeer
966495Sspeer /* NPI_REG_ADD_HANDLE_SET() */
976495Sspeer nxge->npi_reg_handle.regh = regs->nxge_regh;
986495Sspeer nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
996495Sspeer nxge->npi_reg_handle.is_vraddr = B_TRUE;
1006495Sspeer nxge->npi_reg_handle.function.instance = nxge->instance;
1016495Sspeer nxge->npi_reg_handle.function.function = nxge->function_num;
1026495Sspeer nxge->npi_reg_handle.nxgep = (void *)nxge;
1036495Sspeer
1046495Sspeer /* NPI_VREG_ADD_HANDLE_SET() */
1056495Sspeer nxge->npi_vreg_handle.regh = regs->nxge_regh;
1066495Sspeer nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
1076495Sspeer nxge->npi_vreg_handle.is_vraddr = B_TRUE;
1086495Sspeer nxge->npi_vreg_handle.function.instance = nxge->instance;
1096495Sspeer nxge->npi_vreg_handle.function.function = nxge->function_num;
1106495Sspeer nxge->npi_vreg_handle.nxgep = (void *)nxge;
1116495Sspeer
1126495Sspeer regs->nxge_vir_regp = regs->nxge_regp;
1136495Sspeer regs->nxge_vir_regh = regs->nxge_regh;
1146495Sspeer
1156495Sspeer /*
1166495Sspeer * We do NOT set the PCI, MSI-X, 2nd Virtualization,
1176495Sspeer * or FCODE reg variables.
1186495Sspeer */
1196495Sspeer
1206495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));
1216495Sspeer
1226495Sspeer return (NXGE_OK);
1236495Sspeer }
1246495Sspeer
1256495Sspeer void
nxge_guest_regs_map_free(nxge_t * nxge)1266495Sspeer nxge_guest_regs_map_free(
1276495Sspeer nxge_t *nxge)
1286495Sspeer {
1296495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));
1306495Sspeer
1316495Sspeer if (nxge->dev_regs) {
1326495Sspeer if (nxge->dev_regs->nxge_regh) {
1336495Sspeer NXGE_DEBUG_MSG((nxge, DDI_CTL,
1346495Sspeer "==> nxge_unmap_regs: device registers"));
1356495Sspeer ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
1366495Sspeer nxge->dev_regs->nxge_regh = NULL;
1376495Sspeer }
1386495Sspeer kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
1396495Sspeer nxge->dev_regs = 0;
1406495Sspeer }
1416495Sspeer
1426495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
1436495Sspeer }
1446495Sspeer
1456495Sspeer #if defined(sun4v)
1466495Sspeer
1476495Sspeer /*
1486495Sspeer * -------------------------------------------------------------
1496495Sspeer * Local prototypes
1506495Sspeer * -------------------------------------------------------------
1516495Sspeer */
1526495Sspeer static nxge_hio_dc_t *nxge_guest_dc_alloc(
1536495Sspeer nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
1546495Sspeer
1556495Sspeer static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
1566495Sspeer static void nxge_check_guest_state(nxge_hio_vr_t *);
1576495Sspeer
1586495Sspeer /*
1596495Sspeer * nxge_hio_vr_add
1606495Sspeer *
1616495Sspeer * If we have been given a virtualization region (VR),
1626495Sspeer * then initialize it.
1636495Sspeer *
1646495Sspeer * Arguments:
1656495Sspeer * nxge
1666495Sspeer *
1676495Sspeer * Notes:
1686495Sspeer *
1696495Sspeer * Context:
1706495Sspeer * Guest domain
1716495Sspeer */
/*
 * Bring up the virtualization region (VR) granted to this guest:
 *  1. read the HV cookie from the "reg" property,
 *  2. query the hypervisor for the VR's address/size,
 *  3. reserve a free slot in the shared nhd->vr[] table,
 *  4. initialize interrupts,
 *  5. discover and record the virtual Tx/Rx DMA channels we own,
 *  6. register with the MAC layer.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern nxge_status_t nxge_mac_register(p_nxge_t);

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	nxge_hio_dc_t *dc;
	int *reg_val;
	uint_t reg_len;
	uint8_t vr_index;
	nxhv_vr_fp_t *fp;
	uint64_t vr_address, vr_size;
	uint32_t cookie;
	nxhv_dc_fp_t *tx, *rx;
	uint64_t tx_map, rx_map;
	uint64_t hv_rv;
	int i;
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
		/*
		 * Can't add VR to the service domain from which we came.
		 */
		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
		return (DDI_FAILURE);
	}

	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (DDI_FAILURE);
	}

	/* The first "reg" cell is the HV cookie for this guest's VR. */
	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			/* Claim the slot under the lock. */
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (DDI_FAILURE);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless. It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 *
	 * NOTE(review): the failure paths from here down return without
	 * releasing the nhd->vr[] slot claimed above — presumably the
	 * slot is reclaimed at detach via nxge_hio_vr_release(); verify.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * I repeat, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	status = nxge_mac_register(nxge);
	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
		    nxge->instance);
		return (DDI_FAILURE);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (DDI_SUCCESS);
}
3606495Sspeer
3616495Sspeer /*
3626495Sspeer * nxge_guest_dc_alloc
3636495Sspeer *
3646495Sspeer * Find a free nxge_hio_dc_t data structure.
3656495Sspeer *
3666495Sspeer * Arguments:
3676495Sspeer * nxge
3686495Sspeer * type TRANSMIT or RECEIVE.
3696495Sspeer *
3706495Sspeer * Notes:
3716495Sspeer *
3726495Sspeer * Context:
3736495Sspeer * Guest domain
3746495Sspeer */
3756495Sspeer nxge_hio_dc_t *
nxge_guest_dc_alloc(nxge_t * nxge,nxge_hio_vr_t * vr,nxge_grp_type_t type)3766495Sspeer nxge_guest_dc_alloc(
3776495Sspeer nxge_t *nxge,
3786495Sspeer nxge_hio_vr_t *vr,
3796495Sspeer nxge_grp_type_t type)
3806495Sspeer {
3816495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
3826495Sspeer nxge_hio_dc_t *dc;
3836495Sspeer int limit, i;
3846495Sspeer
3856495Sspeer /*
3866495Sspeer * In the guest domain, there may be more than one VR.
3876495Sspeer * each one of which will be using the same slots, or
3886495Sspeer * virtual channel numbers. So the <nhd>'s rdc & tdc
3896495Sspeer * tables must be shared.
3906495Sspeer */
3916495Sspeer if (type == NXGE_TRANSMIT_GROUP) {
3926495Sspeer dc = &nhd->tdc[0];
3936495Sspeer limit = NXGE_MAX_TDCS;
3946495Sspeer } else {
3956495Sspeer dc = &nhd->rdc[0];
3966495Sspeer limit = NXGE_MAX_RDCS;
3976495Sspeer }
3986495Sspeer
3996495Sspeer MUTEX_ENTER(&nhd->lock);
4006495Sspeer for (i = 0; i < limit; i++, dc++) {
4016495Sspeer if (dc->vr == 0) {
4026495Sspeer dc->vr = vr;
4036495Sspeer dc->cookie = vr->cookie;
4046495Sspeer MUTEX_EXIT(&nhd->lock);
4056495Sspeer return (dc);
4066495Sspeer }
4076495Sspeer }
4086495Sspeer MUTEX_EXIT(&nhd->lock);
4096495Sspeer
4106495Sspeer return (0);
4116495Sspeer }
4126495Sspeer
413*11878SVenu.Iyer@Sun.COM int
nxge_hio_get_dc_htable_idx(nxge_t * nxge,vpc_type_t type,uint32_t channel)414*11878SVenu.Iyer@Sun.COM nxge_hio_get_dc_htable_idx(nxge_t *nxge, vpc_type_t type, uint32_t channel)
415*11878SVenu.Iyer@Sun.COM {
416*11878SVenu.Iyer@Sun.COM nxge_hio_dc_t *dc;
417*11878SVenu.Iyer@Sun.COM
418*11878SVenu.Iyer@Sun.COM ASSERT(isLDOMguest(nxge));
419*11878SVenu.Iyer@Sun.COM
420*11878SVenu.Iyer@Sun.COM dc = nxge_grp_dc_find(nxge, type, channel);
421*11878SVenu.Iyer@Sun.COM if (dc == NULL)
422*11878SVenu.Iyer@Sun.COM return (-1);
423*11878SVenu.Iyer@Sun.COM
424*11878SVenu.Iyer@Sun.COM return (dc->ldg.vector);
425*11878SVenu.Iyer@Sun.COM }
426*11878SVenu.Iyer@Sun.COM
4276495Sspeer /*
4286495Sspeer * res_map_parse
4296495Sspeer *
4306495Sspeer * Parse a resource map. The resources are DMA channels, receive
4316495Sspeer * or transmit, depending on <type>.
4326495Sspeer *
4336495Sspeer * Arguments:
4346495Sspeer * nxge
4356495Sspeer * type Transmit or receive.
4366495Sspeer * res_map The resource map to parse.
4376495Sspeer *
4386495Sspeer * Notes:
4396495Sspeer *
4406495Sspeer * Context:
4416495Sspeer * Guest domain
4426495Sspeer */
4436495Sspeer void
res_map_parse(nxge_t * nxge,nxge_grp_type_t type,uint64_t res_map)4446495Sspeer res_map_parse(
4456495Sspeer nxge_t *nxge,
4466495Sspeer nxge_grp_type_t type,
4476495Sspeer uint64_t res_map)
4486495Sspeer {
4496495Sspeer uint8_t slots, mask, slot;
4506495Sspeer int first, count;
4516495Sspeer
4526495Sspeer nxge_hw_pt_cfg_t *hardware;
4536495Sspeer nxge_grp_t *group;
4546495Sspeer
4556495Sspeer /* Slots are numbered 0 - 7. */
4566495Sspeer slots = (uint8_t)(res_map & 0xff);
4576495Sspeer
4586495Sspeer /* Count the number of bits in the bitmap. */
4596495Sspeer for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
4606495Sspeer if (slots & mask)
4616495Sspeer count++;
4626495Sspeer if (count == 1)
4636495Sspeer first = slot;
4646495Sspeer mask <<= 1;
4656495Sspeer }
4666495Sspeer
4676495Sspeer hardware = &nxge->pt_config.hw_config;
4686495Sspeer group = (type == NXGE_TRANSMIT_GROUP) ?
4696495Sspeer nxge->tx_set.group[0] : nxge->rx_set.group[0];
4706495Sspeer
4716495Sspeer /*
4726495Sspeer * A guest domain has one Tx & one Rx group, so far.
4736495Sspeer * In the future, there may be more than one.
4746495Sspeer */
4756495Sspeer if (type == NXGE_TRANSMIT_GROUP) {
4766495Sspeer nxge_dma_pt_cfg_t *port = &nxge->pt_config;
4778275SEric Cheng nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];
4786495Sspeer
4796495Sspeer hardware->tdc.start = first;
4806495Sspeer hardware->tdc.count = count;
4816495Sspeer hardware->tdc.owned = count;
4826495Sspeer
4838275SEric Cheng tdc_grp->start_tdc = first;
4848275SEric Cheng tdc_grp->max_tdcs = (uint8_t)count;
4858275SEric Cheng tdc_grp->grp_index = group->index;
4868275SEric Cheng tdc_grp->map = slots;
4878275SEric Cheng
4886495Sspeer group->map = slots;
4896495Sspeer
4906495Sspeer /*
4916495Sspeer * Pointless in a guest domain. This bitmap is used
4926495Sspeer * in only one place: nxge_txc_init(),
4936495Sspeer * a service-domain-only function.
4946495Sspeer */
4956495Sspeer port->tx_dma_map = slots;
4966495Sspeer
4976495Sspeer nxge->tx_set.owned.map |= slots;
4986495Sspeer } else {
4996495Sspeer nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];
5006495Sspeer
5016495Sspeer hardware->start_rdc = first;
5026495Sspeer hardware->max_rdcs = count;
5036495Sspeer
5046495Sspeer rdc_grp->start_rdc = (uint8_t)first;
5056495Sspeer rdc_grp->max_rdcs = (uint8_t)count;
5066495Sspeer rdc_grp->def_rdc = (uint8_t)first;
5076495Sspeer
5086495Sspeer rdc_grp->map = slots;
5096495Sspeer group->map = slots;
5106495Sspeer
5116495Sspeer nxge->rx_set.owned.map |= slots;
5126495Sspeer }
5136495Sspeer }
5146495Sspeer
5156495Sspeer /*
5166495Sspeer * nxge_hio_vr_release
5176495Sspeer *
5186495Sspeer * Release a virtualization region (VR).
5196495Sspeer *
5206495Sspeer * Arguments:
5216495Sspeer * nxge
5226495Sspeer *
5236495Sspeer * Notes:
5246495Sspeer * We must uninitialize all DMA channels associated with the VR, too.
5256495Sspeer *
5266495Sspeer * The service domain will re-initialize these DMA channels later.
5276495Sspeer * See nxge_hio.c:nxge_hio_share_free() for details.
5286495Sspeer *
5296495Sspeer * Context:
5306495Sspeer * Guest domain
5316495Sspeer */
/*
 * Tear down this guest's VR: interrupts first, then Rx and Tx DMA
 * channels, then the two (Tx/Rx) groups, and finally release the
 * nhd->vr[] slot claimed in nxge_hio_vr_add().  The teardown order
 * mirrors the bring-up order in reverse; do not reorder.
 *
 * Returns NXGE_OK (also when there is no VR to release).
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	/* Nothing was ever added; nothing to do. */
	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}

	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups. Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/*
	 * Clean up.
	 *
	 * NOTE(review): nxge->hio_vr is not cleared here, so a second
	 * call would re-run the teardown above — verify callers only
	 * invoke this once per add.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = NULL;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}
5836495Sspeer
5846495Sspeer #if defined(NIU_LP_WORKAROUND)
5856495Sspeer /*
5866495Sspeer * nxge_tdc_lp_conf
5876495Sspeer *
5886495Sspeer * Configure the logical pages for a TDC.
5896495Sspeer *
5906495Sspeer * Arguments:
5916495Sspeer * nxge
5926495Sspeer * channel The TDC to configure.
5936495Sspeer *
5946495Sspeer * Notes:
5956495Sspeer *
5966495Sspeer * Context:
5976495Sspeer * Guest domain
5986495Sspeer */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t *dc;
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	tx_ring_t *ring;

	uint64_t hv_rv;
	uint64_t ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	/* The channel must already be bound to a DC we own. */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	/* Tell the hypervisor where logical page 0 lives. */
	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv: %d "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the page back purely as a diagnostic cross-check. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Diagnostic read-back for page 1, as above. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	/* Mark the ring configured so we never reprogram the pages. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}
7036495Sspeer
7046495Sspeer /*
7056495Sspeer * nxge_rdc_lp_conf
7066495Sspeer *
7076495Sspeer * Configure an RDC's logical pages.
7086495Sspeer *
7096495Sspeer * Arguments:
7106495Sspeer * nxge
7116495Sspeer * channel The RDC to configure.
7126495Sspeer *
7136495Sspeer * Notes:
7146495Sspeer *
7156495Sspeer * Context:
7166495Sspeer * Guest domain
7176495Sspeer */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t *dc;
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	rx_rbr_ring_t *ring;

	uint64_t hv_rv;
	uint64_t ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	/* Already configured; nothing to do. */
	if (ring->hv_set) {
		return (NXGE_OK);
	}

	/* The channel must already be bound to a DC we own. */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	/* Tell the hypervisor where logical page 0 lives. */
	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the page back purely as a diagnostic cross-check. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Diagnostic read-back for page 1, as above. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	/* Mark the ring configured so we never reprogram the pages. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
8216495Sspeer #endif /* defined(NIU_LP_WORKAROUND) */
8226495Sspeer
8236495Sspeer /*
8246495Sspeer * This value is in milliseconds.
8256495Sspeer */
8266495Sspeer #define NXGE_GUEST_TIMER 500 /* 1/2 second, for now */
8276495Sspeer
8286495Sspeer /*
8296495Sspeer * nxge_hio_start_timer
8306495Sspeer *
8316495Sspeer * Start the timer which checks for Tx hangs.
8326495Sspeer *
8336495Sspeer * Arguments:
8346495Sspeer * nxge
8356495Sspeer *
8366495Sspeer * Notes:
8376495Sspeer * This function is called from nxge_attach().
8386495Sspeer *
8396495Sspeer * This function kicks off the guest domain equivalent of
8406495Sspeer * nxge_check_hw_state(). It is called only once, from attach.
8416495Sspeer *
8426495Sspeer * Context:
8436495Sspeer * Guest domain
8446495Sspeer */
8456495Sspeer void
nxge_hio_start_timer(nxge_t * nxge)8466495Sspeer nxge_hio_start_timer(
8476495Sspeer nxge_t *nxge)
8486495Sspeer {
8496495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
8506495Sspeer nxge_hio_vr_t *vr;
8516495Sspeer int region;
8526495Sspeer
8536495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start"));
8546495Sspeer
8556495Sspeer MUTEX_ENTER(&nhd->lock);
8566495Sspeer
8576495Sspeer /*
8586495Sspeer * Find our VR data structure. (We are currently assuming
8596495Sspeer * one VR per guest domain. That may change in the future.)
8606495Sspeer */
8616495Sspeer for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
8626495Sspeer if (nhd->vr[region].nxge == (uintptr_t)nxge)
8636495Sspeer break;
8646495Sspeer }
8656495Sspeer
8666495Sspeer MUTEX_EXIT(&nhd->lock);
8676495Sspeer
8686495Sspeer if (region == NXGE_VR_SR_MAX) {
8696495Sspeer return;
8706495Sspeer }
8716495Sspeer
8726495Sspeer vr = (nxge_hio_vr_t *)&nhd->vr[region];
8736495Sspeer
8746495Sspeer nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
8756495Sspeer (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
8766495Sspeer
8776495Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start"));
8786495Sspeer }
8796495Sspeer
8806495Sspeer /*
8816495Sspeer * nxge_check_guest_state
8826495Sspeer *
8836495Sspeer * Essentially, check for Tx hangs. In the future, if we are
8846495Sspeer * polling the hardware, we may do so here.
8856495Sspeer *
8866495Sspeer * Arguments:
8876495Sspeer * vr The virtualization region (VR) data structure.
8886495Sspeer *
8896495Sspeer * Notes:
8906495Sspeer * This function is the guest domain equivalent of
8916495Sspeer * nxge_check_hw_state(). Since we have no hardware to
8926495Sspeer * check, we simply call nxge_check_tx_hang().
8936495Sspeer *
8946495Sspeer * Context:
8956495Sspeer * Guest domain
8966495Sspeer */
8976495Sspeer void
nxge_check_guest_state(nxge_hio_vr_t * vr)8986495Sspeer nxge_check_guest_state(
8996495Sspeer nxge_hio_vr_t *vr)
9006495Sspeer {
9016495Sspeer nxge_t *nxge = (nxge_t *)vr->nxge;
9026495Sspeer
9036495Sspeer NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));
9046495Sspeer
9056495Sspeer MUTEX_ENTER(nxge->genlock);
9066495Sspeer nxge->nxge_timerid = 0;
9076495Sspeer
9087466SMisaki.Kataoka@Sun.COM if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
9097466SMisaki.Kataoka@Sun.COM nxge_check_tx_hang(nxge);
9106495Sspeer
9117466SMisaki.Kataoka@Sun.COM nxge->nxge_timerid = timeout((void(*)(void *))
9127466SMisaki.Kataoka@Sun.COM nxge_check_guest_state, (caddr_t)vr,
9137466SMisaki.Kataoka@Sun.COM drv_usectohz(1000 * NXGE_GUEST_TIMER));
9147466SMisaki.Kataoka@Sun.COM }
9156495Sspeer
9166495Sspeer nxge_check_guest_state_exit:
9176495Sspeer MUTEX_EXIT(nxge->genlock);
9186495Sspeer NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
9196495Sspeer }
9206495Sspeer
9219232SMichael.Speer@Sun.COM nxge_status_t
nxge_hio_rdc_intr_arm(p_nxge_t nxge,boolean_t arm)9229232SMichael.Speer@Sun.COM nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
9239232SMichael.Speer@Sun.COM {
9249232SMichael.Speer@Sun.COM nxge_grp_t *group;
9259232SMichael.Speer@Sun.COM uint32_t channel;
9269232SMichael.Speer@Sun.COM nxge_hio_dc_t *dc;
9279232SMichael.Speer@Sun.COM nxge_ldg_t *ldgp;
9289232SMichael.Speer@Sun.COM
9299232SMichael.Speer@Sun.COM /*
9309232SMichael.Speer@Sun.COM * Validate state of guest interface before
9319232SMichael.Speer@Sun.COM * proceeeding.
9329232SMichael.Speer@Sun.COM */
9339232SMichael.Speer@Sun.COM if (!isLDOMguest(nxge))
9349232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9359232SMichael.Speer@Sun.COM if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
9369232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9379232SMichael.Speer@Sun.COM
9389232SMichael.Speer@Sun.COM /*
9399232SMichael.Speer@Sun.COM * In guest domain, always and only dealing with
9409232SMichael.Speer@Sun.COM * group 0 for an instance of nxge.
9419232SMichael.Speer@Sun.COM */
9429232SMichael.Speer@Sun.COM group = nxge->rx_set.group[0];
9439232SMichael.Speer@Sun.COM
9449232SMichael.Speer@Sun.COM /*
9459232SMichael.Speer@Sun.COM * Look to arm the the RDCs for the group.
9469232SMichael.Speer@Sun.COM */
9479232SMichael.Speer@Sun.COM for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
9489232SMichael.Speer@Sun.COM if ((1 << channel) & group->map) {
9499232SMichael.Speer@Sun.COM /*
9509232SMichael.Speer@Sun.COM * Get the RDC.
9519232SMichael.Speer@Sun.COM */
9529232SMichael.Speer@Sun.COM dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
9539232SMichael.Speer@Sun.COM if (dc == NULL)
9549232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9559232SMichael.Speer@Sun.COM
9569232SMichael.Speer@Sun.COM /*
9579232SMichael.Speer@Sun.COM * Get the RDC's ldg group.
9589232SMichael.Speer@Sun.COM */
9599232SMichael.Speer@Sun.COM ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
9609232SMichael.Speer@Sun.COM if (ldgp == NULL)
9619232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9629232SMichael.Speer@Sun.COM
9639232SMichael.Speer@Sun.COM /*
9649232SMichael.Speer@Sun.COM * Set the state of the group.
9659232SMichael.Speer@Sun.COM */
9669232SMichael.Speer@Sun.COM ldgp->arm = arm;
9679232SMichael.Speer@Sun.COM
9689232SMichael.Speer@Sun.COM nxge_hio_ldgimgn(nxge, ldgp);
9699232SMichael.Speer@Sun.COM }
9709232SMichael.Speer@Sun.COM }
9719232SMichael.Speer@Sun.COM
9729232SMichael.Speer@Sun.COM return (NXGE_OK);
9739232SMichael.Speer@Sun.COM }
9749232SMichael.Speer@Sun.COM
9759232SMichael.Speer@Sun.COM nxge_status_t
nxge_hio_rdc_enable(p_nxge_t nxge)9769232SMichael.Speer@Sun.COM nxge_hio_rdc_enable(p_nxge_t nxge)
9779232SMichael.Speer@Sun.COM {
9789232SMichael.Speer@Sun.COM nxge_grp_t *group;
9799232SMichael.Speer@Sun.COM npi_handle_t handle;
9809232SMichael.Speer@Sun.COM uint32_t channel;
9819232SMichael.Speer@Sun.COM npi_status_t rval;
9829232SMichael.Speer@Sun.COM
9839232SMichael.Speer@Sun.COM /*
9849232SMichael.Speer@Sun.COM * Validate state of guest interface before
9859232SMichael.Speer@Sun.COM * proceeeding.
9869232SMichael.Speer@Sun.COM */
9879232SMichael.Speer@Sun.COM if (!isLDOMguest(nxge))
9889232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9899232SMichael.Speer@Sun.COM if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
9909232SMichael.Speer@Sun.COM return (NXGE_ERROR);
9919232SMichael.Speer@Sun.COM
9929232SMichael.Speer@Sun.COM /*
9939232SMichael.Speer@Sun.COM * In guest domain, always and only dealing with
9949232SMichael.Speer@Sun.COM * group 0 for an instance of nxge.
9959232SMichael.Speer@Sun.COM */
9969232SMichael.Speer@Sun.COM group = nxge->rx_set.group[0];
9979232SMichael.Speer@Sun.COM
9989232SMichael.Speer@Sun.COM /*
9999232SMichael.Speer@Sun.COM * Get the PIO handle.
10009232SMichael.Speer@Sun.COM */
10019232SMichael.Speer@Sun.COM handle = NXGE_DEV_NPI_HANDLE(nxge);
10029232SMichael.Speer@Sun.COM
10039232SMichael.Speer@Sun.COM for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
10049232SMichael.Speer@Sun.COM /*
10059232SMichael.Speer@Sun.COM * If this channel is in the map, then enable
10069232SMichael.Speer@Sun.COM * it.
10079232SMichael.Speer@Sun.COM */
10089232SMichael.Speer@Sun.COM if ((1 << channel) & group->map) {
10099232SMichael.Speer@Sun.COM /*
10109232SMichael.Speer@Sun.COM * Enable the RDC and clear the empty bit.
10119232SMichael.Speer@Sun.COM */
10129232SMichael.Speer@Sun.COM rval = npi_rxdma_cfg_rdc_enable(handle, channel);
10139232SMichael.Speer@Sun.COM if (rval != NPI_SUCCESS)
10149232SMichael.Speer@Sun.COM return (NXGE_ERROR);
10159232SMichael.Speer@Sun.COM
10169232SMichael.Speer@Sun.COM (void) npi_rxdma_channel_rbr_empty_clear(handle,
10179232SMichael.Speer@Sun.COM channel);
10189232SMichael.Speer@Sun.COM }
10199232SMichael.Speer@Sun.COM }
10209232SMichael.Speer@Sun.COM
10219232SMichael.Speer@Sun.COM return (NXGE_OK);
10229232SMichael.Speer@Sun.COM }
10236495Sspeer #endif /* defined(sun4v) */
1024