/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "bge_impl.h"

#define	U32TOPTR(x)	((void *)(uintptr_t)(uint32_t)(x))
#define	PTRTOU32(x)	((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define	BGE_DBG		BGE_DBG_RECV	/* debug flag for this code	*/

static void bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp);
#pragma	inline(bge_refill)

/*
 * Return the specified buffer (srbdp) to the ring it came from (brp).
 *
 * Note:
 *	If the driver is compiled with only one buffer ring *and* one
 *	return ring, then the buffers must be returned in sequence.
 *	In this case, we don't have to consider anything about the
 *	buffer at all; we can simply advance the cyclic counter.  And
 *	we don't even need the refill mutex <rf_lock>, as the caller
 *	will already be holding the (one-and-only) <rx_lock>.
 *
 *	If the driver supports multiple buffer rings, but only one
 *	return ring, the same still applies (to each buffer ring
 *	separately).
 */
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
	uint64_t slot;

	_NOTE(ARGUNUSED(srbdp))

	slot = brp->rf_next;
	brp->rf_next = NEXT(slot, brp->desc.nslots);
	bge_mbx_put(bgep, brp->chip_mbx_reg, slot);
}
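
/*
 * Worked example for the refill above (illustrative only; it assumes
 * NEXT(slot, nslots) is the usual cyclic increment that wraps back to
 * 0 after the last slot): with, say, a 512-slot buffer ring and
 * rf_next == 511, the old value 511 is written to the chip's mailbox
 * register and rf_next wraps back to slot 0 for the next refill.
 */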

static mblk_t *bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p);
#pragma	inline(bge_receive_packet)

static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p)
{
	bge_rbd_t hw_rbd;
	buff_ring_t *brp;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint_t len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t pflags;

	mp = NULL;
	hw_rbd = *hw_rbd_p;

	switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
	case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
	default:
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
		goto error;

	case RBD_FLAG_JUMBO_RING:
		brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
		break;

#if (BGE_BUFF_RINGS_USED > 2)
	case RBD_FLAG_MINI_RING:
		brp = &bgep->buff[BGE_MINI_BUFF_RING];
		break;
#endif	/* BGE_BUFF_RINGS_USED > 2 */

	case 0:
		brp = &bgep->buff[BGE_STD_BUFF_RING];
		break;
	}

	if (hw_rbd.index >= brp->desc.nslots) {
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
		goto error;
	}

	srbdp = &brp->sw_rbds[hw_rbd.index];
	if (hw_rbd.opaque != srbdp->pbuf.token) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
		goto refill;
	}

	if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
		goto refill;
	}

	if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
		goto refill;
	}

	len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
	/*
	 * When IPMI/ASF is enabled, the VLAN tag must be stripped.
	 */
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
	else
#endif
		/*
		 * H/W will not strip the VLAN tag from the incoming
		 * packet in this case, as the RECEIVE_MODE_KEEP_VLAN_TAG
		 * bit is set in the RECEIVE_MAC_MODE_REG register.
		 */
		maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
	if (len > maxsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
		goto refill;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
	else
#endif
		minsize = ETHERMIN + ETHERFCSL;
	if (len < minsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
		goto refill;
	}

	/*
	 * Packet looks good; get a buffer to copy it into.
	 * We want to leave some space at the front of the allocated
	 * buffer in case any upstream modules want to prepend some
	 * sort of header.  This also has the side-effect of making
	 * the packet *contents* 4-byte aligned, as required by NCA!
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
	} else {
#endif

		mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
	}
#endif
	if (mp == NULL) {
		/* Nothing to do but drop the packet */
		goto refill;
	}

	/*
	 * Sync the data and copy it to the STREAMS buffer.
	 */
	DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		/*
		 * As the VLAN tag has been stripped from the incoming
		 * packet in the ASF case, we insert it into the packet
		 * again here.
		 */
		struct ether_vlan_header *ehp;
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
		ehp = (struct ether_vlan_header *)dp;
		ehp->ether_tpid = ntohs(VLAN_TPID);
		ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
		bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
		    dp + 2 * ETHERADDRL + VLAN_TAGSZ,
		    len - 2 * ETHERADDRL);
	} else {
#endif
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
	}

	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
	} else
#endif
		mp->b_wptr = dp + len - ETHERFCSL;

	/*
	 * Special check for one specific type of data corruption;
	 * in a good packet, the first 8 bytes are *very* unlikely
	 * to be the same as the second 8 bytes ... but we let the
	 * packet through just in case.
	 */
	if (bcmp(dp, dp+8, 8) == 0)
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

	pflags = 0;
	if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
		pflags |= HCK_FULLCKSUM;
	if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
		pflags |= HCK_IPV4_HDRCKSUM;
	if (pflags != 0)
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0,
		    hw_rbd.tcp_udp_cksum, pflags, 0);

refill:
	/*
	 * Replace the buffer in the ring it came from ...
	 */
	bge_refill(bgep, brp, srbdp);
	return (mp);

error:
	/*
	 * We come here if the integrity of the ring descriptors
	 * (rather than merely packet data) appears corrupted.
	 * The factotum will attempt to reset-and-recover.
	 */
	bgep->bge_chip_state = BGE_CHIP_ERROR;
	bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (NULL);
}
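
/*
 * Note on the checksum handling above: when the descriptor flags
 * indicate that the chip computed TCP/UDP and/or IP header checksums,
 * the matching HCK_* flags and the hardware checksum value
 * (hw_rbd.tcp_udp_cksum) are attached to the mblk via hcksum_assoc(),
 * so the upper layers can use the hardware result rather than
 * recomputing it.
 */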

/*
 * Accept the packets received in the specified ring up to
 * (but not including) the producer index in the status block.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx() (we can't call mac_rx() from here,
 * 'cos we're holding the per-ring receive lock at this point).
 *
 * This function must advance (rrp->rx_next) and write it back to
 * the chip to indicate the packets it has accepted from the ring.
 */
static mblk_t *bge_receive_ring(bge_t *bgep, recv_ring_t *rrp);
#pragma	inline(bge_receive_ring)

static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
	bge_rbd_t *hw_rbd_p;
	uint64_t slot;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;

	ASSERT(mutex_owned(rrp->rx_lock));

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		return (NULL);
	}
	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		rrp->rx_next = *rrp->prod_index_p;
		bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

	hw_rbd_p = DMA_VPTR(rrp->desc);
	head = NULL;
	tail = &head;
	slot = rrp->rx_next;

	while (slot != *rrp->prod_index_p) {	/* Note: volatile	*/
		if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot])) != NULL) {
			*tail = mp;
			tail = &mp->b_next;
		}
		rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
	}

	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		bgep->bge_chip_state = BGE_CHIP_ERROR;
	return (head);
}
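
/*
 * Note on the loop above: received packets are chained through b_next
 * (one mblk per frame, not a b_cont data chain), and the <tail>
 * pointer-to-pointer lets each new packet be appended without
 * re-walking the chain.
 */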

/*
 * Receive all packets in all rings.
 *
 * To give priority to low-numbered rings, whenever we have received any
 * packets in any ring except 0, we restart scanning again from ring 0.
 * Thus, for example, if rings 0, 3, and 10 are carrying traffic, the
 * pattern of receives might go 0, 3, 10, 3, 0, 10, 0:
 *
 *	0	found some - receive them
 *	1..2	none found
 *	3	found some - receive them and restart scan
 *	0..9	none found
 *	10	found some - receive them and restart scan
 *	0..2	none found
 *	3	found some more - receive them and restart scan
 *	0	found some more - receive them
 *	1..9	none found
 *	10	found some more - receive them and restart scan
 *	0	found some more - receive them
 *	1..15	none found
 *
 * The routine returns only when a complete scan has been performed
 * without finding any packets to receive.
 *
 * Note that driver-defined locks may *NOT* be held across calls
 * to mac_rx().
 *
 * Note: the expression (BGE_RECV_RINGS_USED > 1) yields a compile-time
 * constant and allows the compiler to optimise away the outer do-loop
 * if only one receive ring is being used.
 */
void bge_receive(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_receive)

void
bge_receive(bge_t *bgep, bge_status_t *bsp)
{
	recv_ring_t *rrp;
	uint64_t ring;
	uint64_t rx_rings = bgep->chipid.rx_rings;
	mblk_t *mp;

restart:
	ring = 0;
	rrp = &bgep->recv[ring];
	do {
		/*
		 * For each ring, (rrp->prod_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller)
		 */
		ASSERT(rrp->prod_index_p == RECV_INDEX_P(bsp, ring));

		if (*rrp->prod_index_p == rrp->rx_next)
			continue;	/* no packets	*/
		if (mutex_tryenter(rrp->rx_lock) == 0)
			continue;	/* already in process	*/
		mp = bge_receive_ring(bgep, rrp);
		mutex_exit(rrp->rx_lock);

		if (mp != NULL) {
			mac_rx(bgep->mh, rrp->handle, mp);

			/*
			 * Restart from ring 0, if the driver is compiled
			 * with multiple rings and we're not on ring 0 now
			 */
			if (rx_rings > 1 && ring > 0)
				goto restart;
		}

		/*
		 * Loop over all rings (if there *are* multiple rings)
		 */
	} while (++rrp, ++ring < rx_rings);
}