/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"

#define U32TOPTR(x)     ((void *)(uintptr_t)(uint32_t)(x))
#define PTRTOU32(x)     ((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define BGE_DBG         BGE_DBG_RECV    /* debug flag for this code */

static void bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp);
#pragma inline(bge_refill)

/*
 * Return the specified buffer (srbdp) to the ring it came from (brp).
 *
 * Note:
 *      If the driver is compiled with only one buffer ring *and* one
 *      return ring, then the buffers must be returned in sequence.
 *      In this case, we don't have to consider anything about the
 *      buffer at all; we can simply advance the cyclic counter.  And
 *      we don't even need the refill mutex <rf_lock>, as the caller
 *      will already be holding the (one-and-only) <rx_lock>.
 *
 *      If the driver supports multiple buffer rings, but only one
 *      return ring, the same still applies (to each buffer ring
 *      separately).
 */
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
        uint64_t slot;

        _NOTE(ARGUNUSED(srbdp))

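        /*
         * Advance the cyclic refill index and notify the chip, via its
         * mailbox register, that a fresh buffer is available in <slot>.
         */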
        slot = brp->rf_next;
        brp->rf_next = NEXT(slot, brp->desc.nslots);
        bge_mbx_put(bgep, brp->chip_mbx_reg, slot);
}

static mblk_t *bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p,
    recv_ring_t *rrp);
#pragma inline(bge_receive_packet)

static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
        bge_rbd_t hw_rbd;
        buff_ring_t *brp;
        sw_rbd_t *srbdp;
        uchar_t *dp;
        mblk_t *mp;
        uint_t len;
        uint_t minsize;
        uint_t maxsize;
        uint32_t pflags;

        mp = NULL;
        hw_rbd = *hw_rbd_p;

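        /*
         * Work out which buffer ring this packet's buffer came from,
         * based on the MINI/JUMBO flags in the hardware descriptor.
         */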
        switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
        case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
        default:
                /* error, this shouldn't happen */
                BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
                goto error;

        case RBD_FLAG_JUMBO_RING:
                brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
                break;

#if (BGE_BUFF_RINGS_USED > 2)
        case RBD_FLAG_MINI_RING:
                brp = &bgep->buff[BGE_MINI_BUFF_RING];
                break;
#endif /* BGE_BUFF_RINGS_USED > 2 */

        case 0:
                brp = &bgep->buff[BGE_STD_BUFF_RING];
                break;
        }

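        /*
         * Sanity-check the descriptor: the ring index must be within
         * bounds, and the opaque token must match the one we stored
         * when the buffer was posted to the ring.
         */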
        if (hw_rbd.index >= brp->desc.nslots) {
                /* error, this shouldn't happen */
                BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
                goto error;
        }

        srbdp = &brp->sw_rbds[hw_rbd.index];
        if (hw_rbd.opaque != srbdp->pbuf.token) {
                /* bogus, drop the packet */
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
                goto refill;
        }

        if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
                /* bogus, drop the packet */
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
                goto refill;
        }

        if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
                /* bogus, drop the packet */
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
                goto refill;
        }

        len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
        /*
         * When IPMI/ASF is enabled, the VLAN tag is stripped by the chip.
         */
        if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
                maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
        else
#endif
                /*
                 * H/W does not strip the VLAN tag from incoming packets,
                 * as the RECEIVE_MODE_KEEP_VLAN_TAG bit is set in the
                 * RECEIVE_MAC_MODE_REG register.
                 */
                maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
        if (len > maxsize) {
                /* bogus, drop the packet */
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
                goto refill;
        }

#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
                minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
        else
#endif
                minsize = ETHERMIN + ETHERFCSL;
        if (len < minsize) {
                /* bogus, drop the packet */
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
                goto refill;
        }

        /*
         * Packet looks good; get a buffer to copy it into.
         * We want to leave some space at the front of the allocated
         * buffer in case any upstream modules want to prepend some
         * sort of header.  This also has the side-effect of making
         * the packet *contents* 4-byte aligned, as required by NCA!
         */
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
                mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
        } else {
#endif
                mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
        }
#endif
        if (mp == NULL) {
                /* Nothing to do but drop the packet */
                goto refill;
        }

        /*
         * Sync the data and copy it to the STREAMS buffer.
         */
        DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
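        /*
         * A failed DMA handle check means the data may be corrupt;
         * flag the fault and bail out.  The factotum will notice the
         * chip-error state and attempt to reset-and-recover.
         */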
        if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
                bgep->bge_dma_error = B_TRUE;
                bgep->bge_chip_state = BGE_CHIP_ERROR;
                return (NULL);
        }
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
                /*
                 * As the VLAN tag has been stripped from the incoming
                 * packet in the ASF case, we reinsert it here.
                 */
                struct ether_vlan_header *ehp;
                mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
                bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
                ehp = (void *)dp;
                ehp->ether_tpid = ntohs(ETHERTYPE_VLAN);
                ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
                bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
                    dp + 2 * ETHERADDRL + VLAN_TAGSZ,
                    len - 2 * ETHERADDRL);
        } else {
#endif
                mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
                bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
        }

        if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
                mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
        } else
#endif
                mp->b_wptr = dp + len - ETHERFCSL;

        /*
         * Special check for one specific type of data corruption;
         * in a good packet, the first 8 bytes are *very* unlikely
         * to be the same as the second 8 bytes ... but we let the
         * packet through just in case.
         */
        if (bcmp(dp, dp+8, 8) == 0)
                BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

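        /*
         * Map the hardware checksum results onto the MAC-layer flags:
         * the chip supplies a full TCP/UDP checksum value, and may
         * also report that the IPv4 header checksum has been verified.
         */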
        pflags = 0;
        if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
                pflags |= HCK_FULLCKSUM;
        if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
                pflags |= HCK_IPV4_HDRCKSUM_OK;
        if (pflags != 0)
                mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

        /* Update per-ring rx statistics */
        rrp->rx_pkts++;
        rrp->rx_bytes += len;

refill:
        /*
         * Replace the buffer in the ring it came from ...
         */
        bge_refill(bgep, brp, srbdp);
        return (mp);

error:
        /*
         * We come here if the integrity of the ring descriptors
         * (rather than merely packet data) appears corrupted.
         * The factotum will attempt to reset-and-recover.
         */
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
        return (NULL);
}

/*
 * Accept the packets received in the specified ring up to
 * (but not including) the producer index in the status block.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx_ring() (we can't call mac_rx_ring() from
 * here, as we're holding the per-ring receive lock at this point).
 *
 * This function must advance (rrp->rx_next) and write it back to
 * the chip to indicate the packets it has accepted from the ring.
 */
static mblk_t *bge_receive_ring(bge_t *bgep, recv_ring_t *rrp);
#ifndef DEBUG
#pragma inline(bge_receive_ring)
#endif

static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
        bge_rbd_t *hw_rbd_p;
        uint64_t slot;
        mblk_t *head;
        mblk_t **tail;
        mblk_t *mp;
        int recv_cnt = 0;

        ASSERT(mutex_owned(rrp->rx_lock));

        /*
         * Sync (all) the receive ring descriptors
         * before accepting the packets they describe
         */
        DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
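        /*
         * Sanity-check the producer index from the status block and
         * verify the descriptor ring's DMA handle; if either check
         * fails, flag a chip error so the factotum can recover.
         */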
        if (*rrp->prod_index_p >= rrp->desc.nslots) {
                bgep->bge_chip_state = BGE_CHIP_ERROR;
                bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
                return (NULL);
        }
        if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
                rrp->rx_next = *rrp->prod_index_p;
                bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
                bgep->bge_dma_error = B_TRUE;
                bgep->bge_chip_state = BGE_CHIP_ERROR;
                return (NULL);
        }

        hw_rbd_p = DMA_VPTR(rrp->desc);
        head = NULL;
        tail = &head;
        slot = rrp->rx_next;

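        /*
         * Accept at most BGE_MAXPKT_RCVED packets per invocation, so
         * that one busy ring can't monopolise the interrupt thread.
         */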
        while ((slot != *rrp->prod_index_p) && /* Note: volatile */
            (recv_cnt < BGE_MAXPKT_RCVED)) {
                if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                    != NULL) {
                        *tail = mp;
                        tail = &mp->b_next;
                        recv_cnt++;
                }
                rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
        }

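        /*
         * Tell the chip how far we've consumed, then check that
         * register accesses are still healthy before handing the
         * chain back to the caller.
         */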
        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
                bgep->bge_chip_state = BGE_CHIP_ERROR;
        return (head);
}

/*
 * XXX: Poll a particular ring.  The implementation is incomplete.
 * Once the ring interrupts are disabled, we also need to do
 * bge_recycle() for the ring, and re-enable the ring interrupt
 * automatically if the poll doesn't find any packets in the ring.
 * We need MSI-X interrupt support for this.
 *
 * The basic poll policy is that rings that are dealing with explicit
 * flows (like TCP or some service) and are marked as such should
 * have their own MSI-X interrupt per ring.  bge_intr() should leave
 * that interrupt disabled after an upcall.  The ring is then in poll
 * mode.  When a poll thread comes down and finds nothing, the MSI-X
 * interrupt is automatically enabled.  Squeue needs to deal with the
 * race of a new interrupt firing and arriving before the poll thread
 * returns.
 */
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
        recv_ring_t *rrp = arg;
        bge_t *bgep = rrp->bgep;
        bge_rbd_t *hw_rbd_p;
        uint64_t slot;
        mblk_t *head;
        mblk_t **tail;
        mblk_t *mp;
        size_t sz = 0;

        mutex_enter(rrp->rx_lock);

        /*
         * Sync (all) the receive ring descriptors
         * before accepting the packets they describe
         */
        DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
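        /*
         * Same producer-index and DMA-handle sanity checks as in
         * bge_receive_ring() above; drop the lock before bailing.
         */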
        if (*rrp->prod_index_p >= rrp->desc.nslots) {
                bgep->bge_chip_state = BGE_CHIP_ERROR;
                bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
                mutex_exit(rrp->rx_lock);
                return (NULL);
        }
        if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
                rrp->rx_next = *rrp->prod_index_p;
                bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
                bgep->bge_dma_error = B_TRUE;
                bgep->bge_chip_state = BGE_CHIP_ERROR;
                mutex_exit(rrp->rx_lock);
                return (NULL);
        }

        hw_rbd_p = DMA_VPTR(rrp->desc);
        head = NULL;
        tail = &head;
        slot = rrp->rx_next;

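        /*
         * Pick up packets until the ring is empty or we've gathered
         * at least <bytes_to_pickup> bytes of data, as requested by
         * the MAC-layer polling framework.
         */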
        /* Note: volatile */
        while ((slot != *rrp->prod_index_p) && (sz <= bytes_to_pickup)) {
                if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                    != NULL) {
                        *tail = mp;
                        sz += msgdsize(mp);
                        tail = &mp->b_next;
                }
                rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
        }

        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
                bgep->bge_chip_state = BGE_CHIP_ERROR;
        mutex_exit(rrp->rx_lock);
        return (head);
}

/*
 * Receive all packets in all rings.
 */
void bge_receive(bge_t *bgep, bge_status_t *bsp);
#pragma no_inline(bge_receive)

void
bge_receive(bge_t *bgep, bge_status_t *bsp)
{
        recv_ring_t *rrp;
        uint64_t index;
        mblk_t *mp;

        for (index = 0; index < bgep->chipid.rx_rings; index++) {
                /*
                 * Service each receive ring in turn.
                 */
                rrp = &bgep->recv[index];

                /*
                 * For each ring, (rrp->prod_index_p) points to the
                 * proper index within the status block (which has
                 * already been sync'd by the caller)
                 */
                ASSERT(rrp->prod_index_p == RECV_INDEX_P(bsp, index));

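                /*
                 * Skip rings that are in poll mode; those are drained
                 * by bge_poll_ring() on behalf of the MAC layer.
                 */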
                if (*rrp->prod_index_p == rrp->rx_next || rrp->poll_flag)
                        continue;       /* no packets */
                if (mutex_tryenter(rrp->rx_lock) == 0)
                        continue;       /* already in process */
                mp = bge_receive_ring(bgep, rrp);
                mutex_exit(rrp->rx_lock);

                if (mp != NULL)
                        mac_rx_ring(bgep->mh, rrp->ring_handle, mp,
                            rrp->ring_gen_num);
        }
}