/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "sys/bge_impl.h"


/*
 * The transmit-side code uses an allocation process which is similar
 * to some theme park roller-coaster rides, where riders sit in cars
 * that can go individually, but work better in a train.
 *
 * 1)	RESERVE a place - this doesn't refer to any specific car or
 *	seat, just that you will get a ride.  The attempt to RESERVE a
 *	place can fail if all spaces in all cars are already committed.
 *
 * 2)	Prepare yourself; this may take an arbitrary (but not unbounded)
 *	time, and you can back out at this stage, in which case you must
 *	give up (RENOUNCE) your place.
 *
 * 3)	CLAIM your space - a specific car (the next sequentially
 *	numbered one) is allocated at this stage, and is guaranteed
 *	to be part of the next train to depart.  Once you've done
 *	this, you can't back out, nor wait for any external event
 *	or resource.
 *
 * 4)	OCCUPY your car - when all CLAIMED cars are OCCUPIED, they
 *	all depart together as a single train!
 *
 * 5)	At the end of the ride, you climb out of the car and RENOUNCE
 *	your right to it, so that it can be recycled for another rider.
 *
 * For each rider, these have to occur in this order, but the riders
 * don't have to stay in the same order at each stage.  In particular,
 * they may overtake each other between RESERVING a place and CLAIMING
 * it, or between CLAIMING and OCCUPYING a space.
 *
 * Once a car is CLAIMED, the train currently being assembled can't go
 * without that car (this guarantees that the cars in a single train
 * make up a consecutively-numbered set).  Therefore, when any train
 * leaves, we know there can't be any riders in transit between CLAIMING
 * and OCCUPYING their cars.  There can be some who have RESERVED but
 * not yet CLAIMED their places.  That's OK, though, because they'll go
 * into the next train.
 */
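
/*
 * How the analogy maps onto the primitives used in this file - an
 * illustrative sketch only (not compiled code), naming the functions
 * that appear below:
 *
 *	if (!bge_atomic_reserve(&srp->tx_free, 1))	RESERVE a place,
 *		return (B_FALSE);			or give up now
 *	slot = bge_send_claim(bgep, srp);		CLAIM a specific slot
 *	... fill srp->sw_sbds[slot] ...			OCCUPY it
 *	if (--srp->tx_flow == 0)			last rider departing:
 *		bge_mbx_put(bgep, ..., srp->tx_next);	the train leaves
 *	...
 *	bge_atomic_renounce(&srp->tx_free, n);		RENOUNCE slots when
 *							they are recycled
 */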

#define	BGE_DBG		BGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */

/*
 * Recycle all the completed buffers in the specified send ring up to
 * (but not including) the consumer index in the status block.
 *
 * This function must advance (srp->tc_next) AND adjust (srp->tx_free)
 * to account for the packets it has recycled.
 *
 * This is a trivial version that just does that and nothing more, but
 * it suffices while there's only one method for sending messages (by
 * copying) and that method doesn't need any special per-buffer action
 * for recycling.
 */
static void bge_recycle_ring(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_recycle_ring)

static void
bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;
	uint64_t n;

	_NOTE(ARGUNUSED(bgep))

	ASSERT(mutex_owned(srp->tc_lock));

	slot = *srp->cons_index_p;	/* volatile	*/
	n = slot - srp->tc_next;
	if (slot < srp->tc_next)
		n += srp->desc.nslots;

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */
	ASSERT(srp->tx_free > 0);
	ASSERT(srp->tx_free < srp->desc.nslots);
	ASSERT(srp->tx_free + n <= srp->desc.nslots);

	srp->tc_next = slot;
	bge_atomic_renounce(&srp->tx_free, n);

	/*
	 * Reset the watchdog count: to 0 if all buffers are
	 * now free, or to 1 if some are still outstanding.
	 * Note: non-synchronised access here means we may get
	 * the "wrong" answer, but only in a harmless fashion
	 * (i.e. we deactivate the watchdog because all buffers
	 * are apparently free, even though another thread may
	 * have claimed one before we leave here; in this case
	 * the watchdog will restart on the next send() call).
	 */
	bgep->watchdog = srp->tx_free == srp->desc.nslots ? 0 : 1;
}
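
/*
 * Worked example of the wrap-around arithmetic in bge_recycle_ring()
 * above, assuming a ring with desc.nslots == 512:
 *
 *	tc_next == 510, *cons_index_p == 3
 *	n = 3 - 510			(underflows; wraps modulo 2^64)
 *	slot < tc_next, so n += 512	=> n == 5
 *
 * i.e. slots 510, 511, 0, 1 and 2 have been completed by the chip, and
 * those five places are RENOUNCEd back into the free pool.
 */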

/*
 * Recycle all returned slots in all rings.
 *
 * To give priority to low-numbered rings, whenever we have recycled any
 * slots in any ring except 0, we restart scanning again from ring 0.
 * Thus, for example, if rings 0, 3, and 10 are carrying traffic, the
 * pattern of recycles might go 0, 3, 10, 3, 0, 10, 0:
 *
 *	0	found some - recycle them
 *	1..2		none found
 *	3	found some - recycle them and restart scan
 *	0..9		none found
 *	10	found some - recycle them and restart scan
 *	0..2		none found
 *	3	found some more - recycle them and restart scan
 *	0	found some more - recycle them
 *	0..9		none found
 *	10	found some more - recycle them and restart scan
 *	0	found some more - recycle them
 *	1..15		none found
 *
 * The routine returns only when a complete scan has been performed
 * without finding any slots to recycle.
 *
 * Note: the expression (BGE_SEND_RINGS_USED > 1) yields a compile-time
 * constant and allows the compiler to optimise away the outer do-loop
 * if only one send ring is being used.
 */
void bge_recycle(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_recycle)

void
bge_recycle(bge_t *bgep, bge_status_t *bsp)
{
	send_ring_t *srp;
	uint64_t ring;
	uint64_t tx_rings = bgep->chipid.tx_rings;

restart:
	ring = 0;
	srp = &bgep->send[ring];
	do {
		/*
		 * For each ring, (srp->cons_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller).
		 */
		ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));

		if (*srp->cons_index_p == srp->tc_next)
			continue;		/* no slots to recycle	*/
		if (mutex_tryenter(srp->tc_lock) == 0)
			continue;		/* already in process	*/
		bge_recycle_ring(bgep, srp);
		mutex_exit(srp->tc_lock);

		if (bgep->resched_needed)
			ddi_trigger_softintr(bgep->resched_id);

		/*
		 * Restart from ring 0, if we're not on ring 0 already.
		 * The hardware selects send BDs strictly by priority,
		 * always taking available BDs from the higher-priority
		 * (lower-numbered) rings first, so the driver should be
		 * consistent with the hardware and give lower-numbered
		 * rings higher priority here too.
		 */
		if (tx_rings > 1 && ring > 0)
			goto restart;

		/*
		 * Loop over all rings (if there *are* multiple rings)
		 */
	} while (++srp, ++ring < tx_rings);
}
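
/*
 * A minimal sketch of how a caller might invoke bge_recycle(): the
 * status block must already have been DMA-synced for the kernel, as the
 * comment above requires.  The <status_block> field name below is an
 * assumption used only for illustration; the real interrupt/poll path
 * may differ:
 *
 *	bge_status_t *bsp = DMA_VPTR(bgep->status_block);
 *	DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
 *	bge_recycle(bgep, bsp);
 */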


/*
 * ========== Send-side transmit routines ==========
 */

/*
 * CLAIM an already-reserved place on the next train
 *
 * This is the point of no return!
 */
static uint64_t bge_send_claim(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_send_claim)

static uint64_t
bge_send_claim(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;

	rw_enter(srp->tx_lock, RW_READER);
	atomic_add_64(&srp->tx_flow, 1);
	slot = bge_atomic_claim(&srp->tx_next, srp->desc.nslots);
	rw_exit(srp->tx_lock);

	/*
	 * Bump the watchdog counter, thus guaranteeing that it's
	 * nonzero (watchdog activated).  Note that non-synchronised
	 * access here means we may race with the reclaim() code
	 * above, but the outcome will be harmless.  At worst, the
	 * counter may not get reset on a partial reclaim; but the
	 * large trigger threshold makes false positives unlikely.
	 */
	bgep->watchdog += 1;

	return (slot);
}
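
/*
 * Sketch of the semantics assumed of bge_atomic_claim() above: it is
 * presumed to advance the shared producer index atomically, modulo the
 * ring size, and return the slot number that was claimed.  This is an
 * illustration of the assumed behaviour, not the driver's actual
 * implementation (which lives elsewhere):
 *
 *	do {
 *		old = *countp;
 *		new = (old + 1) % limit;
 *	} while (atomic_cas_64(countp, old, new) != old);
 *	return (old);
 */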

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static enum send_status bge_send_copy(bge_t *bgep, mblk_t *mp,
	send_ring_t *srp, uint16_t tci);
#pragma	inline(bge_send_copy)

static enum send_status
bge_send_copy(bge_t *bgep, mblk_t *mp, send_ring_t *srp, uint16_t tci)
{
	bge_sbd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint64_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;

	BGE_TRACE(("bge_send_copy($%p, $%p, $%p, 0x%x)",
	    (void *)bgep, (void *)mp, (void *)srp, tci));

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning SEND_FAIL.
	 *	Once it's claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = bge_send_claim(bgep, srp);
	ssbdp = &srp->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	for (totlen = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = bp->b_wptr - bp->b_rptr;
		if ((totlen += mblen) <= bgep->chipid.ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	/*
	 * We've reached the end of the chain; and we should have
	 * collected no more than ETHERMAX bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= bgep->chipid.ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor; then we're done.
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */
	hw_sbd_p = DMA_VPTR(ssbdp->desc);
	hw_sbd_p->host_buf_addr = ssbdp->pbuf.cookie.dmac_laddress;
	hw_sbd_p->len = totlen;
	hw_sbd_p->flags = SBD_FLAG_PACKET_END;
	if (tci != 0) {
		hw_sbd_p->vlan_tci = tci;
		hw_sbd_p->flags |= SBD_FLAG_VLAN_TAG;
	}

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_IP_CKSUM;
	if (pflags & HCK_FULLCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_TCP_UDP_CKSUM;

	return (SEND_FREE);
}

static boolean_t
bge_send(bge_t *bgep, mblk_t *mp)
{
	send_ring_t *srp;
	enum send_status status;
	struct ether_vlan_header *ehp;
	boolean_t need_strip = B_FALSE;
	uint16_t tci;
	uint_t ring = 0;

	ASSERT(mp->b_next == NULL);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	ehp = (struct ether_vlan_header *)mp->b_rptr;

	if (ehp->ether_tpid == htons(VLAN_TPID)) {
		if (MBLKL(mp) < sizeof (struct ether_vlan_header)) {
			uint32_t pflags;

			/*
			 * Need to preserve checksum flags across pullup.
			 */
			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, &pflags);

			if (!pullupmsg(mp,
			    sizeof (struct ether_vlan_header))) {
				BGE_DEBUG(("bge_send: pullup failure"));
				bgep->resched_needed = B_TRUE;
				return (B_FALSE);
			}

			(void) hcksum_assoc(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, pflags, KM_NOSLEEP);
		}

		ehp = (struct ether_vlan_header *)mp->b_rptr;
		need_strip = B_TRUE;
	}
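
	/*
	 * Illustration of the strip performed further down, once a slot
	 * has been reserved.  A tagged frame starts out as
	 *
	 *	dst[6] src[6] TPID[2] TCI[2] type[2] payload ...
	 *
	 * memmove() copies the 12 address bytes forward by VLAN_TAGSZ (4)
	 * so that they sit immediately before the original type field,
	 * and advancing b_rptr by 4 then leaves the untagged form
	 *
	 *	dst[6] src[6] type[2] payload ...
	 *
	 * with the TCI value carried separately to the chip in the send
	 * buffer descriptor.
	 */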

	/*
	 * Try to reserve a place in the chosen ring.  We deliberately
	 * don't fall back to the next higher-numbered (lower-priority)
	 * ring if this one has no free slots; doing so could starve
	 * packets of the same priority of transmission.
	 */
	srp = &bgep->send[ring];
	if (!bge_atomic_reserve(&srp->tx_free, 1)) {
		BGE_DEBUG(("bge_send: no free slots"));
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * Now that we know that there is space to transmit the packet,
	 * strip any VLAN tag that is present.
	 */
	if (need_strip) {
		tci = ntohs(ehp->ether_tci);

		(void) memmove(mp->b_rptr + VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr += VLAN_TAGSZ;
	} else {
		tci = 0;
	}

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(srp->tx_free > 0);
	ASSERT(srp->tx_free < srp->desc.nslots);

	if ((status = bge_send_copy(bgep, mp, srp, tci)) == SEND_FAIL) {
		/*
		 * The send routine failed :(  So we have to renounce
		 * our reservation before returning the error.
		 */
		bge_atomic_renounce(&srp->tx_free, 1);
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * The send routine succeeded; it will have updated the
	 * h/w ring descriptor, and the <tx_next> and <tx_flow>
	 * counters.
	 *
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to prod the
	 * hardware once the last one is departing ...
	 */
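	/*
	 * For example (illustrative only): if three threads claim slots
	 * 5, 6 and 7 concurrently, tx_next advances to 8 and tx_flow to 3.
	 * Each thread fills its own descriptor; whichever thread is last
	 * to decrement tx_flow (to 0) below syncs the descriptor ring and
	 * writes 8 to the chip's mailbox, submitting all three buffers to
	 * the hardware as one "train".
	 */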
	rw_enter(srp->tx_lock, RW_WRITER);
	if (--srp->tx_flow == 0) {
		DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
		bge_mbx_put(bgep, srp->chip_mbx_reg, srp->tx_next);
	}
	rw_exit(srp->tx_lock);

	if (status == SEND_FREE)
		freemsg(mp);
	return (B_TRUE);
}

uint_t
bge_reschedule(caddr_t arg)
{
	bge_t *bgep;
	uint_t rslt;

	bgep = (bge_t *)arg;
	rslt = DDI_INTR_UNCLAIMED;

	BGE_TRACE(("bge_reschedule($%p)", (void *)bgep));

	if (bgep->bge_mac_state == BGE_MAC_STARTED && bgep->resched_needed) {
		mac_tx_update(bgep->macp);
		bgep->resched_needed = B_FALSE;
		rslt = DDI_INTR_CLAIMED;
	}

	return (rslt);
}

/*
 * bge_m_tx() - send a chain of packets
 */
mblk_t *
bge_m_tx(void *arg, mblk_t *mp)
{
	bge_t *bgep = arg;		/* private device info	*/
	mblk_t *next;

	BGE_TRACE(("bge_m_tx($%p, $%p)", arg, (void *)mp));

	ASSERT(mp != NULL);
	ASSERT(bgep->bge_mac_state == BGE_MAC_STARTED);

	if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
		BGE_DEBUG(("bge_m_tx: chip not running"));
		return (mp);
	}

	rw_enter(bgep->errlock, RW_READER);
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!bge_send(bgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(bgep->errlock);

	return (mp);
}