/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "bge_impl.h"


/*
 * The transmit-side code uses an allocation process which is similar
 * to some theme park roller-coaster rides, where riders sit in cars
 * that can go individually, but work better in a train.
 *
 * 1)	RESERVE a place - this doesn't refer to any specific car or
 *	seat, just that you will get a ride.  The attempt to RESERVE a
 *	place can fail if all spaces in all cars are already committed.
 *
 * 2)	Prepare yourself; this may take an arbitrary (but not unbounded)
 *	time, and you can back out at this stage, in which case you must
 *	give up (RENOUNCE) your place.
 *
 * 3)	CLAIM your space - a specific car (the next sequentially
 *	numbered one) is allocated at this stage, and is guaranteed
 *	to be part of the next train to depart.  Once you've done
 *	this, you can't back out, nor wait for any external event
 *	or resource.
 *
 * 4)	Occupy your car - when all CLAIMED cars are OCCUPIED, they
 *	all depart together as a single train!
 *
 * 5)	At the end of the ride, you climb out of the car and RENOUNCE
 *	your right to it, so that it can be recycled for another rider.
 *
 * For each rider, these have to occur in this order, but the riders
 * don't have to stay in the same order at each stage.  In particular,
 * they may overtake each other between RESERVING a place and CLAIMING
 * it, or between CLAIMING and OCCUPYING a space.
 *
 * Once a car is CLAIMED, the train currently being assembled can't go
 * without that car (this guarantees that the cars in a single train
 * make up a consecutively-numbered set).  Therefore, when any train
 * leaves, we know there can't be any riders in transit between CLAIMING
 * and OCCUPYING their cars.  There can be some who have RESERVED but
 * not yet CLAIMED their places.  That's OK, though, because they'll go
 * into the next train.
 */
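
/*
 * The RESERVE/CLAIM/RENOUNCE steps described above are implemented by
 * the bge_atomic_*() helpers elsewhere in the driver.  Purely for
 * illustration, a compare-and-swap based version of such helpers might
 * look like the sketch below.  It is not compiled and is not the
 * driver's code; the real implementations live in bge_atomic.c.
 */
#if 0
/* RESERVE: take <n> places if that many are free; B_FALSE otherwise */
static boolean_t
reserve_sketch(uint64_t *count_p, uint64_t n)
{
	uint64_t oldval;
	uint64_t newval;

	do {
		oldval = *count_p;
		if (oldval < n)
			return (B_FALSE);	/* not enough places	*/
		newval = oldval - n;
	} while (atomic_cas_64(count_p, oldval, newval) != oldval);

	return (B_TRUE);
}

/* RENOUNCE: give back <n> places */
static void
renounce_sketch(uint64_t *count_p, uint64_t n)
{
	atomic_add_64(count_p, n);
}

/* CLAIM: allocate the next sequentially-numbered slot, modulo ring size */
static uint64_t
claim_sketch(uint64_t *next_p, uint64_t limit)
{
	uint64_t oldval;
	uint64_t newval;

	do {
		oldval = *next_p;
		newval = (oldval + 1) % limit;
	} while (atomic_cas_64(next_p, oldval, newval) != oldval);

	return (oldval);
}
#endif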

#define	BGE_DBG		BGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */

/*
 * Recycle all the completed buffers in the specified send ring up to
 * (but not including) the consumer index in the status block.
 *
 * This function must advance (srp->tc_next) AND adjust (srp->tx_free)
 * to account for the packets it has recycled.
 *
 * This is a trivial version that just does that and nothing more, but
 * it suffices while there's only one method for sending messages (by
 * copying) and that method doesn't need any special per-buffer action
 * for recycling.
 */
static void bge_recycle_ring(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_recycle_ring)

static void
bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;
	uint64_t n;

	_NOTE(ARGUNUSED(bgep))

	ASSERT(mutex_owned(srp->tc_lock));

	slot = *srp->cons_index_p;	/* volatile	*/
	n = slot - srp->tc_next;
	if (slot < srp->tc_next)
		n += srp->desc.nslots;

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */
	ASSERT(srp->tx_free > 0);

	srp->tc_next = slot;
	bge_atomic_renounce(&srp->tx_free, n);

	/*
	 * Reset the watchdog count: to 0 if all buffers are
	 * now free, or to 1 if some are still outstanding.
	 * Note: non-synchronised access here means we may get
	 * the "wrong" answer, but only in a harmless fashion
	 * (i.e. we deactivate the watchdog because all buffers
	 * are apparently free, even though another thread may
	 * have claimed one before we leave here; in this case
	 * the watchdog will restart on the next send() call).
	 */
	bgep->watchdog = srp->tx_free == srp->desc.nslots ? 0 : 1;
}

/*
 * Recycle all returned slots in all rings.
 *
 * To give priority to low-numbered rings, whenever we have recycled any
 * slots in any ring except 0, we restart scanning again from ring 0.
 * Thus, for example, if rings 0, 3, and 10 are carrying traffic, the
 * pattern of recycles might go 0, 3, 10, 3, 0, 10, 0:
 *
 *	0	found some - recycle them
 *	1..2	none found
 *	3	found some - recycle them	and restart scan
 *	0..9	none found
 *	10	found some - recycle them	and restart scan
 *	0..2	none found
 *	3	found some more - recycle them	and restart scan
 *	0	found some more - recycle them
 *	0..9	none found
 *	10	found some more - recycle them	and restart scan
 *	0	found some more - recycle them
 *	1..15	none found
 *
 * The routine returns only when a complete scan has been performed
 * without finding any slots to recycle.
 *
 * Note: the expression (BGE_SEND_RINGS_USED > 1) yields a compile-time
 * constant and allows the compiler to optimise away the outer do-loop
 * if only one send ring is being used.
 */
void bge_recycle(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_recycle)

void
bge_recycle(bge_t *bgep, bge_status_t *bsp)
{
	send_ring_t *srp;
	uint64_t ring;
	uint64_t tx_rings = bgep->chipid.tx_rings;

restart:
	ring = 0;
	srp = &bgep->send[ring];
	do {
		/*
		 * For each ring, (srp->cons_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller).
		 */
		ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));

		if (*srp->cons_index_p == srp->tc_next)
			continue;		/* no slots to recycle	*/

		mutex_enter(srp->tc_lock);
		bge_recycle_ring(bgep, srp);
		mutex_exit(srp->tc_lock);

		if (bgep->resched_needed && !bgep->resched_running) {
			bgep->resched_running = B_TRUE;
			ddi_trigger_softintr(bgep->resched_id);
		}

		/*
		 * Restart from ring 0, if we're not on ring 0 already.
		 * As the hardware selects send BDs purely on priority,
		 * and BDs on the higher-priority ring are always picked
		 * first, the driver should stay consistent with the
		 * hardware and give lower-numbered rings higher priority.
		 */
		if (tx_rings > 1 && ring > 0)
			goto restart;

		/*
		 * Loop over all rings (if there *are* multiple rings)
		 */
	} while (++srp, ++ring < tx_rings);
}


/*
 * ========== Send-side transmit routines ==========
 */

/*
 * CLAIM an already-reserved place on the next train
 *
 * This is the point of no return!
 */
static uint64_t bge_send_claim(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_send_claim)

static uint64_t
bge_send_claim(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;

	mutex_enter(srp->tx_lock);
	atomic_add_64(&srp->tx_flow, 1);
	slot = bge_atomic_claim(&srp->tx_next, srp->desc.nslots);
	mutex_exit(srp->tx_lock);

	/*
	 * Bump the watchdog counter, thus guaranteeing that it's
	 * nonzero (watchdog activated).  Note that non-synchronised
	 * access here means we may race with the reclaim() code
	 * above, but the outcome will be harmless.  At worst, the
	 * counter may not get reset on a partial reclaim; but the
	 * large trigger threshold makes false positives unlikely.
	 */
	bgep->watchdog += 1;

	return (slot);
}

#define	TCP_CKSUM_OFFSET	16
#define	UDP_CKSUM_OFFSET	6

static void
bge_pseudo_cksum(uint8_t *buf)
{
	uint32_t cksum;
	uint16_t iphl;
	uint16_t proto;

	/*
	 * Point it to the ip header.
	 */
	buf += sizeof (struct ether_header);

	/*
	 * Calculate the pseudo-header checksum.
	 */
	iphl = 4 * (buf[0] & 0xF);			/* IP header length */
	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl; /* TCP/UDP length */
	cksum += proto = buf[9];			/* protocol */
	cksum += (((uint16_t)buf[12])<<8) + buf[13];	/* source address */
	cksum += (((uint16_t)buf[14])<<8) + buf[15];
	cksum += (((uint16_t)buf[16])<<8) + buf[17];	/* destination address */
	cksum += (((uint16_t)buf[18])<<8) + buf[19];
	cksum = (cksum>>16) + (cksum & 0xFFFF);		/* fold carries */
	cksum = (cksum>>16) + (cksum & 0xFFFF);

	/*
	 * Point it to the TCP/UDP header, and
	 * update the checksum field.
	 */
	buf += iphl + ((proto == IPPROTO_TCP) ?
	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);

	*(uint16_t *)buf = htons((uint16_t)cksum);
}
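
/*
 * Illustrative only: the byte offsets used above correspond to the
 * standard IPv4 pseudo-header fields (source address, destination
 * address, protocol, and TCP/UDP length).  A version written against
 * the struct ip view from <netinet/ip.h> might look roughly like the
 * sketch below; it is not compiled and is not part of the driver.
 */
#if 0
static uint32_t
pseudo_cksum_sketch(const struct ip *iph)
{
	const uint16_t *src = (const uint16_t *)&iph->ip_src;
	const uint16_t *dst = (const uint16_t *)&iph->ip_dst;
	uint32_t sum;

	sum = ntohs(iph->ip_len) - iph->ip_hl * 4;	/* TCP/UDP length */
	sum += iph->ip_p;				/* protocol */
	sum += ntohs(src[0]) + ntohs(src[1]);		/* source address */
	sum += ntohs(dst[0]) + ntohs(dst[1]);		/* destination address */
	sum = (sum >> 16) + (sum & 0xFFFF);		/* fold carries twice */
	sum = (sum >> 16) + (sum & 0xFFFF);

	return (sum);
}
#endif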

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static enum send_status bge_send_copy(bge_t *bgep, mblk_t *mp,
	send_ring_t *srp, uint16_t tci);
#pragma	inline(bge_send_copy)

static enum send_status
bge_send_copy(bge_t *bgep, mblk_t *mp, send_ring_t *srp, uint16_t tci)
{
	bge_sbd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint64_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;

	BGE_TRACE(("bge_send_copy($%p, $%p, $%p, 0x%x)",
	    (void *)bgep, (void *)mp, (void *)srp, tci));

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning SEND_FAIL.
	 *	Once it's claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = bge_send_claim(bgep, srp);
	ssbdp = &srp->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	bp = mp;
	if (tci != 0) {
		/*
		 * The VLAN tag is passed to the chip out of band (in the
		 * send buffer descriptor), so copy the Ethernet addresses
		 * and then skip over the 4-byte tag in the original header.
		 */
		mblen = bp->b_wptr - bp->b_rptr;

		ASSERT(mblen >= 2 * ETHERADDRL + VLAN_TAGSZ);

		bcopy(bp->b_rptr, txb, 2 * ETHERADDRL);
		txb += 2 * ETHERADDRL;
		totlen = 2 * ETHERADDRL;

		if (mblen -= 2 * ETHERADDRL + VLAN_TAGSZ) {
			if ((totlen += mblen) <= bgep->chipid.ethmax_size) {
				bcopy(bp->b_wptr-mblen, txb, mblen);
				txb += mblen;
			}
		}
		bp = bp->b_cont;
	}
	for (; bp != NULL; bp = bp->b_cont) {
		mblen = bp->b_wptr - bp->b_rptr;
		if ((totlen += mblen) <= bgep->chipid.ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	/*
	 * We've reached the end of the chain; and we should have
	 * collected no more than ETHERMAX bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= bgep->chipid.ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor; then we're done.
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */
	hw_sbd_p = DMA_VPTR(ssbdp->desc);
	hw_sbd_p->host_buf_addr = ssbdp->pbuf.cookie.dmac_laddress;
	hw_sbd_p->len = totlen;
	hw_sbd_p->flags = SBD_FLAG_PACKET_END;
	if (tci != 0) {
		hw_sbd_p->vlan_tci = tci;
		hw_sbd_p->flags |= SBD_FLAG_VLAN_TAG;
	}

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_IP_CKSUM;
	if (pflags & HCK_FULLCKSUM) {
		hw_sbd_p->flags |= SBD_FLAG_TCP_UDP_CKSUM;
		if (bgep->chipid.flags & CHIP_FLAG_PARTIAL_CSUM)
			bge_pseudo_cksum((uint8_t *)DMA_VPTR(ssbdp->pbuf));
	}

	return (SEND_FREE);
}

static boolean_t
bge_send(bge_t *bgep, mblk_t *mp)
{
	send_ring_t *srp;
	enum send_status status;
	struct ether_vlan_header *ehp;
	boolean_t need_strip = B_FALSE;
	bge_status_t *bsp;
	uint16_t tci;
	uint_t ring = 0;

	ASSERT(mp->b_next == NULL);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	ehp = (struct ether_vlan_header *)mp->b_rptr;

	if (ehp->ether_tpid == htons(VLAN_TPID)) {
		if (MBLKL(mp) < sizeof (struct ether_vlan_header)) {
			uint32_t pflags;

			/*
			 * Need to preserve checksum flags across pullup.
			 */
			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, &pflags);

			if (!pullupmsg(mp,
			    sizeof (struct ether_vlan_header))) {
				BGE_DEBUG(("bge_send: pullup failure"));
				bgep->resched_needed = B_TRUE;
				return (B_FALSE);
			}

			(void) hcksum_assoc(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, pflags, KM_NOSLEEP);
		}

		ehp = (struct ether_vlan_header *)mp->b_rptr;
		need_strip = B_TRUE;
	}

	/*
	 * Try to reserve a place in the chosen ring.  We shouldn't fall
	 * back to the next higher-numbered (lower-priority) ring if no
	 * places are available here; otherwise, packets of the same
	 * priority could starve each other of transmission.
	 */
	srp = &bgep->send[ring];
	if (!bge_atomic_reserve(&srp->tx_free, 1)) {
		BGE_DEBUG(("bge_send: no free slots"));
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * Now that we know that there is space to transmit the packet,
	 * strip any VLAN tag that is present.
	 */
	if (need_strip) {
		tci = ntohs(ehp->ether_tci);
	} else {
		tci = 0;
	}

	if (srp->tx_free <= 16) {
		bsp = DMA_VPTR(bgep->status_block);
		bge_recycle(bgep, bsp);
	}

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(srp->tx_free > 0);

	if ((status = bge_send_copy(bgep, mp, srp, tci)) == SEND_FAIL) {
		/*
		 * The send routine failed :(  So we have to renounce
		 * our reservation before returning the error.
		 */
		bge_atomic_renounce(&srp->tx_free, 1);
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * The send routine succeeded; it will have updated the
	 * h/w ring descriptor, and the <tx_next> and <tx_flow>
	 * counters.
	 *
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to prod the
	 * hardware once the last one is departing ...
	 */
	mutex_enter(srp->tx_lock);
	if (--srp->tx_flow == 0) {
		DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
		bge_mbx_put(bgep, srp->chip_mbx_reg, srp->tx_next);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			bgep->bge_chip_state = BGE_CHIP_ERROR;
	}
	mutex_exit(srp->tx_lock);

	if (status == SEND_FREE)
		freemsg(mp);
	return (B_TRUE);
}

uint_t
bge_reschedule(caddr_t arg)
{
	bge_t *bgep;

	bgep = (bge_t *)arg;

	BGE_TRACE(("bge_reschedule($%p)", (void *)bgep));

	if (bgep->bge_mac_state == BGE_MAC_STARTED && bgep->resched_needed) {
		mac_tx_update(bgep->mh);
		bgep->resched_needed = B_FALSE;
		bgep->resched_running = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * bge_m_tx() - send a chain of packets
 */
mblk_t *
bge_m_tx(void *arg, mblk_t *mp)
{
	bge_t *bgep = arg;		/* private device info	*/
	mblk_t *next;

	BGE_TRACE(("bge_m_tx($%p, $%p)", arg, (void *)mp));

	ASSERT(mp != NULL);
	ASSERT(bgep->bge_mac_state == BGE_MAC_STARTED);

	if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
		BGE_DEBUG(("bge_m_tx: chip not running"));
		return (mp);
	}

	rw_enter(bgep->errlock, RW_READER);
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!bge_send(bgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(bgep->errlock);

	return (mp);
}
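
/*
 * Illustrative only: bge_m_tx() is the GLDv3 transmit entry point.
 * The framework treats a non-NULL return as "couldn't send everything";
 * the unsent chain (still linked via b_next) is expected to be held by
 * the caller and retried after the driver signals mac_tx_update(), as
 * bge_reschedule() does above.  A caller-side sketch of that contract
 * (not compiled, not part of the driver; queue_for_retry() is a
 * hypothetical helper):
 */
#if 0
	mblk_t *unsent;

	unsent = bge_m_tx(bgep, chain);
	if (unsent != NULL) {
		/*
		 * Ring was full (or the chip wasn't running): queue the
		 * remainder and retransmit it when mac_tx_update() fires.
		 */
		queue_for_retry(unsent);
	}
#endif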