/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "sys/bge_impl.h"


/*
 * The transmit-side code uses an allocation process which is similar
 * to some theme park roller-coaster rides, where riders sit in cars
 * that can go individually, but work better in a train.
 *
 * 1) RESERVE a place - this doesn't refer to any specific car or
 *    seat, just that you will get a ride.  The attempt to RESERVE a
 *    place can fail if all spaces in all cars are already committed.
 *
 * 2) Prepare yourself; this may take an arbitrary (but not unbounded)
 *    time, and you can back out at this stage, in which case you must
 *    give up (RENOUNCE) your place.
 *
 * 3) CLAIM your space - a specific car (the next sequentially
 *    numbered one) is allocated at this stage, and is guaranteed
 *    to be part of the next train to depart.  Once you've done
 *    this, you can't back out, nor wait for any external event
 *    or resource.
 *
 * 4) OCCUPY your car - when all CLAIMED cars are OCCUPIED, they
 *    all depart together as a single train!
 *
 * 5) At the end of the ride, you climb out of the car and RENOUNCE
 *    your right to it, so that it can be recycled for another rider.
 *
 * For each rider, these have to occur in this order, but the riders
 * don't have to stay in the same order at each stage.  In particular,
 * they may overtake each other between RESERVING a place and CLAIMING
 * it, or between CLAIMING and OCCUPYING a space.
 *
 * Once a car is CLAIMED, the train currently being assembled can't go
 * without that car (this guarantees that the cars in a single train
 * make up a consecutively-numbered set).  Therefore, when any train
 * leaves, we know there can't be any riders in transit between CLAIMING
 * and OCCUPYING their cars.  There can be some who have RESERVED but
 * not yet CLAIMED their places.  That's OK, though, because they'll go
 * into the next train.
 */
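/*
 * To make the analogy concrete, the stages above map onto the
 * primitives used later in this file roughly as follows.  This is
 * an illustrative sketch only, not additional driver code; the
 * elisions stand for whatever per-message work a sender must do:
 *
 *	if (!bge_atomic_reserve(&srp->tx_free, 1))
 *		return (SEND_FAIL);			1) RESERVE
 *	... prepare the message; a sender may still back
 *	    out here via bge_atomic_renounce() ...	2) PREPARE
 *	slot = bge_atomic_claim(&srp->tx_next, srp->desc.nslots);
 *							3) CLAIM
 *	... fill in send buffer descriptor <slot> ...	4) OCCUPY
 *	... after transmission, the recycle code advances
 *	    tc_next and renounces the freed slots ...	5) RENOUNCE
 */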

#define	BGE_DBG		BGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */

/*
 * Recycle all the completed buffers in the specified send ring up to
 * (but not including) the consumer index in the status block.
 *
 * This function must advance (srp->tc_next) AND adjust (srp->tx_free)
 * to account for the packets it has recycled.
 *
 * This is a trivial version that just does that and nothing more, but
 * it suffices while there's only one method for sending messages (by
 * copying) and that method doesn't need any special per-buffer action
 * for recycling.
 */
static void bge_recycle_ring(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_recycle_ring)

static void
bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;
	uint64_t n;

	_NOTE(ARGUNUSED(bgep))

	ASSERT(mutex_owned(srp->tc_lock));

	slot = *srp->cons_index_p;	/* volatile	*/
	n = slot - srp->tc_next;
	if (slot < srp->tc_next)
		n += srp->desc.nslots;

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place;
	 *	at this point, there must be at least one place NOT free;
	 *	we're not about to free more places than were claimed!
	 */
	ASSERT(srp->tx_free > 0);
	ASSERT(srp->tx_free < srp->desc.nslots);
	ASSERT(srp->tx_free + n <= srp->desc.nslots);

	srp->tc_next = slot;
	bge_atomic_renounce(&srp->tx_free, n);

	/*
	 * Reset the watchdog count: to 0 if all buffers are
	 * now free, or to 1 if some are still outstanding.
	 * Note: non-synchronised access here means we may get
	 * the "wrong" answer, but only in a harmless fashion
	 * (i.e. we deactivate the watchdog because all buffers
	 * are apparently free, even though another thread may
	 * have claimed one before we leave here; in this case
	 * the watchdog will restart on the next send() call).
	 */
	bgep->watchdog = srp->tx_free == srp->desc.nslots ? 0 : 1;
}
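/*
 * A worked example of the wraparound arithmetic above, with
 * illustrative numbers: if srp->desc.nslots is 512, srp->tc_next
 * is 510 and the consumer index is 2, the subtraction underflows
 * but (slot < srp->tc_next) holds, so adding nslots yields
 * n == 2 - 510 + 512 == 4; that is, slots 510, 511, 0 and 1 have
 * completed.  This is safe because the two indices can never
 * differ by a full circuit of the ring (there is always at least
 * one free place, per the first ASSERTion above).
 */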
/*
 * Recycle all returned slots in all rings.
 *
 * To give priority to low-numbered rings, whenever we have recycled any
 * slots in any ring except 0, we restart scanning again from ring 0.
 * Thus, for example, if rings 0, 3, and 10 are carrying traffic, the
 * pattern of recycles might go 0, 3, 10, 3, 0, 10, 0:
 *
 *	0	found some - recycle them
 *	1..2			none found
 *	3	found some - recycle them and restart scan
 *	0..9			none found
 *	10	found some - recycle them and restart scan
 *	0..2			none found
 *	3	found some more - recycle them and restart scan
 *	0	found some more - recycle them
 *	0..9			none found
 *	10	found some more - recycle them and restart scan
 *	0	found some more - recycle them
 *	1..15			none found
 *
 * The routine returns only when a complete scan has been performed
 * without finding any slots to recycle.
 *
 * Note: the expression (BGE_SEND_RINGS_USED > 1) yields a compile-time
 * constant and allows the compiler to optimise away the outer do-loop
 * if only one send ring is being used.
 */
void bge_recycle(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_recycle)

void
bge_recycle(bge_t *bgep, bge_status_t *bsp)
{
	send_ring_t *srp;
	uint64_t ring;
	uint64_t tx_rings = bgep->chipid.tx_rings;

restart:
	ring = 0;
	srp = &bgep->send[ring];
	do {
		/*
		 * For each ring, (srp->cons_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller).
		 */
		ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));

		if (*srp->cons_index_p == srp->tc_next)
			continue;	/* no slots to recycle	*/
		if (mutex_tryenter(srp->tc_lock) == 0)
			continue;	/* already in process	*/
		bge_recycle_ring(bgep, srp);
		mutex_exit(srp->tc_lock);

		if (bgep->resched_needed)
			ddi_trigger_softintr(bgep->resched_id);

		/*
		 * Restart from ring 0, if we're not on ring 0 already.
		 * The hardware selects send BDs strictly by priority,
		 * always draining the higher-priority (lower-numbered)
		 * ring first, so the driver should be consistent with
		 * the hardware and likewise give lower-numbered rings
		 * higher priority.
		 */
		if (tx_rings > 1 && ring > 0)
			goto restart;

		/*
		 * Loop over all rings (if there *are* multiple rings)
		 */
	} while (++srp, ++ring < tx_rings);
}
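/*
 * The scan/restart policy above can be modelled in isolation as
 * below (a sketch only; ring_has_work() and recycle_one() are
 * hypothetical stand-ins for the status-block comparison and the
 * locked call to bge_recycle_ring() respectively):
 *
 *	ring = 0;
 *	while (ring < tx_rings) {
 *		if (ring_has_work(ring)) {
 *			recycle_one(ring);
 *			if (ring > 0) {
 *				ring = 0;	restart at ring 0
 *				continue;
 *			}
 *		}
 *		++ring;
 *	}
 *
 * The loop restarts only after making progress (recycling at least
 * one completed slot), so it can't spin without doing useful work.
 */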

/*
 * ========== Send-side transmit routines ==========
 */

/*
 * CLAIM an already-reserved place on the next train
 *
 * This is the point of no return!
 */
static uint64_t bge_send_claim(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_send_claim)

static uint64_t
bge_send_claim(bge_t *bgep, send_ring_t *srp)
{
	uint64_t slot;

	rw_enter(srp->tx_lock, RW_READER);
	atomic_add_64(&srp->tx_flow, 1);
	slot = bge_atomic_claim(&srp->tx_next, srp->desc.nslots);
	rw_exit(srp->tx_lock);

	/*
	 * Bump the watchdog counter, thus guaranteeing that it's
	 * nonzero (watchdog activated).  Note that non-synchronised
	 * access here means we may race with the reclaim() code
	 * above, but the outcome will be harmless.  At worst, the
	 * counter may not get reset on a partial reclaim; but the
	 * large trigger threshold makes false positives unlikely.
	 */
	bgep->watchdog += 1;

	return (slot);
}
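/*
 * Note the asymmetric use of <tx_lock> here and in bge_send() below:
 * claimers take it as READER, so any number of senders can be in
 * transit between CLAIMING and OCCUPYING concurrently, while the
 * departing thread that sees <tx_flow> reach zero takes it as WRITER
 * before prodding the chip.  Holding the WRITER lock excludes all
 * claimers, so <tx_next> is guaranteed to cover only descriptors
 * that are fully OCCUPIED at the moment the mailbox is written.
 */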
/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static enum send_status bge_send_copy(bge_t *bgep, mblk_t *mp,
	send_ring_t *srp, uint16_t tci);
#pragma	inline(bge_send_copy)

static enum send_status
bge_send_copy(bge_t *bgep, mblk_t *mp, send_ring_t *srp, uint16_t tci)
{
	bge_sbd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint64_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;

	BGE_TRACE(("bge_send_copy($%p, $%p, $%p, 0x%x)",
	    (void *)bgep, (void *)mp, (void *)srp, tci));

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning SEND_FAIL.
	 *	Once it's claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = bge_send_claim(bgep, srp);
	ssbdp = &srp->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	for (totlen = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = bp->b_wptr - bp->b_rptr;
		if ((totlen += mblen) <= bgep->chipid.ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	/*
	 * We've reached the end of the chain; and we should have
	 * collected no more than ETHERMAX bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= bgep->chipid.ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor; then we're done.
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */
	hw_sbd_p = DMA_VPTR(ssbdp->desc);
	hw_sbd_p->host_buf_addr = ssbdp->pbuf.cookie.dmac_laddress;
	hw_sbd_p->len = totlen;
	hw_sbd_p->flags = SBD_FLAG_PACKET_END;
	if (tci != 0) {
		hw_sbd_p->vlan_tci = tci;
		hw_sbd_p->flags |= SBD_FLAG_VLAN_TAG;
	}

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_IP_CKSUM;
	if (pflags & HCK_FULLCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_TCP_UDP_CKSUM;

	return (SEND_FREE);
}
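/*
 * For example (illustrative sizes only): a message arriving as a
 * chain of three mblks of 14, 20 and 1480 bytes - say, headers and
 * payload built up in separate blocks - is flattened into the single
 * pre-mapped buffer and described to the chip as one contiguous
 * 1514-byte fragment, so no per-fragment DMA mapping, binding or
 * unbinding is ever needed on the transmit path.
 */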
static boolean_t
bge_send(bge_t *bgep, mblk_t *mp)
{
	send_ring_t *srp;
	enum send_status status;
	struct ether_vlan_header *ehp;
	boolean_t need_strip = B_FALSE;
	uint16_t tci;
	uint_t ring = 0;

	ASSERT(mp->b_next == NULL);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	ehp = (struct ether_vlan_header *)mp->b_rptr;

	if (ehp->ether_tpid == htons(VLAN_TPID)) {
		if (MBLKL(mp) < sizeof (struct ether_vlan_header)) {
			uint32_t pflags;

			/*
			 * Need to preserve checksum flags across pullup.
			 */
			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, &pflags);

			if (!pullupmsg(mp,
			    sizeof (struct ether_vlan_header))) {
				BGE_DEBUG(("bge_send: pullup failure"));
				bgep->resched_needed = B_TRUE;
				return (B_FALSE);
			}

			(void) hcksum_assoc(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, pflags, KM_NOSLEEP);
		}

		ehp = (struct ether_vlan_header *)mp->b_rptr;
		need_strip = B_TRUE;
	}

	/*
	 * Try to reserve a place in the chosen ring.  We mustn't try
	 * the next higher-numbered (lower-priority) ring if there are
	 * no places available here; otherwise packets of the same
	 * priority could suffer transmission starvation.
	 */
	srp = &bgep->send[ring];
	if (!bge_atomic_reserve(&srp->tx_free, 1)) {
		BGE_DEBUG(("bge_send: no free slots"));
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * Now that we know that there is space to transmit the packet,
	 * strip any VLAN tag that is present.
	 */
	if (need_strip) {
		tci = ntohs(ehp->ether_tci);

		(void) memmove(mp->b_rptr + VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr += VLAN_TAGSZ;
	} else {
		tci = 0;
	}
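	/*
	 * The strip above works in place: the 12 bytes of destination
	 * and source address slide forward over the 4-byte tag (TPID
	 * and TCI, 2 bytes each), and b_rptr then advances past the
	 * now-unused leading bytes.  Schematically:
	 *
	 *	before:	| dst[6] | src[6] | TPID | TCI | type | data ...
	 *	after:	+4 bytes | dst[6] | src[6] | type | data ...
	 *		         ^ b_rptr advanced by VLAN_TAGSZ (4)
	 */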

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place;
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(srp->tx_free > 0);
	ASSERT(srp->tx_free < srp->desc.nslots);

	if ((status = bge_send_copy(bgep, mp, srp, tci)) == SEND_FAIL) {
		/*
		 * The send routine failed :(  So we have to renounce
		 * our reservation before returning the error.
		 */
		bge_atomic_renounce(&srp->tx_free, 1);
		bgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * The send routine succeeded; it will have updated the
	 * h/w ring descriptor, and the <tx_next> and <tx_flow>
	 * counters.
	 *
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to prod the
	 * hardware once the last one is departing ...
	 */
	rw_enter(srp->tx_lock, RW_WRITER);
	if (--srp->tx_flow == 0) {
		DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
		bge_mbx_put(bgep, srp->chip_mbx_reg, srp->tx_next);
	}
	rw_exit(srp->tx_lock);

	if (status == SEND_FREE)
		freemsg(mp);
	return (B_TRUE);
}

uint_t
bge_reschedule(caddr_t arg)
{
	bge_t *bgep;
	uint_t rslt;

	bgep = (bge_t *)arg;
	rslt = DDI_INTR_UNCLAIMED;

	BGE_TRACE(("bge_reschedule($%p)", (void *)bgep));

	if (bgep->bge_mac_state == BGE_MAC_STARTED && bgep->resched_needed) {
		mac_tx_update(bgep->macp);
		bgep->resched_needed = B_FALSE;
		rslt = DDI_INTR_CLAIMED;
	}

	return (rslt);
}

/*
 * bge_m_tx() - send a chain of packets
 */
mblk_t *
bge_m_tx(void *arg, mblk_t *mp)
{
	bge_t *bgep = arg;		/* private device info	*/
	mblk_t *next;

	BGE_TRACE(("bge_m_tx($%p, $%p)", arg, (void *)mp));

	ASSERT(mp != NULL);
	ASSERT(bgep->bge_mac_state == BGE_MAC_STARTED);

	if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
		BGE_DEBUG(("bge_m_tx: chip not running"));
		return (mp);
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!bge_send(bgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}

	return (mp);
}
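/*
 * Usage note, summarising the framework contract assumed above: any
 * chain returned from bge_m_tx() is treated by the GLD/MAC layer as
 * "not transmitted"; the framework holds on to it and retries only
 * after the driver signals readiness.  That signal is the soft
 * interrupt raised on the recycle path when <resched_needed> is set,
 * which ends in the mac_tx_update() call in bge_reschedule() above.
 */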