1*3833Sxw161283 /*
2*3833Sxw161283 * CDDL HEADER START
3*3833Sxw161283 *
4*3833Sxw161283 * The contents of this file are subject to the terms of the
5*3833Sxw161283 * Common Development and Distribution License (the "License").
6*3833Sxw161283 * You may not use this file except in compliance with the License.
7*3833Sxw161283 *
8*3833Sxw161283 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*3833Sxw161283 * or http://www.opensolaris.org/os/licensing.
10*3833Sxw161283 * See the License for the specific language governing permissions
11*3833Sxw161283 * and limitations under the License.
12*3833Sxw161283 *
13*3833Sxw161283 * When distributing Covered Code, include this CDDL HEADER in each
14*3833Sxw161283 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*3833Sxw161283 * If applicable, add the following below this CDDL HEADER, with the
16*3833Sxw161283 * fields enclosed by brackets "[]" replaced with your own identifying
17*3833Sxw161283 * information: Portions Copyright [yyyy] [name of copyright owner]
18*3833Sxw161283 *
19*3833Sxw161283 * CDDL HEADER END
20*3833Sxw161283 */
21*3833Sxw161283
22*3833Sxw161283 /*
23*3833Sxw161283 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24*3833Sxw161283 * Use is subject to license terms.
25*3833Sxw161283 */
26*3833Sxw161283
27*3833Sxw161283 /*
28*3833Sxw161283 * This file is part of the Chelsio T1 Ethernet driver.
29*3833Sxw161283 *
30*3833Sxw161283 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
31*3833Sxw161283 */
32*3833Sxw161283
33*3833Sxw161283 /*
34*3833Sxw161283 * Solaris Multithreaded STREAMS Chelsio PCI Ethernet Driver.
35*3833Sxw161283 * Interface code
36*3833Sxw161283 */
37*3833Sxw161283
38*3833Sxw161283 #pragma ident "%Z%%M% %I% %E% SMI"
39*3833Sxw161283
40*3833Sxw161283 #include <sys/types.h>
41*3833Sxw161283 #include <sys/systm.h>
42*3833Sxw161283 #include <sys/cmn_err.h>
43*3833Sxw161283 #include <sys/ddi.h>
44*3833Sxw161283 #include <sys/sunddi.h>
45*3833Sxw161283 #include <sys/byteorder.h>
46*3833Sxw161283 #include <sys/atomic.h>
47*3833Sxw161283 #include <sys/ethernet.h>
48*3833Sxw161283 #if PE_PROFILING_ENABLED
49*3833Sxw161283 #include <sys/time.h>
50*3833Sxw161283 #endif
51*3833Sxw161283 #include <sys/gld.h>
52*3833Sxw161283 #include "ostypes.h"
53*3833Sxw161283 #include "common.h"
54*3833Sxw161283 #include "oschtoe.h"
55*3833Sxw161283 #ifdef CONFIG_CHELSIO_T1_1G
56*3833Sxw161283 #include "fpga_defs.h"
57*3833Sxw161283 #endif
58*3833Sxw161283 #include "regs.h"
59*3833Sxw161283 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
60*3833Sxw161283 #include "mc3.h"
61*3833Sxw161283 #include "mc4.h"
62*3833Sxw161283 #endif
63*3833Sxw161283 #include "sge.h"
64*3833Sxw161283 #include "tp.h"
65*3833Sxw161283 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
66*3833Sxw161283 #include "ulp.h"
67*3833Sxw161283 #endif
68*3833Sxw161283 #include "espi.h"
69*3833Sxw161283 #include "elmer0.h"
70*3833Sxw161283 #include "gmac.h"
71*3833Sxw161283 #include "cphy.h"
72*3833Sxw161283 #include "suni1x10gexp_regs.h"
73*3833Sxw161283 #include "ch.h"
74*3833Sxw161283
/* number of data bytes held in a single mblk */
#define MLEN(mp) ((mp)->b_wptr - (mp)->b_rptr)

/* receive-buffer accounting shared with the SGE/buffer-pool code */
extern uint32_t buffers_in_use[];
extern kmutex_t in_use_l;
extern uint32_t in_use_index;

/* forward declarations for this file's static helpers */
static void link_start(ch_t *sa, struct pe_port_t *pp);
static ch_esb_t *ch_alloc_small_esbbuf(ch_t *sa, uint32_t i);
static ch_esb_t *ch_alloc_big_esbbuf(ch_t *sa, uint32_t i);
void ch_big_rbuf_recycle(ch_esb_t *rbp);
void ch_small_rbuf_recycle(ch_esb_t *rbp);
static const struct board_info *pe_sa_init(ch_t *sa);
static int ch_set_config_data(ch_t *chp);
void pe_rbuf_pool_free(ch_t *chp);
static void pe_free_driver_resources(ch_t *sa);
static void update_mtu_tab(ch_t *adapter);
static int pe_change_mtu(ch_t *chp);

/*
 * CPL5 Defines (from netinet/cpl5_commands.h)
 */
#define FLITSTOBYTES 8

#define CPL_FORMAT_0_SIZE 8
#define CPL_FORMAT_1_SIZE 16
#define CPL_FORMAT_2_SIZE 24
#define CPL_FORMAT_3_SIZE 32
#define CPL_FORMAT_4_SIZE 40
#define CPL_FORMAT_5_SIZE 48

#define TID_MASK 0xffffff

#define PE_LINK_SPEED_AUTONEG 5

static int pe_small_rbuf_pool_init(ch_t *sa);
static int pe_big_rbuf_pool_init(ch_t *sa);
static int pe_make_fake_arp(ch_t *chp, unsigned char *arpp);
static uint32_t pe_get_ip(unsigned char *arpp);

/*
 * May be set in /etc/system to 0 to use default latency timer for 10G.
 * See PCI register 0xc definition.
 */
int enable_latency_timer = 1;

/*
 * May be set in /etc/system to 0 to disable hardware checksum for
 * TCP and UDP.
 */
int enable_checksum_offload = 1;

/*
 * Multiplier for freelist pool.
 */
int fl_sz_multiplier = 6;
130*3833Sxw161283
131*3833Sxw161283 uint_t
pe_intr(ch_t * sa)132*3833Sxw161283 pe_intr(ch_t *sa)
133*3833Sxw161283 {
134*3833Sxw161283 mutex_enter(&sa->ch_intr);
135*3833Sxw161283
136*3833Sxw161283 if (sge_data_in(sa->sge)) {
137*3833Sxw161283 sa->isr_intr++;
138*3833Sxw161283 mutex_exit(&sa->ch_intr);
139*3833Sxw161283 return (DDI_INTR_CLAIMED);
140*3833Sxw161283 }
141*3833Sxw161283
142*3833Sxw161283 mutex_exit(&sa->ch_intr);
143*3833Sxw161283
144*3833Sxw161283 return (DDI_INTR_UNCLAIMED);
145*3833Sxw161283 }
146*3833Sxw161283
/*
 * Each setup struct will call this function to
 * initialize.
 *
 * 'xsa' is the per-adapter soft state (a ch_t *, passed as void * by
 * the caller).  The adapter-wide work (link bring-up, hardware module
 * init, checksum-offload programming) runs only on the first call,
 * guarded by init_counter; the SGE start, interrupt enable and MTU
 * programming run on every call.
 */
void
pe_init(void* xsa)
{
	ch_t *sa = NULL;
	int i = 0;

	sa = (ch_t *)xsa;

	/*
	 * Need to count the number of times this routine is called
	 * because we only want the resources to be allocated once.
	 * The 7500 has four ports and so this routine can be called
	 * once for each port.
	 */
	if (sa->init_counter == 0) {
		for_each_port(sa, i) {

			/*
			 * We only want to initialize the line if it is down.
			 */
			if (sa->port[i].line_up == 0) {
				link_start(sa, &sa->port[i]);
				sa->port[i].line_up = 1;
			}
		}

		(void) t1_init_hw_modules(sa);

		/*
		 * Enable/Disable checksum offloading, per category, as
		 * requested in the driver configuration data.
		 */
		if (sa->ch_config.cksum_enabled) {
			if (sa->config_data.offload_ip_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_ip_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_tcp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_tcp_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_udp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_udp_checksum_offload(sa->tp, 1);
			}
		}

		sa->ch_flags |= PEINITDONE;

		sa->init_counter++;
	}

	/*
	 * Enable interrupts after starting the SGE so
	 * that the SGE is ready to handle interrupts.
	 */
	(void) sge_start(sa->sge);
	t1_interrupts_enable(sa);

	/*
	 * set mtu (either 1500 or bigger)
	 */
	(void) pe_change_mtu(sa);
#ifdef HOST_PAUSE
	/*
	 * get the configured value of the MAC.
	 */
	(void) t1_tpi_read(sa, SUNI1x10GEXP_REG_TXXG_CONFIG_1 << 2,
	    &sa->txxg_cfg1);
#endif
}
223*3833Sxw161283
224*3833Sxw161283 /* ARGSUSED */
225*3833Sxw161283 static void
link_start(ch_t * sa,struct pe_port_t * p)226*3833Sxw161283 link_start(ch_t *sa, struct pe_port_t *p)
227*3833Sxw161283 {
228*3833Sxw161283 struct cmac *mac = p->mac;
229*3833Sxw161283
230*3833Sxw161283 mac->ops->reset(mac);
231*3833Sxw161283 if (mac->ops->macaddress_set)
232*3833Sxw161283 mac->ops->macaddress_set(mac, p->enaddr);
233*3833Sxw161283 (void) t1_link_start(p->phy, mac, &p->link_config);
234*3833Sxw161283 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
235*3833Sxw161283 }
236*3833Sxw161283
/*
 * turn off interrupts...
 *
 * Disables adapter interrupts and stops the SGE, then performs an
 * empty enter/exit of ch_intr.  The enter/exit pair is a barrier: if
 * an interrupt thread is still inside sge_data_in() (see pe_intr()),
 * we block on the lock until it finishes, guaranteeing no ISR is
 * running when this routine returns.
 */
void
pe_stop(ch_t *sa)
{
	t1_interrupts_disable(sa);
	(void) sge_stop(sa->sge);

	/*
	 * we can still be running an interrupt thread in sge_data_in().
	 * If we are, we'll block on the ch_intr lock
	 */
	mutex_enter(&sa->ch_intr);
	mutex_exit(&sa->ch_intr);
}
253*3833Sxw161283
254*3833Sxw161283 /*
255*3833Sxw161283 * output mblk to SGE level and out to the wire.
256*3833Sxw161283 */
257*3833Sxw161283
258*3833Sxw161283 int
pe_start(ch_t * sa,mblk_t * mp,uint32_t flg)259*3833Sxw161283 pe_start(ch_t *sa, mblk_t *mp, uint32_t flg)
260*3833Sxw161283 {
261*3833Sxw161283 mblk_t *m0 = mp;
262*3833Sxw161283 cmdQ_ce_t cm[16];
263*3833Sxw161283 cmdQ_ce_t *cmp;
264*3833Sxw161283 cmdQ_ce_t *hmp = &cm[0]; /* head of cm table (may be kmem_alloed) */
265*3833Sxw161283 int cm_flg = 0; /* flag (1 - if kmem-alloced) */
266*3833Sxw161283 int nseg = 0; /* number cmdQ_ce entries created */
267*3833Sxw161283 int mseg = 16; /* maximum entries in hmp arrary */
268*3833Sxw161283 int freeme = 0; /* we have an mblk to free in case of error */
269*3833Sxw161283 uint32_t ch_bind_dma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
270*3833Sxw161283 uint32_t);
271*3833Sxw161283 #if defined(__sparc)
272*3833Sxw161283 uint32_t ch_bind_dvma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
273*3833Sxw161283 uint32_t);
274*3833Sxw161283 #endif
275*3833Sxw161283 int rv; /* return value on error */
276*3833Sxw161283
277*3833Sxw161283 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
278*3833Sxw161283 if (flg & CH_OFFLOAD) {
279*3833Sxw161283 hmp->ce_pa = ((tbuf_t *)mp)->tb_pa;
280*3833Sxw161283 hmp->ce_dh = NULL;
281*3833Sxw161283 hmp->ce_flg = DH_TOE;
282*3833Sxw161283 hmp->ce_len = ((tbuf_t *)mp)->tb_len;
283*3833Sxw161283 hmp->ce_mp = mp;
284*3833Sxw161283
285*3833Sxw161283 /* make sure data is flushed to physical memory */
286*3833Sxw161283 (void) ddi_dma_sync((ddi_dma_handle_t)((tbuf_t *)mp)->tb_dh,
287*3833Sxw161283 (off_t)0, hmp->ce_len, DDI_DMA_SYNC_FORDEV);
288*3833Sxw161283
289*3833Sxw161283 if (sge_data_out(sa->sge, 0, mp, hmp, 1, flg) == 0) {
290*3833Sxw161283 return (0);
291*3833Sxw161283 }
292*3833Sxw161283
293*3833Sxw161283 /*
294*3833Sxw161283 * set a flag so we'll restart upper layer when
295*3833Sxw161283 * resources become available.
296*3833Sxw161283 */
297*3833Sxw161283 sa->ch_blked = 1;
298*3833Sxw161283 return (1);
299*3833Sxw161283 }
300*3833Sxw161283 #endif /* CONFIG_CHELSIO_T1_OFFLOAD */
301*3833Sxw161283
302*3833Sxw161283 /* writes from toe will always have CPL header in place */
303*3833Sxw161283 if (flg & CH_NO_CPL) {
304*3833Sxw161283 struct cpl_tx_pkt *cpl;
305*3833Sxw161283
306*3833Sxw161283 /* PR2928 & PR3309 */
307*3833Sxw161283 if (sa->ch_ip == NULL) {
308*3833Sxw161283 ushort_t ethertype = ntohs(*(short *)&mp->b_rptr[12]);
309*3833Sxw161283 if (ethertype == ETHERTYPE_ARP) {
310*3833Sxw161283 if (is_T2(sa)) {
311*3833Sxw161283 /*
312*3833Sxw161283 * We assume here that the arp will be
313*3833Sxw161283 * contained in one mblk.
314*3833Sxw161283 */
315*3833Sxw161283 if (pe_make_fake_arp(sa, mp->b_rptr)) {
316*3833Sxw161283 freemsg(mp);
317*3833Sxw161283 sa->oerr++;
318*3833Sxw161283 return (0);
319*3833Sxw161283 }
320*3833Sxw161283 } else {
321*3833Sxw161283 sa->ch_ip = pe_get_ip(mp->b_rptr);
322*3833Sxw161283 }
323*3833Sxw161283 }
324*3833Sxw161283 }
325*3833Sxw161283
326*3833Sxw161283 /*
327*3833Sxw161283 * if space in front of packet big enough for CPL
328*3833Sxw161283 * header, then use it. We'll allocate an mblk
329*3833Sxw161283 * otherwise.
330*3833Sxw161283 */
331*3833Sxw161283 if ((mp->b_rptr - mp->b_datap->db_base) >= SZ_CPL_TX_PKT) {
332*3833Sxw161283
333*3833Sxw161283 mp->b_rptr -= SZ_CPL_TX_PKT;
334*3833Sxw161283
335*3833Sxw161283 } else {
336*3833Sxw161283
337*3833Sxw161283 #ifdef SUN_KSTATS
338*3833Sxw161283 sa->sge->intr_cnt.tx_need_cpl_space++;
339*3833Sxw161283 #endif
340*3833Sxw161283 m0 = allocb(SZ_CPL_TX_PKT, BPRI_HI);
341*3833Sxw161283 if (m0 == NULL) {
342*3833Sxw161283 freemsg(mp);
343*3833Sxw161283 sa->oerr++;
344*3833Sxw161283 return (0);
345*3833Sxw161283 }
346*3833Sxw161283
347*3833Sxw161283 m0->b_wptr = m0->b_rptr + SZ_CPL_TX_PKT;
348*3833Sxw161283 m0->b_cont = mp;
349*3833Sxw161283 freeme = 1;
350*3833Sxw161283
351*3833Sxw161283 mp = m0;
352*3833Sxw161283 }
353*3833Sxw161283
354*3833Sxw161283 /* fill in cpl header */
355*3833Sxw161283 cpl = (struct cpl_tx_pkt *)mp->b_rptr;
356*3833Sxw161283 cpl->opcode = CPL_TX_PKT;
357*3833Sxw161283 cpl->iff = 0; /* XXX port 0 needs fixing with NEMO */
358*3833Sxw161283 cpl->ip_csum_dis = 1; /* no IP header cksum */
359*3833Sxw161283 cpl->l4_csum_dis =
360*3833Sxw161283 flg & CH_NO_HWCKSUM; /* CH_NO_HWCKSUM == 1 */
361*3833Sxw161283 cpl->vlan_valid = 0; /* no vlan */
362*3833Sxw161283 }
363*3833Sxw161283
364*3833Sxw161283 if (m0->b_cont) {
365*3833Sxw161283
366*3833Sxw161283 #ifdef SUN_KSTATS
367*3833Sxw161283 sa->sge->intr_cnt.tx_multi_mblks++;
368*3833Sxw161283 #endif
369*3833Sxw161283
370*3833Sxw161283 while (mp) {
371*3833Sxw161283 int lseg; /* added by ch_bind_dma_handle() */
372*3833Sxw161283 int len;
373*3833Sxw161283
374*3833Sxw161283 len = MLEN(mp);
375*3833Sxw161283 /* skip mlks with no data */
376*3833Sxw161283 if (len == 0) {
377*3833Sxw161283 mp = mp->b_cont;
378*3833Sxw161283 continue;
379*3833Sxw161283 }
380*3833Sxw161283
381*3833Sxw161283 /*
382*3833Sxw161283 * if we've run out of space on stack, then we
383*3833Sxw161283 * allocate a temporary buffer to hold the
384*3833Sxw161283 * information. This will kill the the performance,
385*3833Sxw161283 * but since it shouldn't really occur, we can live
386*3833Sxw161283 * with it. Since jumbo frames may map multiple
387*3833Sxw161283 * descriptors, we reallocate the hmp[] array before
388*3833Sxw161283 * we reach the end.
389*3833Sxw161283 */
390*3833Sxw161283 if (nseg >= (mseg-4)) {
391*3833Sxw161283 cmdQ_ce_t *buf;
392*3833Sxw161283 int j;
393*3833Sxw161283
394*3833Sxw161283 buf = kmem_alloc(sizeof (cmdQ_ce_t) * 2 * mseg,
395*3833Sxw161283 KM_SLEEP);
396*3833Sxw161283
397*3833Sxw161283 for (j = 0; j < nseg; j++)
398*3833Sxw161283 buf[j] = hmp[j];
399*3833Sxw161283
400*3833Sxw161283 if (cm_flg) {
401*3833Sxw161283 kmem_free(hmp,
402*3833Sxw161283 mseg * sizeof (cmdQ_ce_t));
403*3833Sxw161283 } else
404*3833Sxw161283 cm_flg = 1;
405*3833Sxw161283
406*3833Sxw161283 hmp = buf;
407*3833Sxw161283 mseg = 2*mseg;
408*3833Sxw161283
409*3833Sxw161283 /*
410*3833Sxw161283 * We've used up ch table on stack
411*3833Sxw161283 */
412*3833Sxw161283 }
413*3833Sxw161283
414*3833Sxw161283 #if defined(__sparc)
415*3833Sxw161283 if (sa->ch_config.enable_dvma) {
416*3833Sxw161283 lseg = ch_bind_dvma_handle(sa, len,
417*3833Sxw161283 (void *)mp->b_rptr,
418*3833Sxw161283 &hmp[nseg], mseg - nseg);
419*3833Sxw161283 if (lseg == NULL) {
420*3833Sxw161283 sa->sge->intr_cnt.tx_no_dvma1++;
421*3833Sxw161283 if ((lseg = ch_bind_dma_handle(sa, len,
422*3833Sxw161283 (void *)mp->b_rptr,
423*3833Sxw161283 &hmp[nseg],
424*3833Sxw161283 mseg - nseg)) == NULL) {
425*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma1++;
426*3833Sxw161283
427*3833Sxw161283 /*
428*3833Sxw161283 * ran out of space. Gonna bale
429*3833Sxw161283 */
430*3833Sxw161283 rv = 0;
431*3833Sxw161283
432*3833Sxw161283 /*
433*3833Sxw161283 * we may have processed
434*3833Sxw161283 * previous mblks and have
435*3833Sxw161283 * descriptors. If so, we need
436*3833Sxw161283 * to free the meta struct
437*3833Sxw161283 * entries before freeing
438*3833Sxw161283 * the mblk.
439*3833Sxw161283 */
440*3833Sxw161283 if (nseg)
441*3833Sxw161283 goto error;
442*3833Sxw161283 goto error1;
443*3833Sxw161283 }
444*3833Sxw161283 }
445*3833Sxw161283 } else {
446*3833Sxw161283 lseg = ch_bind_dma_handle(sa, len,
447*3833Sxw161283 (void *)mp->b_rptr, &hmp[nseg],
448*3833Sxw161283 mseg - nseg);
449*3833Sxw161283 if (lseg == NULL) {
450*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma1++;
451*3833Sxw161283
452*3833Sxw161283 /*
453*3833Sxw161283 * ran out of space. Gona bale
454*3833Sxw161283 */
455*3833Sxw161283 rv = 0;
456*3833Sxw161283
457*3833Sxw161283 /*
458*3833Sxw161283 * we may have processed previous
459*3833Sxw161283 * mblks and have descriptors. If so,
460*3833Sxw161283 * we need to free the meta struct
461*3833Sxw161283 * entries before freeing the mblk.
462*3833Sxw161283 */
463*3833Sxw161283 if (nseg)
464*3833Sxw161283 goto error;
465*3833Sxw161283 goto error1;
466*3833Sxw161283 }
467*3833Sxw161283 }
468*3833Sxw161283 #else /* defined(__sparc) */
469*3833Sxw161283 lseg = ch_bind_dma_handle(sa, len,
470*3833Sxw161283 (void *)mp->b_rptr, &hmp[nseg],
471*3833Sxw161283 mseg - nseg);
472*3833Sxw161283 if (lseg == NULL) {
473*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma1++;
474*3833Sxw161283
475*3833Sxw161283 /*
476*3833Sxw161283 * ran out of space. Gona bale
477*3833Sxw161283 */
478*3833Sxw161283 rv = 0;
479*3833Sxw161283
480*3833Sxw161283 /*
481*3833Sxw161283 * we may have processed previous mblks and
482*3833Sxw161283 * have descriptors. If so, we need to free
483*3833Sxw161283 * the meta struct entries before freeing
484*3833Sxw161283 * the mblk.
485*3833Sxw161283 */
486*3833Sxw161283 if (nseg)
487*3833Sxw161283 goto error;
488*3833Sxw161283 goto error1;
489*3833Sxw161283 }
490*3833Sxw161283 #endif /* defined(__sparc) */
491*3833Sxw161283 nseg += lseg;
492*3833Sxw161283 mp = mp->b_cont;
493*3833Sxw161283 }
494*3833Sxw161283
495*3833Sxw161283 /*
496*3833Sxw161283 * SHOULD NEVER OCCUR, BUT...
497*3833Sxw161283 * no data if nseg 0 or
498*3833Sxw161283 * nseg 1 and a CPL mblk (CPL mblk only with offload mode)
499*3833Sxw161283 * and no data
500*3833Sxw161283 */
501*3833Sxw161283 if ((nseg == 0) || (freeme && (nseg == 1))) {
502*3833Sxw161283 rv = 0;
503*3833Sxw161283 goto error1;
504*3833Sxw161283 }
505*3833Sxw161283
506*3833Sxw161283 } else {
507*3833Sxw161283 int len;
508*3833Sxw161283
509*3833Sxw161283 /* we assume that we always have data with one packet */
510*3833Sxw161283 len = MLEN(mp);
511*3833Sxw161283
512*3833Sxw161283 #if defined(__sparc)
513*3833Sxw161283 if (sa->ch_config.enable_dvma) {
514*3833Sxw161283 nseg = ch_bind_dvma_handle(sa, len,
515*3833Sxw161283 (void *)mp->b_rptr,
516*3833Sxw161283 &hmp[0], 16);
517*3833Sxw161283 if (nseg == NULL) {
518*3833Sxw161283 sa->sge->intr_cnt.tx_no_dvma2++;
519*3833Sxw161283 nseg = ch_bind_dma_handle(sa, len,
520*3833Sxw161283 (void *)mp->b_rptr,
521*3833Sxw161283 &hmp[0], 16);
522*3833Sxw161283 if (nseg == NULL) {
523*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma2++;
524*3833Sxw161283
525*3833Sxw161283 /*
526*3833Sxw161283 * ran out of space. Gona bale
527*3833Sxw161283 */
528*3833Sxw161283 rv = 0;
529*3833Sxw161283 goto error1;
530*3833Sxw161283 }
531*3833Sxw161283 }
532*3833Sxw161283 } else {
533*3833Sxw161283 nseg = ch_bind_dma_handle(sa, len,
534*3833Sxw161283 (void *)mp->b_rptr, &hmp[0], 16);
535*3833Sxw161283 if (nseg == NULL) {
536*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma2++;
537*3833Sxw161283
538*3833Sxw161283 /*
539*3833Sxw161283 * ran out of space. Gona bale
540*3833Sxw161283 */
541*3833Sxw161283 rv = 0;
542*3833Sxw161283 goto error1;
543*3833Sxw161283 }
544*3833Sxw161283 }
545*3833Sxw161283 #else /* defined(__sparc) */
546*3833Sxw161283 nseg = ch_bind_dma_handle(sa, len,
547*3833Sxw161283 (void *)mp->b_rptr, &hmp[0], 16);
548*3833Sxw161283 if (nseg == NULL) {
549*3833Sxw161283 sa->sge->intr_cnt.tx_no_dma2++;
550*3833Sxw161283
551*3833Sxw161283 /*
552*3833Sxw161283 * ran out of space. Gona bale
553*3833Sxw161283 */
554*3833Sxw161283 rv = 0;
555*3833Sxw161283 goto error1;
556*3833Sxw161283 }
557*3833Sxw161283 #endif /* defined(__sparc) */
558*3833Sxw161283
559*3833Sxw161283 /*
560*3833Sxw161283 * dummy arp message to handle PR3309 & PR2928
561*3833Sxw161283 */
562*3833Sxw161283 if (flg & CH_ARP)
563*3833Sxw161283 hmp->ce_flg |= DH_ARP;
564*3833Sxw161283 }
565*3833Sxw161283
566*3833Sxw161283 if (sge_data_out(sa->sge, 0, m0, hmp, nseg, flg) == 0) {
567*3833Sxw161283 if (cm_flg)
568*3833Sxw161283 kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
569*3833Sxw161283 return (0);
570*3833Sxw161283 }
571*3833Sxw161283
572*3833Sxw161283 /*
573*3833Sxw161283 * set a flag so we'll restart upper layer when
574*3833Sxw161283 * resources become available.
575*3833Sxw161283 */
576*3833Sxw161283 if ((flg & CH_ARP) == 0)
577*3833Sxw161283 sa->ch_blked = 1;
578*3833Sxw161283 rv = 1;
579*3833Sxw161283
580*3833Sxw161283 error:
581*3833Sxw161283 /*
582*3833Sxw161283 * unmap the physical addresses allocated earlier.
583*3833Sxw161283 */
584*3833Sxw161283 cmp = hmp;
585*3833Sxw161283 for (--nseg; nseg >= 0; nseg--) {
586*3833Sxw161283 if (cmp->ce_dh) {
587*3833Sxw161283 if (cmp->ce_flg == DH_DMA)
588*3833Sxw161283 ch_unbind_dma_handle(sa, cmp->ce_dh);
589*3833Sxw161283 #if defined(__sparc)
590*3833Sxw161283 else
591*3833Sxw161283 ch_unbind_dvma_handle(sa, cmp->ce_dh);
592*3833Sxw161283 #endif
593*3833Sxw161283 }
594*3833Sxw161283 cmp++;
595*3833Sxw161283 }
596*3833Sxw161283
597*3833Sxw161283 error1:
598*3833Sxw161283
599*3833Sxw161283 /* free the temporary array */
600*3833Sxw161283 if (cm_flg)
601*3833Sxw161283 kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
602*3833Sxw161283
603*3833Sxw161283 /*
604*3833Sxw161283 * if we've allocated an mblk above, then we need to free it
605*3833Sxw161283 * before returning. This is safe since we haven't done anything to
606*3833Sxw161283 * the original message. The caller, gld, will still have a pointer
607*3833Sxw161283 * to the original mblk.
608*3833Sxw161283 */
609*3833Sxw161283 if (rv == 1) {
610*3833Sxw161283 if (freeme) {
611*3833Sxw161283 /* we had to allocate an mblk. Free it. */
612*3833Sxw161283 freeb(m0);
613*3833Sxw161283 } else {
614*3833Sxw161283 /* adjust the mblk back to original start */
615*3833Sxw161283 if (flg & CH_NO_CPL)
616*3833Sxw161283 m0->b_rptr += SZ_CPL_TX_PKT;
617*3833Sxw161283 }
618*3833Sxw161283 } else {
619*3833Sxw161283 freemsg(m0);
620*3833Sxw161283 sa->oerr++;
621*3833Sxw161283 }
622*3833Sxw161283
623*3833Sxw161283 return (rv);
624*3833Sxw161283 }
625*3833Sxw161283
626*3833Sxw161283 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
627*3833Sxw161283 void
pe_set_mac(ch_t * sa,unsigned char * ac_enaddr)628*3833Sxw161283 pe_set_mac(ch_t *sa, unsigned char *ac_enaddr)
629*3833Sxw161283 {
630*3833Sxw161283 sa->port[0].mac->ops->macaddress_set(sa->port[0].mac, ac_enaddr);
631*3833Sxw161283 }
632*3833Sxw161283
633*3833Sxw161283 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
634*3833Sxw161283 unsigned char *
pe_get_mac(ch_t * sa)635*3833Sxw161283 pe_get_mac(ch_t *sa)
636*3833Sxw161283 {
637*3833Sxw161283 return (sa->port[0].enaddr);
638*3833Sxw161283 }
639*3833Sxw161283
640*3833Sxw161283 /* KLUDGE ALERT. HARD WIRED TO ONE PORT */
641*3833Sxw161283 void
pe_set_promiscuous(ch_t * sa,int flag)642*3833Sxw161283 pe_set_promiscuous(ch_t *sa, int flag)
643*3833Sxw161283 {
644*3833Sxw161283 struct cmac *mac = sa->port[0].mac;
645*3833Sxw161283 struct t1_rx_mode rm;
646*3833Sxw161283
647*3833Sxw161283 switch (flag) {
648*3833Sxw161283 case 0: /* turn off promiscuous mode */
649*3833Sxw161283 sa->ch_flags &= ~(PEPROMISC|PEALLMULTI);
650*3833Sxw161283 break;
651*3833Sxw161283
652*3833Sxw161283 case 1: /* turn on promiscuous mode */
653*3833Sxw161283 sa->ch_flags |= PEPROMISC;
654*3833Sxw161283 break;
655*3833Sxw161283
656*3833Sxw161283 case 2: /* turn on multicast reception */
657*3833Sxw161283 sa->ch_flags |= PEALLMULTI;
658*3833Sxw161283 break;
659*3833Sxw161283 }
660*3833Sxw161283
661*3833Sxw161283 mutex_enter(&sa->ch_mc_lck);
662*3833Sxw161283 rm.chp = sa;
663*3833Sxw161283 rm.mc = sa->ch_mc;
664*3833Sxw161283
665*3833Sxw161283 mac->ops->set_rx_mode(mac, &rm);
666*3833Sxw161283 mutex_exit(&sa->ch_mc_lck);
667*3833Sxw161283 }
668*3833Sxw161283
669*3833Sxw161283 int
pe_set_mc(ch_t * sa,uint8_t * ep,int flg)670*3833Sxw161283 pe_set_mc(ch_t *sa, uint8_t *ep, int flg)
671*3833Sxw161283 {
672*3833Sxw161283 struct cmac *mac = sa->port[0].mac;
673*3833Sxw161283 struct t1_rx_mode rm;
674*3833Sxw161283
675*3833Sxw161283 if (flg == GLD_MULTI_ENABLE) {
676*3833Sxw161283 ch_mc_t *mcp;
677*3833Sxw161283
678*3833Sxw161283 mcp = (ch_mc_t *)kmem_zalloc(sizeof (struct ch_mc),
679*3833Sxw161283 KM_NOSLEEP);
680*3833Sxw161283 if (mcp == NULL)
681*3833Sxw161283 return (GLD_NORESOURCES);
682*3833Sxw161283
683*3833Sxw161283 bcopy(ep, &mcp->cmc_mca, 6);
684*3833Sxw161283
685*3833Sxw161283 mutex_enter(&sa->ch_mc_lck);
686*3833Sxw161283 mcp->cmc_next = sa->ch_mc;
687*3833Sxw161283 sa->ch_mc = mcp;
688*3833Sxw161283 sa->ch_mc_cnt++;
689*3833Sxw161283 mutex_exit(&sa->ch_mc_lck);
690*3833Sxw161283
691*3833Sxw161283 } else if (flg == GLD_MULTI_DISABLE) {
692*3833Sxw161283 ch_mc_t **p = &sa->ch_mc;
693*3833Sxw161283 ch_mc_t *q = NULL;
694*3833Sxw161283
695*3833Sxw161283 mutex_enter(&sa->ch_mc_lck);
696*3833Sxw161283 p = &sa->ch_mc;
697*3833Sxw161283 while (*p) {
698*3833Sxw161283 if (bcmp(ep, (*p)->cmc_mca, 6) == 0) {
699*3833Sxw161283 q = *p;
700*3833Sxw161283 *p = (*p)->cmc_next;
701*3833Sxw161283 kmem_free(q, sizeof (*q));
702*3833Sxw161283 sa->ch_mc_cnt--;
703*3833Sxw161283 break;
704*3833Sxw161283 }
705*3833Sxw161283
706*3833Sxw161283 p = &(*p)->cmc_next;
707*3833Sxw161283 }
708*3833Sxw161283 mutex_exit(&sa->ch_mc_lck);
709*3833Sxw161283
710*3833Sxw161283 if (q == NULL)
711*3833Sxw161283 return (GLD_BADARG);
712*3833Sxw161283 } else
713*3833Sxw161283 return (GLD_BADARG);
714*3833Sxw161283
715*3833Sxw161283 mutex_enter(&sa->ch_mc_lck);
716*3833Sxw161283 rm.chp = sa;
717*3833Sxw161283 rm.mc = sa->ch_mc;
718*3833Sxw161283
719*3833Sxw161283 mac->ops->set_rx_mode(mac, &rm);
720*3833Sxw161283 mutex_exit(&sa->ch_mc_lck);
721*3833Sxw161283
722*3833Sxw161283 return (GLD_SUCCESS);
723*3833Sxw161283 }
724*3833Sxw161283
725*3833Sxw161283 /*
726*3833Sxw161283 * return: speed - bandwidth of interface
727*3833Sxw161283 * return: intrcnt - # interrupts
728*3833Sxw161283 * return: norcvbuf - # recedived packets dropped by driver
729*3833Sxw161283 * return: oerrors - # bad send packets
730*3833Sxw161283 * return: ierrors - # bad receive packets
731*3833Sxw161283 * return: underrun - # bad underrun xmit packets
732*3833Sxw161283 * return: overrun - # bad overrun recv packets
733*3833Sxw161283 * return: framing - # bad aligned recv packets
734*3833Sxw161283 * return: crc - # bad FCS (crc) recv packets
735*3833Sxw161283 * return: carrier - times carrier was lost
736*3833Sxw161283 * return: collisions - # xmit collisions
737*3833Sxw161283 * return: xcollisions - # xmit pkts dropped due to collisions
738*3833Sxw161283 * return: late - # late xmit collisions
739*3833Sxw161283 * return: defer - # deferred xmit packets
740*3833Sxw161283 * return: xerrs - # xmit dropped packets
741*3833Sxw161283 * return: rerrs - # recv dropped packets
742*3833Sxw161283 * return: toolong - # recv pkts too long
743*3833Sxw161283 * return: runt - # recv runt pkts
744*3833Sxw161283 * return: multixmt - # multicast pkts xmitted
745*3833Sxw161283 * return: multircv - # multicast pkts recved
746*3833Sxw161283 * return: brdcstxmt - # broadcast pkts xmitted
747*3833Sxw161283 * return: brdcstrcv - # broadcast pkts rcv
748*3833Sxw161283 */
749*3833Sxw161283
750*3833Sxw161283 int
pe_get_stats(ch_t * sa,uint64_t * speed,uint32_t * intrcnt,uint32_t * norcvbuf,uint32_t * oerrors,uint32_t * ierrors,uint32_t * underrun,uint32_t * overrun,uint32_t * framing,uint32_t * crc,uint32_t * carrier,uint32_t * collisions,uint32_t * xcollisions,uint32_t * late,uint32_t * defer,uint32_t * xerrs,uint32_t * rerrs,uint32_t * toolong,uint32_t * runt,ulong_t * multixmt,ulong_t * multircv,ulong_t * brdcstxmt,ulong_t * brdcstrcv)751*3833Sxw161283 pe_get_stats(ch_t *sa, uint64_t *speed, uint32_t *intrcnt, uint32_t *norcvbuf,
752*3833Sxw161283 uint32_t *oerrors, uint32_t *ierrors, uint32_t *underrun,
753*3833Sxw161283 uint32_t *overrun, uint32_t *framing, uint32_t *crc,
754*3833Sxw161283 uint32_t *carrier, uint32_t *collisions, uint32_t *xcollisions,
755*3833Sxw161283 uint32_t *late, uint32_t *defer, uint32_t *xerrs, uint32_t *rerrs,
756*3833Sxw161283 uint32_t *toolong, uint32_t *runt, ulong_t *multixmt, ulong_t *multircv,
757*3833Sxw161283 ulong_t *brdcstxmt, ulong_t *brdcstrcv)
758*3833Sxw161283 {
759*3833Sxw161283 struct pe_port_t *pt;
760*3833Sxw161283 int line_speed;
761*3833Sxw161283 int line_duplex;
762*3833Sxw161283 int line_is_active;
763*3833Sxw161283 uint64_t v;
764*3833Sxw161283 const struct cmac_statistics *sp;
765*3833Sxw161283
766*3833Sxw161283 pt = &(sa->port[0]);
767*3833Sxw161283 (void) pt->phy->ops->get_link_status(pt->phy,
768*3833Sxw161283 &line_is_active, &line_speed, &line_duplex, NULL);
769*3833Sxw161283
770*3833Sxw161283 switch (line_speed) {
771*3833Sxw161283 case SPEED_10:
772*3833Sxw161283 *speed = 10000000;
773*3833Sxw161283 break;
774*3833Sxw161283 case SPEED_100:
775*3833Sxw161283 *speed = 100000000;
776*3833Sxw161283 break;
777*3833Sxw161283 case SPEED_1000:
778*3833Sxw161283 *speed = 1000000000;
779*3833Sxw161283 break;
780*3833Sxw161283 case SPEED_10000:
781*3833Sxw161283 /*
782*3833Sxw161283 * kludge to get 10,000,000,000 constant (and keep
783*3833Sxw161283 * compiler happy).
784*3833Sxw161283 */
785*3833Sxw161283 v = 10000000;
786*3833Sxw161283 v *= 1000;
787*3833Sxw161283 *speed = v;
788*3833Sxw161283 break;
789*3833Sxw161283 default:
790*3833Sxw161283 goto error;
791*3833Sxw161283 }
792*3833Sxw161283
793*3833Sxw161283 *intrcnt = sa->isr_intr;
794*3833Sxw161283 *norcvbuf = sa->norcvbuf;
795*3833Sxw161283
796*3833Sxw161283 sp = sa->port[0].mac->ops->statistics_update(sa->port[0].mac,
797*3833Sxw161283 MAC_STATS_UPDATE_FULL);
798*3833Sxw161283
799*3833Sxw161283 *ierrors = sp->RxOctetsBad;
800*3833Sxw161283
801*3833Sxw161283 /*
802*3833Sxw161283 * not sure this is correct. # aborted at driver level +
803*3833Sxw161283 * # at hardware level
804*3833Sxw161283 */
805*3833Sxw161283 *oerrors = sa->oerr + sp->TxFramesAbortedDueToXSCollisions +
806*3833Sxw161283 sp->TxUnderrun + sp->TxLengthErrors +
807*3833Sxw161283 sp->TxInternalMACXmitError +
808*3833Sxw161283 sp->TxFramesWithExcessiveDeferral +
809*3833Sxw161283 sp->TxFCSErrors;
810*3833Sxw161283
811*3833Sxw161283 *underrun = sp->TxUnderrun;
812*3833Sxw161283 *overrun = sp->RxFrameTooLongErrors;
813*3833Sxw161283 *framing = sp->RxAlignErrors;
814*3833Sxw161283 *crc = sp->RxFCSErrors;
815*3833Sxw161283 *carrier = 0; /* need to find this */
816*3833Sxw161283 *collisions = sp->TxTotalCollisions;
817*3833Sxw161283 *xcollisions = sp->TxFramesAbortedDueToXSCollisions;
818*3833Sxw161283 *late = sp->TxLateCollisions;
819*3833Sxw161283 *defer = sp->TxFramesWithDeferredXmissions;
820*3833Sxw161283 *xerrs = sp->TxUnderrun + sp->TxLengthErrors +
821*3833Sxw161283 sp->TxInternalMACXmitError + sp->TxFCSErrors;
822*3833Sxw161283 *rerrs = sp->RxSymbolErrors + sp->RxSequenceErrors + sp->RxRuntErrors +
823*3833Sxw161283 sp->RxJabberErrors + sp->RxInternalMACRcvError +
824*3833Sxw161283 sp->RxInRangeLengthErrors + sp->RxOutOfRangeLengthField;
825*3833Sxw161283 *toolong = sp->RxFrameTooLongErrors;
826*3833Sxw161283 *runt = sp->RxRuntErrors;
827*3833Sxw161283
828*3833Sxw161283 *multixmt = sp->TxMulticastFramesOK;
829*3833Sxw161283 *multircv = sp->RxMulticastFramesOK;
830*3833Sxw161283 *brdcstxmt = sp->TxBroadcastFramesOK;
831*3833Sxw161283 *brdcstrcv = sp->RxBroadcastFramesOK;
832*3833Sxw161283
833*3833Sxw161283 return (0);
834*3833Sxw161283
835*3833Sxw161283 error:
836*3833Sxw161283 *speed = 0;
837*3833Sxw161283 *intrcnt = 0;
838*3833Sxw161283 *norcvbuf = 0;
839*3833Sxw161283 *norcvbuf = 0;
840*3833Sxw161283 *oerrors = 0;
841*3833Sxw161283 *ierrors = 0;
842*3833Sxw161283 *underrun = 0;
843*3833Sxw161283 *overrun = 0;
844*3833Sxw161283 *framing = 0;
845*3833Sxw161283 *crc = 0;
846*3833Sxw161283 *carrier = 0;
847*3833Sxw161283 *collisions = 0;
848*3833Sxw161283 *xcollisions = 0;
849*3833Sxw161283 *late = 0;
850*3833Sxw161283 *defer = 0;
851*3833Sxw161283 *xerrs = 0;
852*3833Sxw161283 *rerrs = 0;
853*3833Sxw161283 *toolong = 0;
854*3833Sxw161283 *runt = 0;
855*3833Sxw161283 *multixmt = 0;
856*3833Sxw161283 *multircv = 0;
857*3833Sxw161283 *brdcstxmt = 0;
858*3833Sxw161283 *brdcstrcv = 0;
859*3833Sxw161283
860*3833Sxw161283 return (1);
861*3833Sxw161283 }
862*3833Sxw161283
863*3833Sxw161283 uint32_t ch_gtm = 0; /* Default: Global Tunnel Mode off */
864*3833Sxw161283 uint32_t ch_global_config = 0x07000000; /* Default: errors, warnings, status */
865*3833Sxw161283 uint32_t ch_is_asic = 0; /* Default: non-ASIC */
866*3833Sxw161283 uint32_t ch_link_speed = PE_LINK_SPEED_AUTONEG; /* Default: auto-negoiate */
867*3833Sxw161283 uint32_t ch_num_of_ports = 1; /* Default: 1 port */
868*3833Sxw161283 uint32_t ch_tp_reset_cm = 1; /* Default: reset CM memory map */
869*3833Sxw161283 uint32_t ch_phy_tx_fifo = 0; /* Default: 0 phy tx fifo depth */
870*3833Sxw161283 uint32_t ch_phy_rx_fifo = 0; /* Default: 0 phy rx fifo depth */
871*3833Sxw161283 uint32_t ch_phy_force_master = 1; /* Default: link always master mode */
872*3833Sxw161283 uint32_t ch_mc5_rtbl_size = 2048; /* Default: TCAM routing table size */
873*3833Sxw161283 uint32_t ch_mc5_dbsvr_size = 128; /* Default: TCAM server size */
874*3833Sxw161283 uint32_t ch_mc5_parity = 1; /* Default: parity error checking */
875*3833Sxw161283 uint32_t ch_mc5_issue_syn = 0; /* Default: Allow transaction overlap */
876*3833Sxw161283 uint32_t ch_packet_tracing = 0; /* Default: no packet tracing */
877*3833Sxw161283 uint32_t ch_server_region_len =
878*3833Sxw161283 DEFAULT_SERVER_REGION_LEN;
879*3833Sxw161283 uint32_t ch_rt_region_len =
880*3833Sxw161283 DEFAULT_RT_REGION_LEN;
881*3833Sxw161283 uint32_t ch_offload_ip_cksum = 0; /* Default: no checksum offloading */
882*3833Sxw161283 uint32_t ch_offload_udp_cksum = 1; /* Default: offload UDP ckecksum */
883*3833Sxw161283 uint32_t ch_offload_tcp_cksum = 1; /* Default: offload TCP checksum */
884*3833Sxw161283 uint32_t ch_sge_cmdq_threshold = 0; /* Default: threshold 0 */
885*3833Sxw161283 uint32_t ch_sge_flq_threshold = 0; /* Default: SGE flq threshold */
886*3833Sxw161283 uint32_t ch_sge_cmdq0_cnt = /* Default: cmd queue 0 size */
887*3833Sxw161283 SGE_CMDQ0_CNT;
888*3833Sxw161283 uint32_t ch_sge_cmdq1_cnt = /* Default: cmd queue 1 size */
889*3833Sxw161283 SGE_CMDQ0_CNT;
890*3833Sxw161283 uint32_t ch_sge_flq0_cnt = /* Default: free list queue-0 length */
891*3833Sxw161283 SGE_FLQ0_CNT;
892*3833Sxw161283 uint32_t ch_sge_flq1_cnt = /* Default: free list queue-1 length */
893*3833Sxw161283 SGE_FLQ0_CNT;
894*3833Sxw161283 uint32_t ch_sge_respq_cnt = /* Default: reqsponse queue size */
895*3833Sxw161283 SGE_RESPQ_CNT;
896*3833Sxw161283 uint32_t ch_stats = 1; /* Default: Automatic Update MAC stats */
897*3833Sxw161283 uint32_t ch_tx_delay_us = 0; /* Default: No Msec delay to Tx pkts */
898*3833Sxw161283 int32_t ch_chip = -1; /* Default: use hardware lookup tbl */
899*3833Sxw161283 uint32_t ch_exit_early = 0; /* Default: complete initialization */
900*3833Sxw161283 uint32_t ch_rb_num_of_entries = 1000; /* Default: number ring buffer entries */
901*3833Sxw161283 uint32_t ch_rb_size_of_entries = 64; /* Default: ring buffer entry size */
902*3833Sxw161283 uint32_t ch_rb_flag = 1; /* Default: ring buffer flag */
903*3833Sxw161283 uint32_t ch_type;
904*3833Sxw161283 uint64_t ch_cat_opt0 = 0;
905*3833Sxw161283 uint64_t ch_cat_opt1 = 0;
906*3833Sxw161283 uint32_t ch_timer_delay = 0; /* Default: use value from board entry */
907*3833Sxw161283
908*3833Sxw161283 int
pe_attach(ch_t * chp)909*3833Sxw161283 pe_attach(ch_t *chp)
910*3833Sxw161283 {
911*3833Sxw161283 int return_val = 1;
912*3833Sxw161283 const struct board_info *bi;
913*3833Sxw161283 uint32_t pcix_cmd;
914*3833Sxw161283
915*3833Sxw161283 (void) ch_set_config_data(chp);
916*3833Sxw161283
917*3833Sxw161283 bi = pe_sa_init(chp);
918*3833Sxw161283 if (bi == 0)
919*3833Sxw161283 return (1);
920*3833Sxw161283
921*3833Sxw161283 if (t1_init_sw_modules(chp, bi) < 0)
922*3833Sxw161283 return (1);
923*3833Sxw161283
924*3833Sxw161283 if (pe_small_rbuf_pool_init(chp) == NULL)
925*3833Sxw161283 return (1);
926*3833Sxw161283
927*3833Sxw161283 if (pe_big_rbuf_pool_init(chp) == NULL)
928*3833Sxw161283 return (1);
929*3833Sxw161283
930*3833Sxw161283 /*
931*3833Sxw161283 * We gain significaint performance improvements when we
932*3833Sxw161283 * increase the PCI's maximum memory read byte count to
933*3833Sxw161283 * 2K(HW doesn't support 4K at this time) and set the PCI's
934*3833Sxw161283 * maximum outstanding split transactions to 4. We want to do
935*3833Sxw161283 * this for 10G. Done by software utility.
936*3833Sxw161283 */
937*3833Sxw161283
938*3833Sxw161283 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
939*3833Sxw161283 (void) t1_os_pci_read_config_4(chp, A_PCICFG_PCIX_CMD,
940*3833Sxw161283 &pcix_cmd);
941*3833Sxw161283 /*
942*3833Sxw161283 * if the burstsize is set, then use it instead of default
943*3833Sxw161283 */
944*3833Sxw161283 if (chp->ch_config.burstsize_set) {
945*3833Sxw161283 pcix_cmd &= ~0xc0000;
946*3833Sxw161283 pcix_cmd |= (chp->ch_config.burstsize << 18);
947*3833Sxw161283 }
948*3833Sxw161283 /*
949*3833Sxw161283 * if the split transaction count is set, then use it.
950*3833Sxw161283 */
951*3833Sxw161283 if (chp->ch_config.transaction_cnt_set) {
952*3833Sxw161283 pcix_cmd &= ~ 0x700000;
953*3833Sxw161283 pcix_cmd |= (chp->ch_config.transaction_cnt << 20);
954*3833Sxw161283 }
955*3833Sxw161283
956*3833Sxw161283 /*
957*3833Sxw161283 * set ralaxed ordering flag as configured in chxge.conf
958*3833Sxw161283 */
959*3833Sxw161283 pcix_cmd |= (chp->ch_config.relaxed_ordering << 17);
960*3833Sxw161283
961*3833Sxw161283 (void) t1_os_pci_write_config_4(chp, A_PCICFG_PCIX_CMD,
962*3833Sxw161283 pcix_cmd);
963*3833Sxw161283 }
964*3833Sxw161283
965*3833Sxw161283 /*
966*3833Sxw161283 * set the latency time to F8 for 10G cards.
967*3833Sxw161283 * Done by software utiltiy.
968*3833Sxw161283 */
969*3833Sxw161283 if (enable_latency_timer) {
970*3833Sxw161283 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
971*3833Sxw161283 (void) t1_os_pci_write_config_4(chp, 0xc, 0xf800);
972*3833Sxw161283 }
973*3833Sxw161283 }
974*3833Sxw161283
975*3833Sxw161283 /*
976*3833Sxw161283 * update mtu table (regs: 0x404 - 0x420) with bigger values than
977*3833Sxw161283 * default.
978*3833Sxw161283 */
979*3833Sxw161283 update_mtu_tab(chp);
980*3833Sxw161283
981*3833Sxw161283 /*
982*3833Sxw161283 * Clear all interrupts now. Don't enable
983*3833Sxw161283 * them until later.
984*3833Sxw161283 */
985*3833Sxw161283 t1_interrupts_clear(chp);
986*3833Sxw161283
987*3833Sxw161283 /*
988*3833Sxw161283 * Function succeeded.
989*3833Sxw161283 */
990*3833Sxw161283 return_val = 0;
991*3833Sxw161283
992*3833Sxw161283 return (return_val);
993*3833Sxw161283 }
994*3833Sxw161283
995*3833Sxw161283 /*
996*3833Sxw161283 * DESC: Read variables set in /boot/loader.conf and save
997*3833Sxw161283 * them internally. These internal values are then
998*3833Sxw161283 * used to make decisions at run-time on behavior thus
999*3833Sxw161283 * allowing a certain level of customization.
1000*3833Sxw161283 * OUT: p_config - pointer to config structure that
1001*3833Sxw161283 * contains all of the new values.
1002*3833Sxw161283 * RTN: 0 - Success;
1003*3833Sxw161283 */
1004*3833Sxw161283 static int
ch_set_config_data(ch_t * chp)1005*3833Sxw161283 ch_set_config_data(ch_t *chp)
1006*3833Sxw161283 {
1007*3833Sxw161283 pe_config_data_t *p_config = (pe_config_data_t *)&chp->config_data;
1008*3833Sxw161283
1009*3833Sxw161283 bzero(p_config, sizeof (pe_config_data_t));
1010*3833Sxw161283
1011*3833Sxw161283 /*
1012*3833Sxw161283 * Global Tunnel Mode configuration
1013*3833Sxw161283 */
1014*3833Sxw161283 p_config->gtm = ch_gtm;
1015*3833Sxw161283
1016*3833Sxw161283 p_config->global_config = ch_global_config;
1017*3833Sxw161283
1018*3833Sxw161283 if (p_config->gtm)
1019*3833Sxw161283 p_config->global_config |= CFGMD_TUNNEL;
1020*3833Sxw161283
1021*3833Sxw161283 p_config->tp_reset_cm = ch_tp_reset_cm;
1022*3833Sxw161283 p_config->is_asic = ch_is_asic;
1023*3833Sxw161283
1024*3833Sxw161283 /*
1025*3833Sxw161283 * MC5 configuration.
1026*3833Sxw161283 */
1027*3833Sxw161283 p_config->mc5_rtbl_size = ch_mc5_rtbl_size;
1028*3833Sxw161283 p_config->mc5_dbsvr_size = ch_mc5_dbsvr_size;
1029*3833Sxw161283 p_config->mc5_parity = ch_mc5_parity;
1030*3833Sxw161283 p_config->mc5_issue_syn = ch_mc5_issue_syn;
1031*3833Sxw161283
1032*3833Sxw161283 p_config->offload_ip_cksum = ch_offload_ip_cksum;
1033*3833Sxw161283 p_config->offload_udp_cksum = ch_offload_udp_cksum;
1034*3833Sxw161283 p_config->offload_tcp_cksum = ch_offload_tcp_cksum;
1035*3833Sxw161283
1036*3833Sxw161283 p_config->packet_tracing = ch_packet_tracing;
1037*3833Sxw161283
1038*3833Sxw161283 p_config->server_region_len = ch_server_region_len;
1039*3833Sxw161283 p_config->rt_region_len = ch_rt_region_len;
1040*3833Sxw161283
1041*3833Sxw161283 /*
1042*3833Sxw161283 * Link configuration.
1043*3833Sxw161283 *
1044*3833Sxw161283 * 5-auto-neg 2-1000Gbps; 1-100Gbps; 0-10Gbps
1045*3833Sxw161283 */
1046*3833Sxw161283 p_config->link_speed = ch_link_speed;
1047*3833Sxw161283 p_config->num_of_ports = ch_num_of_ports;
1048*3833Sxw161283
1049*3833Sxw161283 /*
1050*3833Sxw161283 * Catp options
1051*3833Sxw161283 */
1052*3833Sxw161283 p_config->cat_opt0 = ch_cat_opt0;
1053*3833Sxw161283 p_config->cat_opt1 = ch_cat_opt1;
1054*3833Sxw161283
1055*3833Sxw161283 /*
1056*3833Sxw161283 * SGE configuration.
1057*3833Sxw161283 */
1058*3833Sxw161283 p_config->sge_cmdq0_cnt = ch_sge_cmdq0_cnt;
1059*3833Sxw161283 p_config->sge_cmdq1_cnt = ch_sge_cmdq1_cnt;
1060*3833Sxw161283 p_config->sge_flq0_cnt = ch_sge_flq0_cnt;
1061*3833Sxw161283 p_config->sge_flq1_cnt = ch_sge_flq1_cnt;
1062*3833Sxw161283 p_config->sge_respq_cnt = ch_sge_respq_cnt;
1063*3833Sxw161283
1064*3833Sxw161283 p_config->phy_rx_fifo = ch_phy_rx_fifo;
1065*3833Sxw161283 p_config->phy_tx_fifo = ch_phy_tx_fifo;
1066*3833Sxw161283
1067*3833Sxw161283 p_config->sge_cmdq_threshold = ch_sge_cmdq_threshold;
1068*3833Sxw161283
1069*3833Sxw161283 p_config->sge_flq_threshold = ch_sge_flq_threshold;
1070*3833Sxw161283
1071*3833Sxw161283 p_config->phy_force_master = ch_phy_force_master;
1072*3833Sxw161283
1073*3833Sxw161283 p_config->rb_num_of_entries = ch_rb_num_of_entries;
1074*3833Sxw161283
1075*3833Sxw161283 p_config->rb_size_of_entries = ch_rb_size_of_entries;
1076*3833Sxw161283
1077*3833Sxw161283 p_config->rb_flag = ch_rb_flag;
1078*3833Sxw161283
1079*3833Sxw161283 p_config->exit_early = ch_exit_early;
1080*3833Sxw161283
1081*3833Sxw161283 p_config->chip = ch_chip;
1082*3833Sxw161283
1083*3833Sxw161283 p_config->stats = ch_stats;
1084*3833Sxw161283
1085*3833Sxw161283 p_config->tx_delay_us = ch_tx_delay_us;
1086*3833Sxw161283
1087*3833Sxw161283 return (0);
1088*3833Sxw161283 }
1089*3833Sxw161283
1090*3833Sxw161283 static const struct board_info *
pe_sa_init(ch_t * sa)1091*3833Sxw161283 pe_sa_init(ch_t *sa)
1092*3833Sxw161283 {
1093*3833Sxw161283 uint16_t device_id;
1094*3833Sxw161283 uint16_t device_subid;
1095*3833Sxw161283 const struct board_info *bi;
1096*3833Sxw161283
1097*3833Sxw161283 sa->config = sa->config_data.global_config;
1098*3833Sxw161283 device_id = pci_config_get16(sa->ch_hpci, 2);
1099*3833Sxw161283 device_subid = pci_config_get16(sa->ch_hpci, 0x2e);
1100*3833Sxw161283
1101*3833Sxw161283 bi = t1_get_board_info_from_ids(device_id, device_subid);
1102*3833Sxw161283 if (bi == NULL) {
1103*3833Sxw161283 cmn_err(CE_NOTE,
1104*3833Sxw161283 "The adapter with device_id %d %d is not supported.\n",
1105*3833Sxw161283 device_id, device_subid);
1106*3833Sxw161283 return (NULL);
1107*3833Sxw161283 }
1108*3833Sxw161283
1109*3833Sxw161283 if (t1_get_board_rev(sa, bi, &sa->params)) {
1110*3833Sxw161283 cmn_err(CE_NOTE, "unknown device_id %d %d\n",
1111*3833Sxw161283 device_id, device_subid);
1112*3833Sxw161283 return ((const struct board_info *)NULL);
1113*3833Sxw161283 }
1114*3833Sxw161283
1115*3833Sxw161283 return (bi);
1116*3833Sxw161283 }
1117*3833Sxw161283
1118*3833Sxw161283 /*
1119*3833Sxw161283 * allocate pool of small receive buffers (with vaddr & paddr) and
1120*3833Sxw161283 * receiver buffer control structure (ch_esb_t *rbp).
1121*3833Sxw161283 * XXX we should allow better tuning of the # of preallocated
1122*3833Sxw161283 * free buffers against the # of freelist entries.
1123*3833Sxw161283 */
1124*3833Sxw161283 static int
pe_small_rbuf_pool_init(ch_t * sa)1125*3833Sxw161283 pe_small_rbuf_pool_init(ch_t *sa)
1126*3833Sxw161283 {
1127*3833Sxw161283 int i;
1128*3833Sxw161283 ch_esb_t *rbp;
1129*3833Sxw161283 extern uint32_t sge_flq0_cnt;
1130*3833Sxw161283 extern uint32_t sge_flq1_cnt;
1131*3833Sxw161283 int size;
1132*3833Sxw161283 uint32_t j;
1133*3833Sxw161283
1134*3833Sxw161283 if (is_T2(sa))
1135*3833Sxw161283 size = sge_flq1_cnt * fl_sz_multiplier;
1136*3833Sxw161283 else
1137*3833Sxw161283 size = sge_flq0_cnt * fl_sz_multiplier;
1138*3833Sxw161283
1139*3833Sxw161283 mutex_init(&sa->ch_small_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1140*3833Sxw161283
1141*3833Sxw161283 mutex_enter(&in_use_l);
1142*3833Sxw161283 j = in_use_index++;
1143*3833Sxw161283 if (in_use_index >= SZ_INUSE)
1144*3833Sxw161283 in_use_index = 0;
1145*3833Sxw161283 mutex_exit(&in_use_l);
1146*3833Sxw161283
1147*3833Sxw161283 sa->ch_small_owner = NULL;
1148*3833Sxw161283 sa->ch_sm_index = j;
1149*3833Sxw161283 sa->ch_small_esb_free = NULL;
1150*3833Sxw161283 for (i = 0; i < size; i++) {
1151*3833Sxw161283 rbp = ch_alloc_small_esbbuf(sa, j);
1152*3833Sxw161283 if (rbp == NULL)
1153*3833Sxw161283 goto error;
1154*3833Sxw161283 /*
1155*3833Sxw161283 * add entry to free list
1156*3833Sxw161283 */
1157*3833Sxw161283 rbp->cs_next = sa->ch_small_esb_free;
1158*3833Sxw161283 sa->ch_small_esb_free = rbp;
1159*3833Sxw161283
1160*3833Sxw161283 /*
1161*3833Sxw161283 * add entry to owned list
1162*3833Sxw161283 */
1163*3833Sxw161283 rbp->cs_owner = sa->ch_small_owner;
1164*3833Sxw161283 sa->ch_small_owner = rbp;
1165*3833Sxw161283 }
1166*3833Sxw161283 return (1);
1167*3833Sxw161283
1168*3833Sxw161283 error:
1169*3833Sxw161283 sa->ch_small_owner = NULL;
1170*3833Sxw161283
1171*3833Sxw161283 /* free whatever we've already allocated */
1172*3833Sxw161283 pe_rbuf_pool_free(sa);
1173*3833Sxw161283
1174*3833Sxw161283 return (0);
1175*3833Sxw161283 }
1176*3833Sxw161283
1177*3833Sxw161283 /*
1178*3833Sxw161283 * allocate pool of receive buffers (with vaddr & paddr) and
1179*3833Sxw161283 * receiver buffer control structure (ch_esb_t *rbp).
1180*3833Sxw161283 * XXX we should allow better tuning of the # of preallocated
1181*3833Sxw161283 * free buffers against the # of freelist entries.
1182*3833Sxw161283 */
1183*3833Sxw161283 static int
pe_big_rbuf_pool_init(ch_t * sa)1184*3833Sxw161283 pe_big_rbuf_pool_init(ch_t *sa)
1185*3833Sxw161283 {
1186*3833Sxw161283 int i;
1187*3833Sxw161283 ch_esb_t *rbp;
1188*3833Sxw161283 extern uint32_t sge_flq0_cnt;
1189*3833Sxw161283 extern uint32_t sge_flq1_cnt;
1190*3833Sxw161283 int size;
1191*3833Sxw161283 uint32_t j;
1192*3833Sxw161283
1193*3833Sxw161283 if (is_T2(sa))
1194*3833Sxw161283 size = sge_flq0_cnt * fl_sz_multiplier;
1195*3833Sxw161283 else
1196*3833Sxw161283 size = sge_flq1_cnt * fl_sz_multiplier;
1197*3833Sxw161283
1198*3833Sxw161283 mutex_init(&sa->ch_big_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1199*3833Sxw161283
1200*3833Sxw161283 mutex_enter(&in_use_l);
1201*3833Sxw161283 j = in_use_index++;
1202*3833Sxw161283 if (in_use_index >= SZ_INUSE)
1203*3833Sxw161283 in_use_index = 0;
1204*3833Sxw161283 mutex_exit(&in_use_l);
1205*3833Sxw161283
1206*3833Sxw161283 sa->ch_big_owner = NULL;
1207*3833Sxw161283 sa->ch_big_index = j;
1208*3833Sxw161283 sa->ch_big_esb_free = NULL;
1209*3833Sxw161283 for (i = 0; i < size; i++) {
1210*3833Sxw161283 rbp = ch_alloc_big_esbbuf(sa, j);
1211*3833Sxw161283 if (rbp == NULL)
1212*3833Sxw161283 goto error;
1213*3833Sxw161283 rbp->cs_next = sa->ch_big_esb_free;
1214*3833Sxw161283 sa->ch_big_esb_free = rbp;
1215*3833Sxw161283
1216*3833Sxw161283 /*
1217*3833Sxw161283 * add entry to owned list
1218*3833Sxw161283 */
1219*3833Sxw161283 rbp->cs_owner = sa->ch_big_owner;
1220*3833Sxw161283 sa->ch_big_owner = rbp;
1221*3833Sxw161283 }
1222*3833Sxw161283 return (1);
1223*3833Sxw161283
1224*3833Sxw161283 error:
1225*3833Sxw161283 sa->ch_big_owner = NULL;
1226*3833Sxw161283
1227*3833Sxw161283 /* free whatever we've already allocated */
1228*3833Sxw161283 pe_rbuf_pool_free(sa);
1229*3833Sxw161283
1230*3833Sxw161283 return (0);
1231*3833Sxw161283 }
1232*3833Sxw161283
1233*3833Sxw161283 /*
1234*3833Sxw161283 * allocate receive buffer structure and dma mapped buffer (SGE_SM_BUF_SZ bytes)
1235*3833Sxw161283 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1236*3833Sxw161283 * is enabled.
1237*3833Sxw161283 */
1238*3833Sxw161283 static ch_esb_t *
ch_alloc_small_esbbuf(ch_t * sa,uint32_t i)1239*3833Sxw161283 ch_alloc_small_esbbuf(ch_t *sa, uint32_t i)
1240*3833Sxw161283 {
1241*3833Sxw161283 ch_esb_t *rbp;
1242*3833Sxw161283
1243*3833Sxw161283 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1244*3833Sxw161283 if (rbp == NULL) {
1245*3833Sxw161283 return ((ch_esb_t *)0);
1246*3833Sxw161283 }
1247*3833Sxw161283
1248*3833Sxw161283 #if BYTE_ORDER == BIG_ENDIAN
1249*3833Sxw161283 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_SMALN,
1250*3833Sxw161283 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1251*3833Sxw161283 #else
1252*3833Sxw161283 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_SMALN,
1253*3833Sxw161283 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1254*3833Sxw161283 #endif
1255*3833Sxw161283
1256*3833Sxw161283 if (rbp->cs_buf == NULL) {
1257*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1258*3833Sxw161283 return ((ch_esb_t *)0);
1259*3833Sxw161283 }
1260*3833Sxw161283
1261*3833Sxw161283 rbp->cs_sa = sa;
1262*3833Sxw161283 rbp->cs_index = i;
1263*3833Sxw161283
1264*3833Sxw161283 rbp->cs_frtn.free_func = (void (*)())&ch_small_rbuf_recycle;
1265*3833Sxw161283 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1266*3833Sxw161283
1267*3833Sxw161283 return (rbp);
1268*3833Sxw161283 }
1269*3833Sxw161283
1270*3833Sxw161283 /*
1271*3833Sxw161283 * allocate receive buffer structure and dma mapped buffer (SGE_BG_BUF_SZ bytes)
1272*3833Sxw161283 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1273*3833Sxw161283 * is enabled.
1274*3833Sxw161283 */
1275*3833Sxw161283 static ch_esb_t *
ch_alloc_big_esbbuf(ch_t * sa,uint32_t i)1276*3833Sxw161283 ch_alloc_big_esbbuf(ch_t *sa, uint32_t i)
1277*3833Sxw161283 {
1278*3833Sxw161283 ch_esb_t *rbp;
1279*3833Sxw161283
1280*3833Sxw161283 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1281*3833Sxw161283 if (rbp == NULL) {
1282*3833Sxw161283 return ((ch_esb_t *)0);
1283*3833Sxw161283 }
1284*3833Sxw161283
1285*3833Sxw161283 #if BYTE_ORDER == BIG_ENDIAN
1286*3833Sxw161283 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_BGALN,
1287*3833Sxw161283 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1288*3833Sxw161283 #else
1289*3833Sxw161283 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_BGALN,
1290*3833Sxw161283 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1291*3833Sxw161283 #endif
1292*3833Sxw161283
1293*3833Sxw161283 if (rbp->cs_buf == NULL) {
1294*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1295*3833Sxw161283 return ((ch_esb_t *)0);
1296*3833Sxw161283 }
1297*3833Sxw161283
1298*3833Sxw161283 rbp->cs_sa = sa;
1299*3833Sxw161283 rbp->cs_index = i;
1300*3833Sxw161283
1301*3833Sxw161283 rbp->cs_frtn.free_func = (void (*)())&ch_big_rbuf_recycle;
1302*3833Sxw161283 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1303*3833Sxw161283
1304*3833Sxw161283 return (rbp);
1305*3833Sxw161283 }
1306*3833Sxw161283
1307*3833Sxw161283 /*
1308*3833Sxw161283 * free entries on the receive buffer list.
1309*3833Sxw161283 */
1310*3833Sxw161283 void
pe_rbuf_pool_free(ch_t * sa)1311*3833Sxw161283 pe_rbuf_pool_free(ch_t *sa)
1312*3833Sxw161283 {
1313*3833Sxw161283 ch_esb_t *rbp;
1314*3833Sxw161283
1315*3833Sxw161283 mutex_enter(&sa->ch_small_esbl);
1316*3833Sxw161283
1317*3833Sxw161283 /*
1318*3833Sxw161283 * Now set-up the rest to commit suicide.
1319*3833Sxw161283 */
1320*3833Sxw161283 while (sa->ch_small_owner) {
1321*3833Sxw161283 rbp = sa->ch_small_owner;
1322*3833Sxw161283 sa->ch_small_owner = rbp->cs_owner;
1323*3833Sxw161283 rbp->cs_owner = NULL;
1324*3833Sxw161283 rbp->cs_flag = 1;
1325*3833Sxw161283 }
1326*3833Sxw161283
1327*3833Sxw161283 while ((rbp = sa->ch_small_esb_free) != NULL) {
1328*3833Sxw161283 /* advance head ptr to next entry */
1329*3833Sxw161283 sa->ch_small_esb_free = rbp->cs_next;
1330*3833Sxw161283 /* free private buffer allocated in ch_alloc_esbbuf() */
1331*3833Sxw161283 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1332*3833Sxw161283 /* free descripter buffer */
1333*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1334*3833Sxw161283 }
1335*3833Sxw161283
1336*3833Sxw161283 mutex_exit(&sa->ch_small_esbl);
1337*3833Sxw161283
1338*3833Sxw161283 /* destroy ch_esbl lock */
1339*3833Sxw161283 mutex_destroy(&sa->ch_small_esbl);
1340*3833Sxw161283
1341*3833Sxw161283
1342*3833Sxw161283 mutex_enter(&sa->ch_big_esbl);
1343*3833Sxw161283
1344*3833Sxw161283 /*
1345*3833Sxw161283 * Now set-up the rest to commit suicide.
1346*3833Sxw161283 */
1347*3833Sxw161283 while (sa->ch_big_owner) {
1348*3833Sxw161283 rbp = sa->ch_big_owner;
1349*3833Sxw161283 sa->ch_big_owner = rbp->cs_owner;
1350*3833Sxw161283 rbp->cs_owner = NULL;
1351*3833Sxw161283 rbp->cs_flag = 1;
1352*3833Sxw161283 }
1353*3833Sxw161283
1354*3833Sxw161283 while ((rbp = sa->ch_big_esb_free) != NULL) {
1355*3833Sxw161283 /* advance head ptr to next entry */
1356*3833Sxw161283 sa->ch_big_esb_free = rbp->cs_next;
1357*3833Sxw161283 /* free private buffer allocated in ch_alloc_esbbuf() */
1358*3833Sxw161283 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1359*3833Sxw161283 /* free descripter buffer */
1360*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1361*3833Sxw161283 }
1362*3833Sxw161283
1363*3833Sxw161283 mutex_exit(&sa->ch_big_esbl);
1364*3833Sxw161283
1365*3833Sxw161283 /* destroy ch_esbl lock */
1366*3833Sxw161283 mutex_destroy(&sa->ch_big_esbl);
1367*3833Sxw161283 }
1368*3833Sxw161283
1369*3833Sxw161283 void
ch_small_rbuf_recycle(ch_esb_t * rbp)1370*3833Sxw161283 ch_small_rbuf_recycle(ch_esb_t *rbp)
1371*3833Sxw161283 {
1372*3833Sxw161283 ch_t *sa = rbp->cs_sa;
1373*3833Sxw161283
1374*3833Sxw161283 if (rbp->cs_flag) {
1375*3833Sxw161283 uint32_t i;
1376*3833Sxw161283 /*
1377*3833Sxw161283 * free private buffer allocated in ch_alloc_esbbuf()
1378*3833Sxw161283 */
1379*3833Sxw161283 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1380*3833Sxw161283
1381*3833Sxw161283 i = rbp->cs_index;
1382*3833Sxw161283
1383*3833Sxw161283 /*
1384*3833Sxw161283 * free descripter buffer
1385*3833Sxw161283 */
1386*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1387*3833Sxw161283
1388*3833Sxw161283 /*
1389*3833Sxw161283 * decrement count of receive buffers freed by callback
1390*3833Sxw161283 * We decrement here so anyone trying to do fini will
1391*3833Sxw161283 * only remove the driver once the counts go to 0.
1392*3833Sxw161283 */
1393*3833Sxw161283 atomic_add_32(&buffers_in_use[i], -1);
1394*3833Sxw161283
1395*3833Sxw161283 return;
1396*3833Sxw161283 }
1397*3833Sxw161283
1398*3833Sxw161283 mutex_enter(&sa->ch_small_esbl);
1399*3833Sxw161283 rbp->cs_next = sa->ch_small_esb_free;
1400*3833Sxw161283 sa->ch_small_esb_free = rbp;
1401*3833Sxw161283 mutex_exit(&sa->ch_small_esbl);
1402*3833Sxw161283
1403*3833Sxw161283 /*
1404*3833Sxw161283 * decrement count of receive buffers freed by callback
1405*3833Sxw161283 */
1406*3833Sxw161283 atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1407*3833Sxw161283 }
1408*3833Sxw161283
1409*3833Sxw161283 /*
1410*3833Sxw161283 * callback function from freeb() when esballoced mblk freed.
1411*3833Sxw161283 */
1412*3833Sxw161283 void
ch_big_rbuf_recycle(ch_esb_t * rbp)1413*3833Sxw161283 ch_big_rbuf_recycle(ch_esb_t *rbp)
1414*3833Sxw161283 {
1415*3833Sxw161283 ch_t *sa = rbp->cs_sa;
1416*3833Sxw161283
1417*3833Sxw161283 if (rbp->cs_flag) {
1418*3833Sxw161283 uint32_t i;
1419*3833Sxw161283 /*
1420*3833Sxw161283 * free private buffer allocated in ch_alloc_esbbuf()
1421*3833Sxw161283 */
1422*3833Sxw161283 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1423*3833Sxw161283
1424*3833Sxw161283 i = rbp->cs_index;
1425*3833Sxw161283
1426*3833Sxw161283 /*
1427*3833Sxw161283 * free descripter buffer
1428*3833Sxw161283 */
1429*3833Sxw161283 kmem_free(rbp, sizeof (ch_esb_t));
1430*3833Sxw161283
1431*3833Sxw161283 /*
1432*3833Sxw161283 * decrement count of receive buffers freed by callback
1433*3833Sxw161283 * We decrement here so anyone trying to do fini will
1434*3833Sxw161283 * only remove the driver once the counts go to 0.
1435*3833Sxw161283 */
1436*3833Sxw161283 atomic_add_32(&buffers_in_use[i], -1);
1437*3833Sxw161283
1438*3833Sxw161283 return;
1439*3833Sxw161283 }
1440*3833Sxw161283
1441*3833Sxw161283 mutex_enter(&sa->ch_big_esbl);
1442*3833Sxw161283 rbp->cs_next = sa->ch_big_esb_free;
1443*3833Sxw161283 sa->ch_big_esb_free = rbp;
1444*3833Sxw161283 mutex_exit(&sa->ch_big_esbl);
1445*3833Sxw161283
1446*3833Sxw161283 /*
1447*3833Sxw161283 * decrement count of receive buffers freed by callback
1448*3833Sxw161283 */
1449*3833Sxw161283 atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1450*3833Sxw161283 }
1451*3833Sxw161283
1452*3833Sxw161283 /*
1453*3833Sxw161283 * get a pre-allocated, pre-mapped receive buffer from free list.
1454*3833Sxw161283 * (used sge.c)
1455*3833Sxw161283 */
1456*3833Sxw161283 ch_esb_t *
ch_get_small_rbuf(ch_t * sa)1457*3833Sxw161283 ch_get_small_rbuf(ch_t *sa)
1458*3833Sxw161283 {
1459*3833Sxw161283 ch_esb_t *rbp;
1460*3833Sxw161283
1461*3833Sxw161283 mutex_enter(&sa->ch_small_esbl);
1462*3833Sxw161283 rbp = sa->ch_small_esb_free;
1463*3833Sxw161283 if (rbp) {
1464*3833Sxw161283 sa->ch_small_esb_free = rbp->cs_next;
1465*3833Sxw161283 }
1466*3833Sxw161283 mutex_exit(&sa->ch_small_esbl);
1467*3833Sxw161283
1468*3833Sxw161283 return (rbp);
1469*3833Sxw161283 }
1470*3833Sxw161283
1471*3833Sxw161283 /*
1472*3833Sxw161283 * get a pre-allocated, pre-mapped receive buffer from free list.
1473*3833Sxw161283 * (used sge.c)
1474*3833Sxw161283 */
1475*3833Sxw161283
1476*3833Sxw161283 ch_esb_t *
ch_get_big_rbuf(ch_t * sa)1477*3833Sxw161283 ch_get_big_rbuf(ch_t *sa)
1478*3833Sxw161283 {
1479*3833Sxw161283 ch_esb_t *rbp;
1480*3833Sxw161283
1481*3833Sxw161283 mutex_enter(&sa->ch_big_esbl);
1482*3833Sxw161283 rbp = sa->ch_big_esb_free;
1483*3833Sxw161283 if (rbp) {
1484*3833Sxw161283 sa->ch_big_esb_free = rbp->cs_next;
1485*3833Sxw161283 }
1486*3833Sxw161283 mutex_exit(&sa->ch_big_esbl);
1487*3833Sxw161283
1488*3833Sxw161283 return (rbp);
1489*3833Sxw161283 }
1490*3833Sxw161283
1491*3833Sxw161283 void
pe_detach(ch_t * sa)1492*3833Sxw161283 pe_detach(ch_t *sa)
1493*3833Sxw161283 {
1494*3833Sxw161283 (void) sge_stop(sa->sge);
1495*3833Sxw161283
1496*3833Sxw161283 pe_free_driver_resources(sa);
1497*3833Sxw161283 }
1498*3833Sxw161283
1499*3833Sxw161283 static void
pe_free_driver_resources(ch_t * sa)1500*3833Sxw161283 pe_free_driver_resources(ch_t *sa)
1501*3833Sxw161283 {
1502*3833Sxw161283 if (sa) {
1503*3833Sxw161283 t1_free_sw_modules(sa);
1504*3833Sxw161283
1505*3833Sxw161283 /* free pool of receive buffers */
1506*3833Sxw161283 pe_rbuf_pool_free(sa);
1507*3833Sxw161283 }
1508*3833Sxw161283 }
1509*3833Sxw161283
1510*3833Sxw161283 /*
1511*3833Sxw161283 * Processes elmer0 external interrupts in process context.
1512*3833Sxw161283 */
1513*3833Sxw161283 static void
ext_intr_task(ch_t * adapter)1514*3833Sxw161283 ext_intr_task(ch_t *adapter)
1515*3833Sxw161283 {
1516*3833Sxw161283 u32 enable;
1517*3833Sxw161283
1518*3833Sxw161283 (void) elmer0_ext_intr_handler(adapter);
1519*3833Sxw161283
1520*3833Sxw161283 /* Now reenable external interrupts */
1521*3833Sxw161283 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
1522*3833Sxw161283 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1523*3833Sxw161283 t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
1524*3833Sxw161283 adapter->slow_intr_mask |= F_PL_INTR_EXT;
1525*3833Sxw161283 }
1526*3833Sxw161283
1527*3833Sxw161283 /*
1528*3833Sxw161283 * Interrupt-context handler for elmer0 external interrupts.
1529*3833Sxw161283 */
1530*3833Sxw161283 void
t1_os_elmer0_ext_intr(ch_t * adapter)1531*3833Sxw161283 t1_os_elmer0_ext_intr(ch_t *adapter)
1532*3833Sxw161283 {
1533*3833Sxw161283 u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1534*3833Sxw161283
1535*3833Sxw161283 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
1536*3833Sxw161283 t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
1537*3833Sxw161283 #ifdef NOTYET
1538*3833Sxw161283 schedule_work(&adapter->ext_intr_handler_task);
1539*3833Sxw161283 #else
1540*3833Sxw161283 ext_intr_task(adapter);
1541*3833Sxw161283 #endif
1542*3833Sxw161283 }
1543*3833Sxw161283
1544*3833Sxw161283 uint8_t *
t1_get_next_mcaddr(struct t1_rx_mode * rmp)1545*3833Sxw161283 t1_get_next_mcaddr(struct t1_rx_mode *rmp)
1546*3833Sxw161283 {
1547*3833Sxw161283 uint8_t *addr = 0;
1548*3833Sxw161283 if (rmp->mc) {
1549*3833Sxw161283 addr = rmp->mc->cmc_mca;
1550*3833Sxw161283 rmp->mc = rmp->mc->cmc_next;
1551*3833Sxw161283 }
1552*3833Sxw161283 return (addr);
1553*3833Sxw161283 }
1554*3833Sxw161283
/*
 * Pre-allocate DMA handles onto the adapter's free lists so the fast
 * path can pop them without allocating.  Best-effort: stops quietly
 * when a handle cannot be obtained.
 */
void
pe_dma_handle_init(ch_t *chp, int cnt)
{
	free_dh_t *dhe;
#if defined(__sparc)
	/*
	 * NOTE(review): tcnt is cnt/2 but the dvma loop below consumes
	 * the full original cnt, after which cnt += tcnt leaves cnt/2
	 * for the regular DMA-handle loop — i.e. on sparc this caches
	 * cnt dvma handles plus cnt/2 dma handles (more if the dvma
	 * loop breaks early).  Presumably intentional; confirm against
	 * the handle-pool sizing expectations.
	 */
	int tcnt = cnt/2;

	/* Push dvma handles onto the ch_vdh free list. */
	for (; cnt; cnt--) {
		dhe = ch_get_dvma_handle(chp);
		if (dhe == NULL)
			break;
		mutex_enter(&chp->ch_dh_lck);
		dhe->dhe_next = chp->ch_vdh;
		chp->ch_vdh = dhe;
		mutex_exit(&chp->ch_dh_lck);
	}

	/* Leave tcnt (plus any dvma shortfall) for the dma loop below. */
	cnt += tcnt;
#endif
	/* Push regular DMA handles onto the ch_dh free list. */
	while (cnt--) {
		dhe = ch_get_dma_handle(chp);
		if (dhe == NULL)
			return;
		mutex_enter(&chp->ch_dh_lck);
		dhe->dhe_next = chp->ch_dh;
		chp->ch_dh = dhe;
		mutex_exit(&chp->ch_dh_lck);
	}
}
1584*3833Sxw161283
1585*3833Sxw161283 /*
1586*3833Sxw161283 * Write new values to the MTU table. Caller must validate that the new MTUs
1587*3833Sxw161283 * are in ascending order. params.mtus[] is initialized by init_mtus()
1588*3833Sxw161283 * called in t1_init_sw_modules().
1589*3833Sxw161283 */
1590*3833Sxw161283 #define MTUREG(idx) (A_TP_MTU_REG0 + (idx) * 4)
1591*3833Sxw161283
1592*3833Sxw161283 static void
update_mtu_tab(ch_t * adapter)1593*3833Sxw161283 update_mtu_tab(ch_t *adapter)
1594*3833Sxw161283 {
1595*3833Sxw161283 int i;
1596*3833Sxw161283
1597*3833Sxw161283 for (i = 0; i < NMTUS; ++i) {
1598*3833Sxw161283 int mtu = (unsigned int)adapter->params.mtus[i];
1599*3833Sxw161283
1600*3833Sxw161283 t1_write_reg_4(adapter, MTUREG(i), mtu);
1601*3833Sxw161283 }
1602*3833Sxw161283 }
1603*3833Sxw161283
1604*3833Sxw161283 static int
pe_change_mtu(ch_t * chp)1605*3833Sxw161283 pe_change_mtu(ch_t *chp)
1606*3833Sxw161283 {
1607*3833Sxw161283 struct cmac *mac = chp->port[0].mac;
1608*3833Sxw161283 int ret;
1609*3833Sxw161283
1610*3833Sxw161283 if (!mac->ops->set_mtu) {
1611*3833Sxw161283 return (EOPNOTSUPP);
1612*3833Sxw161283 }
1613*3833Sxw161283 if (chp->ch_mtu < 68) {
1614*3833Sxw161283 return (EINVAL);
1615*3833Sxw161283 }
1616*3833Sxw161283 if (ret = mac->ops->set_mtu(mac, chp->ch_mtu)) {
1617*3833Sxw161283 return (ret);
1618*3833Sxw161283 }
1619*3833Sxw161283
1620*3833Sxw161283 return (0);
1621*3833Sxw161283 }
1622*3833Sxw161283
/*
 * On-the-wire layout of an Ethernet frame carrying an ARP packet, used
 * to parse and rebuild the first outgoing ARP (see pe_make_fake_arp()
 * and pe_get_ip()).  Field order and sizes mirror the packet bytes —
 * do not reorder or pad.
 */
typedef struct fake_arp {
	char fa_dst[6];		/* ethernet header: destination MAC */
	char fa_src[6];		/* ethernet header: source MAC */
	ushort_t fa_typ;	/* ethernet header: ethertype */

	ushort_t fa_hrd;	/* arp: hardware address space */
	ushort_t fa_pro;	/* arp: protocol address space */
	char fa_hln;		/* arp: hardware address length */
	char fa_pln;		/* arp: protocol address length */
	ushort_t fa_op;		/* arp: opcode (request/reply) */
	char fa_src_mac[6];	/* arp: sender hardware address */
	uint_t fa_src_ip;	/* arp: sender protocol (IP) address */
	char fa_dst_mac[6];	/* arp: target hardware address */
	char fa_dst_ip[4];	/* arp: target protocol (IP) address */
} fake_arp_t;
1638*3833Sxw161283
1639*3833Sxw161283 /*
1640*3833Sxw161283 * PR2928 & PR3309
1641*3833Sxw161283 * construct packet in mblk and attach it to sge structure.
1642*3833Sxw161283 */
/*
 * PR2928 & PR3309
 * construct packet in mblk and attach it to sge structure.
 *
 * Builds a copy of the outgoing ARP frame pointed to by arpp, prefixed
 * with a CPL_TX_PKT header and with the destination MACs overwritten
 * by a fixed address, then hands the mblk to the SGE layer.  Returns
 * 0 on success, 1 if the mblk allocation fails.
 */
static int
pe_make_fake_arp(ch_t *chp, unsigned char *arpp)
{
	pesge *sge = chp->sge;
	mblk_t *bp;
	fake_arp_t *fap;
	/* Fixed destination MAC 00:07:43:00:00:00 written into the copy. */
	static char buf[6] = {0, 7, 0x43, 0, 0, 0};
	struct cpl_tx_pkt *cpl;

	bp = allocb(sizeof (struct fake_arp) + SZ_CPL_TX_PKT, BPRI_HI);
	if (bp == NULL) {
		return (1);
	}
	bzero(bp->b_rptr, sizeof (struct fake_arp) + SZ_CPL_TX_PKT);

	/* fill in cpl header */
	cpl = (struct cpl_tx_pkt *)bp->b_rptr;
	cpl->opcode = CPL_TX_PKT;
	cpl->iff = 0;		/* XXX port 0 needs fixing with NEMO */
	cpl->ip_csum_dis = 1;	/* no IP header cksum */
	cpl->l4_csum_dis = 1;	/* no tcp/udp cksum */
	cpl->vlan_valid = 0;	/* no vlan */

	/* ARP frame starts right after the CPL header. */
	fap = (fake_arp_t *)&bp->b_rptr[SZ_CPL_TX_PKT];

	bcopy(arpp, fap, sizeof (*fap));	/* copy first arp to mblk */

	bcopy(buf, fap->fa_dst, 6);	/* overwrite dst mac */
	/*
	 * NOTE(review): direct read of a uint_t field at b_rptr +
	 * SZ_CPL_TX_PKT — pe_get_ip() copies to a local buffer first
	 * for alignment; presumably the mblk offset here is known to
	 * be aligned.  Confirm on strict-alignment (sparc) targets.
	 */
	chp->ch_ip = fap->fa_src_ip;	/* not used yet */
	bcopy(buf, fap->fa_dst_mac, 6);	/* overwrite dst mac */

	bp->b_wptr = bp->b_rptr + sizeof (struct fake_arp)+SZ_CPL_TX_PKT;

	sge_add_fake_arp(sge, (void *)bp);

	return (0);
}
1680*3833Sxw161283
1681*3833Sxw161283 /*
1682*3833Sxw161283 * PR2928 & PR3309
1683*3833Sxw161283 * free the fake arp's mblk on sge structure.
1684*3833Sxw161283 */
1685*3833Sxw161283 void
pe_free_fake_arp(void * arp)1686*3833Sxw161283 pe_free_fake_arp(void *arp)
1687*3833Sxw161283 {
1688*3833Sxw161283 mblk_t *bp = (mblk_t *)(arp);
1689*3833Sxw161283
1690*3833Sxw161283 freemsg(bp);
1691*3833Sxw161283 }
1692*3833Sxw161283
1693*3833Sxw161283 /*
1694*3833Sxw161283 * extract ip address of nic from first outgoing arp.
1695*3833Sxw161283 */
1696*3833Sxw161283 static uint32_t
pe_get_ip(unsigned char * arpp)1697*3833Sxw161283 pe_get_ip(unsigned char *arpp)
1698*3833Sxw161283 {
1699*3833Sxw161283 fake_arp_t fap;
1700*3833Sxw161283
1701*3833Sxw161283 /*
1702*3833Sxw161283 * first copy packet to buffer so we know
1703*3833Sxw161283 * it will be properly aligned.
1704*3833Sxw161283 */
1705*3833Sxw161283 bcopy(arpp, &fap, sizeof (fap)); /* copy first arp to buffer */
1706*3833Sxw161283 return (fap.fa_src_ip);
1707*3833Sxw161283 }
1708*3833Sxw161283
1709*3833Sxw161283 /* ARGSUSED */
1710*3833Sxw161283 void
t1_os_link_changed(ch_t * obj,int port_id,int link_status,int speed,int duplex,int fc)1711*3833Sxw161283 t1_os_link_changed(ch_t *obj, int port_id, int link_status,
1712*3833Sxw161283 int speed, int duplex, int fc)
1713*3833Sxw161283 {
1714*3833Sxw161283 gld_mac_info_t *macinfo = obj->ch_macp;
1715*3833Sxw161283 if (link_status) {
1716*3833Sxw161283 gld_linkstate(macinfo, GLD_LINKSTATE_UP);
1717*3833Sxw161283 /*
1718*3833Sxw161283 * Link states should be reported to user
1719*3833Sxw161283 * whenever it changes
1720*3833Sxw161283 */
1721*3833Sxw161283 cmn_err(CE_NOTE, "%s: link is up", adapter_name(obj));
1722*3833Sxw161283 } else {
1723*3833Sxw161283 gld_linkstate(macinfo, GLD_LINKSTATE_DOWN);
1724*3833Sxw161283 /*
1725*3833Sxw161283 * Link states should be reported to user
1726*3833Sxw161283 * whenever it changes
1727*3833Sxw161283 */
1728*3833Sxw161283 cmn_err(CE_NOTE, "%s: link is down", adapter_name(obj));
1729*3833Sxw161283 }
1730*3833Sxw161283 }
1731