1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * This file is part of the Chelsio T1 Ethernet driver.
29 *
30 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
31 */
32
33 /*
34 * Solaris Multithreaded STREAMS Chelsio PCI Ethernet Driver.
35 * Interface code
36 */
37
38 #pragma ident "%Z%%M% %I% %E% SMI"
39
40 #include <sys/types.h>
41 #include <sys/systm.h>
42 #include <sys/cmn_err.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/byteorder.h>
46 #include <sys/atomic.h>
47 #include <sys/ethernet.h>
48 #if PE_PROFILING_ENABLED
49 #include <sys/time.h>
50 #endif
51 #include <sys/gld.h>
52 #include "ostypes.h"
53 #include "common.h"
54 #include "oschtoe.h"
55 #ifdef CONFIG_CHELSIO_T1_1G
56 #include "fpga_defs.h"
57 #endif
58 #include "regs.h"
59 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
60 #include "mc3.h"
61 #include "mc4.h"
62 #endif
63 #include "sge.h"
64 #include "tp.h"
65 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
66 #include "ulp.h"
67 #endif
68 #include "espi.h"
69 #include "elmer0.h"
70 #include "gmac.h"
71 #include "cphy.h"
72 #include "suni1x10gexp_regs.h"
73 #include "ch.h"
74
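/*
 * MLEN(mp) yields the number of data bytes currently held in a single
 * mblk (b_wptr - b_rptr); it does not walk the b_cont chain.
 */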
75 #define MLEN(mp) ((mp)->b_wptr - (mp)->b_rptr)
76
77 extern uint32_t buffers_in_use[];
78 extern kmutex_t in_use_l;
79 extern uint32_t in_use_index;
80
81 static void link_start(ch_t *sa, struct pe_port_t *pp);
82 static ch_esb_t *ch_alloc_small_esbbuf(ch_t *sa, uint32_t i);
83 static ch_esb_t *ch_alloc_big_esbbuf(ch_t *sa, uint32_t i);
84 void ch_big_rbuf_recycle(ch_esb_t *rbp);
85 void ch_small_rbuf_recycle(ch_esb_t *rbp);
86 static const struct board_info *pe_sa_init(ch_t *sa);
87 static int ch_set_config_data(ch_t *chp);
88 void pe_rbuf_pool_free(ch_t *chp);
89 static void pe_free_driver_resources(ch_t *sa);
90 static void update_mtu_tab(ch_t *adapter);
91 static int pe_change_mtu(ch_t *chp);
92
93 /*
94 * CPL5 Defines (from netinet/cpl5_commands.h)
95 */
96 #define FLITSTOBYTES 8
97
98 #define CPL_FORMAT_0_SIZE 8
99 #define CPL_FORMAT_1_SIZE 16
100 #define CPL_FORMAT_2_SIZE 24
101 #define CPL_FORMAT_3_SIZE 32
102 #define CPL_FORMAT_4_SIZE 40
103 #define CPL_FORMAT_5_SIZE 48
104
105 #define TID_MASK 0xffffff
106
107 #define PE_LINK_SPEED_AUTONEG 5
108
109 static int pe_small_rbuf_pool_init(ch_t *sa);
110 static int pe_big_rbuf_pool_init(ch_t *sa);
111 static int pe_make_fake_arp(ch_t *chp, unsigned char *arpp);
112 static uint32_t pe_get_ip(unsigned char *arpp);
113
114 /*
115 * May be set in /etc/system to 0 to use default latency timer for 10G.
116 * See PCI register 0xc definition.
117 */
118 int enable_latency_timer = 1;
119
120 /*
121 * May be set in /etc/system to 0 to disable hardware checksum for
122 * TCP and UDP.
123 */
124 int enable_checksum_offload = 1;
125
126 /*
127 * Multiplier for freelist pool.
128 */
129 int fl_sz_multiplier = 6;
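/*
 * Illustrative /etc/system entries for the tunables above (this sketch
 * assumes the driver module is named "chxge"):
 *
 *	set chxge:enable_latency_timer = 0
 *	set chxge:enable_checksum_offload = 0
 *	set chxge:fl_sz_multiplier = 8
 */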
130
131 uint_t
132 pe_intr(ch_t *sa)
133 {
134 mutex_enter(&sa->ch_intr);
135
136 if (sge_data_in(sa->sge)) {
137 sa->isr_intr++;
138 mutex_exit(&sa->ch_intr);
139 return (DDI_INTR_CLAIMED);
140 }
141
142 mutex_exit(&sa->ch_intr);
143
144 return (DDI_INTR_UNCLAIMED);
145 }
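/*
 * Note: a DDI interrupt handler must return DDI_INTR_CLAIMED only when its
 * device actually raised the interrupt; returning DDI_INTR_UNCLAIMED lets
 * the framework offer a shared interrupt line to the other handlers.
 */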
146
147 /*
148 * Each setup struct will call this function to
149 * initialize.
150 */
151 void
152 pe_init(void* xsa)
153 {
154 ch_t *sa = NULL;
155 int i = 0;
156
157 sa = (ch_t *)xsa;
158
159 /*
160 * Need to count the number of times this routine is called
161 * because we only want the resources to be allocated once.
162 * The 7500 has four ports and so this routine can be called
163 * once for each port.
164 */
165 if (sa->init_counter == 0) {
166 for_each_port(sa, i) {
167
168 /*
169 * We only want to initialize the line if it is down.
170 */
171 if (sa->port[i].line_up == 0) {
172 link_start(sa, &sa->port[i]);
173 sa->port[i].line_up = 1;
174 }
175 }
176
177 (void) t1_init_hw_modules(sa);
178
179 /*
180 * Enable/Disable checksum offloading.
181 */
182 if (sa->ch_config.cksum_enabled) {
183 if (sa->config_data.offload_ip_cksum) {
184 /* Notify that HW will do the checksum. */
185 t1_tp_set_ip_checksum_offload(sa->tp, 1);
186 }
187
188 if (sa->config_data.offload_tcp_cksum) {
189 /* Notify that HW will do the checksum. */
190 t1_tp_set_tcp_checksum_offload(sa->tp, 1);
191 }
192
193 if (sa->config_data.offload_udp_cksum) {
194 /* Notify that HW will do the checksum. */
195 t1_tp_set_udp_checksum_offload(sa->tp, 1);
196 }
197 }
198
199 sa->ch_flags |= PEINITDONE;
200
201 sa->init_counter++;
202 }
203
204 /*
205 * Enable interrupts after starting the SGE so
206 * that the SGE is ready to handle interrupts.
207 */
208 (void) sge_start(sa->sge);
209 t1_interrupts_enable(sa);
210
211 /*
212 * set mtu (either 1500 or bigger)
213 */
214 (void) pe_change_mtu(sa);
215 #ifdef HOST_PAUSE
216 /*
217 * get the configured value of the MAC.
218 */
219 (void) t1_tpi_read(sa, SUNI1x10GEXP_REG_TXXG_CONFIG_1 << 2,
220 &sa->txxg_cfg1);
221 #endif
222 }
223
224 /* ARGSUSED */
225 static void
226 link_start(ch_t *sa, struct pe_port_t *p)
227 {
228 struct cmac *mac = p->mac;
229
230 mac->ops->reset(mac);
231 if (mac->ops->macaddress_set)
232 mac->ops->macaddress_set(mac, p->enaddr);
233 (void) t1_link_start(p->phy, mac, &p->link_config);
234 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
235 }
236
237 /*
238 * turn off interrupts...
239 */
240 void
241 pe_stop(ch_t *sa)
242 {
243 t1_interrupts_disable(sa);
244 (void) sge_stop(sa->sge);
245
246 /*
247 * we can still be running an interrupt thread in sge_data_in().
248 * If we are, we'll block on the ch_intr lock
249 */
250 mutex_enter(&sa->ch_intr);
251 mutex_exit(&sa->ch_intr);
252 }
253
254 /*
255 * output mblk to SGE level and out to the wire.
256 */
257
258 int
259 pe_start(ch_t *sa, mblk_t *mp, uint32_t flg)
260 {
261 mblk_t *m0 = mp;
262 cmdQ_ce_t cm[16];
263 cmdQ_ce_t *cmp;
264 cmdQ_ce_t *hmp = &cm[0]; /* head of cm table (may be kmem_alloc'ed) */
265 int cm_flg = 0; /* flag (1 - if kmem_alloc'ed) */
266 int nseg = 0; /* number of cmdQ_ce entries created */
267 int mseg = 16; /* maximum entries in hmp array */
268 int freeme = 0; /* we have an mblk to free in case of error */
269 uint32_t ch_bind_dma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
270 uint32_t);
271 #if defined(__sparc)
272 uint32_t ch_bind_dvma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
273 uint32_t);
274 #endif
275 int rv; /* return value on error */
276
277 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
278 if (flg & CH_OFFLOAD) {
279 hmp->ce_pa = ((tbuf_t *)mp)->tb_pa;
280 hmp->ce_dh = NULL;
281 hmp->ce_flg = DH_TOE;
282 hmp->ce_len = ((tbuf_t *)mp)->tb_len;
283 hmp->ce_mp = mp;
284
285 /* make sure data is flushed to physical memory */
286 (void) ddi_dma_sync((ddi_dma_handle_t)((tbuf_t *)mp)->tb_dh,
287 (off_t)0, hmp->ce_len, DDI_DMA_SYNC_FORDEV);
288
289 if (sge_data_out(sa->sge, 0, mp, hmp, 1, flg) == 0) {
290 return (0);
291 }
292
293 /*
294 * set a flag so we'll restart upper layer when
295 * resources become available.
296 */
297 sa->ch_blked = 1;
298 return (1);
299 }
300 #endif /* CONFIG_CHELSIO_T1_OFFLOAD */
301
302 /* writes from the TOE will always have the CPL header in place */
303 if (flg & CH_NO_CPL) {
304 struct cpl_tx_pkt *cpl;
305
306 /* PR2928 & PR3309 */
307 if (sa->ch_ip == NULL) {
308 ushort_t ethertype = ntohs(*(short *)&mp->b_rptr[12]);
309 if (ethertype == ETHERTYPE_ARP) {
310 if (is_T2(sa)) {
311 /*
312 * We assume here that the arp will be
313 * contained in one mblk.
314 */
315 if (pe_make_fake_arp(sa, mp->b_rptr)) {
316 freemsg(mp);
317 sa->oerr++;
318 return (0);
319 }
320 } else {
321 sa->ch_ip = pe_get_ip(mp->b_rptr);
322 }
323 }
324 }
325
326 /*
327 * if space in front of packet big enough for CPL
328 * header, then use it. We'll allocate an mblk
329 * otherwise.
330 */
331 if ((mp->b_rptr - mp->b_datap->db_base) >= SZ_CPL_TX_PKT) {
332
333 mp->b_rptr -= SZ_CPL_TX_PKT;
334
335 } else {
336
337 #ifdef SUN_KSTATS
338 sa->sge->intr_cnt.tx_need_cpl_space++;
339 #endif
340 m0 = allocb(SZ_CPL_TX_PKT, BPRI_HI);
341 if (m0 == NULL) {
342 freemsg(mp);
343 sa->oerr++;
344 return (0);
345 }
346
347 m0->b_wptr = m0->b_rptr + SZ_CPL_TX_PKT;
348 m0->b_cont = mp;
349 freeme = 1;
350
351 mp = m0;
352 }
353
354 /* fill in cpl header */
355 cpl = (struct cpl_tx_pkt *)mp->b_rptr;
356 cpl->opcode = CPL_TX_PKT;
357 cpl->iff = 0; /* XXX port 0 needs fixing with NEMO */
358 cpl->ip_csum_dis = 1; /* no IP header cksum */
359 cpl->l4_csum_dis =
360 flg & CH_NO_HWCKSUM; /* CH_NO_HWCKSUM == 1 */
361 cpl->vlan_valid = 0; /* no vlan */
362 }
363
364 if (m0->b_cont) {
365
366 #ifdef SUN_KSTATS
367 sa->sge->intr_cnt.tx_multi_mblks++;
368 #endif
369
370 while (mp) {
371 int lseg; /* added by ch_bind_dma_handle() */
372 int len;
373
374 len = MLEN(mp);
375 /* skip mblks with no data */
376 if (len == 0) {
377 mp = mp->b_cont;
378 continue;
379 }
380
381 /*
382 * if we've run out of space on stack, then we
383 * allocate a temporary buffer to hold the
384 * information. This will kill the performance,
385 * but since it shouldn't really occur, we can live
386 * with it. Since jumbo frames may map multiple
387 * descriptors, we reallocate the hmp[] array before
388 * we reach the end.
389 */
390 if (nseg >= (mseg-4)) {
391 cmdQ_ce_t *buf;
392 int j;
393
394 buf = kmem_alloc(sizeof (cmdQ_ce_t) * 2 * mseg,
395 KM_SLEEP);
396
397 for (j = 0; j < nseg; j++)
398 buf[j] = hmp[j];
399
400 if (cm_flg) {
401 kmem_free(hmp,
402 mseg * sizeof (cmdQ_ce_t));
403 } else
404 cm_flg = 1;
405
406 hmp = buf;
407 mseg = 2*mseg;
408
409 /*
410 * We've used up ch table on stack
411 */
412 }
413
414 #if defined(__sparc)
415 if (sa->ch_config.enable_dvma) {
416 lseg = ch_bind_dvma_handle(sa, len,
417 (void *)mp->b_rptr,
418 &hmp[nseg], mseg - nseg);
419 if (lseg == NULL) {
420 sa->sge->intr_cnt.tx_no_dvma1++;
421 if ((lseg = ch_bind_dma_handle(sa, len,
422 (void *)mp->b_rptr,
423 &hmp[nseg],
424 mseg - nseg)) == NULL) {
425 sa->sge->intr_cnt.tx_no_dma1++;
426
427 /*
428 * ran out of space. Gonna bail.
429 */
430 rv = 0;
431
432 /*
433 * we may have processed
434 * previous mblks and have
435 * descriptors. If so, we need
436 * to free the meta struct
437 * entries before freeing
438 * the mblk.
439 */
440 if (nseg)
441 goto error;
442 goto error1;
443 }
444 }
445 } else {
446 lseg = ch_bind_dma_handle(sa, len,
447 (void *)mp->b_rptr, &hmp[nseg],
448 mseg - nseg);
449 if (lseg == NULL) {
450 sa->sge->intr_cnt.tx_no_dma1++;
451
452 /*
453 * ran out of space. Gonna bail.
454 */
455 rv = 0;
456
457 /*
458 * we may have processed previous
459 * mblks and have descriptors. If so,
460 * we need to free the meta struct
461 * entries before freeing the mblk.
462 */
463 if (nseg)
464 goto error;
465 goto error1;
466 }
467 }
468 #else /* defined(__sparc) */
469 lseg = ch_bind_dma_handle(sa, len,
470 (void *)mp->b_rptr, &hmp[nseg],
471 mseg - nseg);
472 if (lseg == NULL) {
473 sa->sge->intr_cnt.tx_no_dma1++;
474
475 /*
476 * ran out of space. Gonna bail.
477 */
478 rv = 0;
479
480 /*
481 * we may have processed previous mblks and
482 * have descriptors. If so, we need to free
483 * the meta struct entries before freeing
484 * the mblk.
485 */
486 if (nseg)
487 goto error;
488 goto error1;
489 }
490 #endif /* defined(__sparc) */
491 nseg += lseg;
492 mp = mp->b_cont;
493 }
494
495 /*
496 * SHOULD NEVER OCCUR, BUT...
497 * no data if nseg 0 or
498 * nseg 1 and a CPL mblk (CPL mblk only with offload mode)
499 * and no data
500 */
501 if ((nseg == 0) || (freeme && (nseg == 1))) {
502 rv = 0;
503 goto error1;
504 }
505
506 } else {
507 int len;
508
509 /* we assume that we always have data with one packet */
510 len = MLEN(mp);
511
512 #if defined(__sparc)
513 if (sa->ch_config.enable_dvma) {
514 nseg = ch_bind_dvma_handle(sa, len,
515 (void *)mp->b_rptr,
516 &hmp[0], 16);
517 if (nseg == NULL) {
518 sa->sge->intr_cnt.tx_no_dvma2++;
519 nseg = ch_bind_dma_handle(sa, len,
520 (void *)mp->b_rptr,
521 &hmp[0], 16);
522 if (nseg == NULL) {
523 sa->sge->intr_cnt.tx_no_dma2++;
524
525 /*
526 * ran out of space. Gonna bail.
527 */
528 rv = 0;
529 goto error1;
530 }
531 }
532 } else {
533 nseg = ch_bind_dma_handle(sa, len,
534 (void *)mp->b_rptr, &hmp[0], 16);
535 if (nseg == NULL) {
536 sa->sge->intr_cnt.tx_no_dma2++;
537
538 /*
539 * ran out of space. Gonna bail.
540 */
541 rv = 0;
542 goto error1;
543 }
544 }
545 #else /* defined(__sparc) */
546 nseg = ch_bind_dma_handle(sa, len,
547 (void *)mp->b_rptr, &hmp[0], 16);
548 if (nseg == NULL) {
549 sa->sge->intr_cnt.tx_no_dma2++;
550
551 /*
552 * ran out of space. Gonna bail.
553 */
554 rv = 0;
555 goto error1;
556 }
557 #endif /* defined(__sparc) */
558
559 /*
560 * dummy arp message to handle PR3309 & PR2928
561 */
562 if (flg & CH_ARP)
563 hmp->ce_flg |= DH_ARP;
564 }
565
566 if (sge_data_out(sa->sge, 0, m0, hmp, nseg, flg) == 0) {
567 if (cm_flg)
568 kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
569 return (0);
570 }
571
572 /*
573 * set a flag so we'll restart upper layer when
574 * resources become available.
575 */
576 if ((flg & CH_ARP) == 0)
577 sa->ch_blked = 1;
578 rv = 1;
579
580 error:
581 /*
582 * unmap the physical addresses allocated earlier.
583 */
584 cmp = hmp;
585 for (--nseg; nseg >= 0; nseg--) {
586 if (cmp->ce_dh) {
587 if (cmp->ce_flg == DH_DMA)
588 ch_unbind_dma_handle(sa, cmp->ce_dh);
589 #if defined(__sparc)
590 else
591 ch_unbind_dvma_handle(sa, cmp->ce_dh);
592 #endif
593 }
594 cmp++;
595 }
596
597 error1:
598
599 /* free the temporary array */
600 if (cm_flg)
601 kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
602
603 /*
604 * if we've allocated an mblk above, then we need to free it
605 * before returning. This is safe since we haven't done anything to
606 * the original message. The caller, gld, will still have a pointer
607 * to the original mblk.
608 */
609 if (rv == 1) {
610 if (freeme) {
611 /* we had to allocate an mblk. Free it. */
612 freeb(m0);
613 } else {
614 /* adjust the mblk back to original start */
615 if (flg & CH_NO_CPL)
616 m0->b_rptr += SZ_CPL_TX_PKT;
617 }
618 } else {
619 freemsg(m0);
620 sa->oerr++;
621 }
622
623 return (rv);
624 }
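/*
 * Sketch (not part of this driver): a GLD gld_send(9E) entry point would
 * typically hand an outbound mblk chain to pe_start() and translate its
 * return value, retrying later when resources are exhausted.  The
 * gldm_private layout below is an assumption for illustration only.
 *
 *	static int
 *	example_gld_send(gld_mac_info_t *macinfo, mblk_t *mp)
 *	{
 *		ch_t *sa = (ch_t *)macinfo->gldm_private;
 *
 *		return (pe_start(sa, mp, CH_NO_CPL) ?
 *		    GLD_NORESOURCES : GLD_SUCCESS);
 *	}
 */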
625
626 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
627 void
628 pe_set_mac(ch_t *sa, unsigned char *ac_enaddr)
629 {
630 sa->port[0].mac->ops->macaddress_set(sa->port[0].mac, ac_enaddr);
631 }
632
633 /* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
634 unsigned char *
635 pe_get_mac(ch_t *sa)
636 {
637 return (sa->port[0].enaddr);
638 }
639
640 /* KLUDGE ALERT. HARD WIRED TO ONE PORT */
641 void
642 pe_set_promiscuous(ch_t *sa, int flag)
643 {
644 struct cmac *mac = sa->port[0].mac;
645 struct t1_rx_mode rm;
646
647 switch (flag) {
648 case 0: /* turn off promiscuous mode */
649 sa->ch_flags &= ~(PEPROMISC|PEALLMULTI);
650 break;
651
652 case 1: /* turn on promiscuous mode */
653 sa->ch_flags |= PEPROMISC;
654 break;
655
656 case 2: /* turn on multicast reception */
657 sa->ch_flags |= PEALLMULTI;
658 break;
659 }
660
661 mutex_enter(&sa->ch_mc_lck);
662 rm.chp = sa;
663 rm.mc = sa->ch_mc;
664
665 mac->ops->set_rx_mode(mac, &rm);
666 mutex_exit(&sa->ch_mc_lck);
667 }
668
669 int
670 pe_set_mc(ch_t *sa, uint8_t *ep, int flg)
671 {
672 struct cmac *mac = sa->port[0].mac;
673 struct t1_rx_mode rm;
674
675 if (flg == GLD_MULTI_ENABLE) {
676 ch_mc_t *mcp;
677
678 mcp = (ch_mc_t *)kmem_zalloc(sizeof (struct ch_mc),
679 KM_NOSLEEP);
680 if (mcp == NULL)
681 return (GLD_NORESOURCES);
682
683 bcopy(ep, &mcp->cmc_mca, 6);
684
685 mutex_enter(&sa->ch_mc_lck);
686 mcp->cmc_next = sa->ch_mc;
687 sa->ch_mc = mcp;
688 sa->ch_mc_cnt++;
689 mutex_exit(&sa->ch_mc_lck);
690
691 } else if (flg == GLD_MULTI_DISABLE) {
692 ch_mc_t **p = &sa->ch_mc;
693 ch_mc_t *q = NULL;
694
695 mutex_enter(&sa->ch_mc_lck);
696 p = &sa->ch_mc;
697 while (*p) {
698 if (bcmp(ep, (*p)->cmc_mca, 6) == 0) {
699 q = *p;
700 *p = (*p)->cmc_next;
701 kmem_free(q, sizeof (*q));
702 sa->ch_mc_cnt--;
703 break;
704 }
705
706 p = &(*p)->cmc_next;
707 }
708 mutex_exit(&sa->ch_mc_lck);
709
710 if (q == NULL)
711 return (GLD_BADARG);
712 } else
713 return (GLD_BADARG);
714
715 mutex_enter(&sa->ch_mc_lck);
716 rm.chp = sa;
717 rm.mc = sa->ch_mc;
718
719 mac->ops->set_rx_mode(mac, &rm);
720 mutex_exit(&sa->ch_mc_lck);
721
722 return (GLD_SUCCESS);
723 }
724
725 /*
726 * return: speed - bandwidth of interface
727 * return: intrcnt - # interrupts
728 * return: norcvbuf - # received packets dropped by driver
729 * return: oerrors - # bad send packets
730 * return: ierrors - # bad receive packets
731 * return: underrun - # bad underrun xmit packets
732 * return: overrun - # bad overrun recv packets
733 * return: framing - # bad aligned recv packets
734 * return: crc - # bad FCS (crc) recv packets
735 * return: carrier - times carrier was lost
736 * return: collisions - # xmit collisions
737 * return: xcollisions - # xmit pkts dropped due to collisions
738 * return: late - # late xmit collisions
739 * return: defer - # deferred xmit packets
740 * return: xerrs - # xmit dropped packets
741 * return: rerrs - # recv dropped packets
742 * return: toolong - # recv pkts too long
743 * return: runt - # recv runt pkts
744 * return: multixmt - # multicast pkts xmitted
745 * return: multircv - # multicast pkts recved
746 * return: brdcstxmt - # broadcast pkts xmitted
747 * return: brdcstrcv - # broadcast pkts rcv
748 */
749
750 int
751 pe_get_stats(ch_t *sa, uint64_t *speed, uint32_t *intrcnt, uint32_t *norcvbuf,
752 uint32_t *oerrors, uint32_t *ierrors, uint32_t *underrun,
753 uint32_t *overrun, uint32_t *framing, uint32_t *crc,
754 uint32_t *carrier, uint32_t *collisions, uint32_t *xcollisions,
755 uint32_t *late, uint32_t *defer, uint32_t *xerrs, uint32_t *rerrs,
756 uint32_t *toolong, uint32_t *runt, ulong_t *multixmt, ulong_t *multircv,
757 ulong_t *brdcstxmt, ulong_t *brdcstrcv)
758 {
759 struct pe_port_t *pt;
760 int line_speed;
761 int line_duplex;
762 int line_is_active;
763 uint64_t v;
764 const struct cmac_statistics *sp;
765
766 pt = &(sa->port[0]);
767 (void) pt->phy->ops->get_link_status(pt->phy,
768 &line_is_active, &line_speed, &line_duplex, NULL);
769
770 switch (line_speed) {
771 case SPEED_10:
772 *speed = 10000000;
773 break;
774 case SPEED_100:
775 *speed = 100000000;
776 break;
777 case SPEED_1000:
778 *speed = 1000000000;
779 break;
780 case SPEED_10000:
781 /*
782 * kludge to get 10,000,000,000 constant (and keep
783 * compiler happy).
784 */
785 v = 10000000;
786 v *= 1000;
787 *speed = v;
788 break;
789 default:
790 goto error;
791 }
792
793 *intrcnt = sa->isr_intr;
794 *norcvbuf = sa->norcvbuf;
795
796 sp = sa->port[0].mac->ops->statistics_update(sa->port[0].mac,
797 MAC_STATS_UPDATE_FULL);
798
799 *ierrors = sp->RxOctetsBad;
800
801 /*
802 * not sure this is correct. # aborted at driver level +
803 * # at hardware level
804 */
805 *oerrors = sa->oerr + sp->TxFramesAbortedDueToXSCollisions +
806 sp->TxUnderrun + sp->TxLengthErrors +
807 sp->TxInternalMACXmitError +
808 sp->TxFramesWithExcessiveDeferral +
809 sp->TxFCSErrors;
810
811 *underrun = sp->TxUnderrun;
812 *overrun = sp->RxFrameTooLongErrors;
813 *framing = sp->RxAlignErrors;
814 *crc = sp->RxFCSErrors;
815 *carrier = 0; /* need to find this */
816 *collisions = sp->TxTotalCollisions;
817 *xcollisions = sp->TxFramesAbortedDueToXSCollisions;
818 *late = sp->TxLateCollisions;
819 *defer = sp->TxFramesWithDeferredXmissions;
820 *xerrs = sp->TxUnderrun + sp->TxLengthErrors +
821 sp->TxInternalMACXmitError + sp->TxFCSErrors;
822 *rerrs = sp->RxSymbolErrors + sp->RxSequenceErrors + sp->RxRuntErrors +
823 sp->RxJabberErrors + sp->RxInternalMACRcvError +
824 sp->RxInRangeLengthErrors + sp->RxOutOfRangeLengthField;
825 *toolong = sp->RxFrameTooLongErrors;
826 *runt = sp->RxRuntErrors;
827
828 *multixmt = sp->TxMulticastFramesOK;
829 *multircv = sp->RxMulticastFramesOK;
830 *brdcstxmt = sp->TxBroadcastFramesOK;
831 *brdcstrcv = sp->RxBroadcastFramesOK;
832
833 return (0);
834
835 error:
836 *speed = 0;
837 *intrcnt = 0;
838 *norcvbuf = 0;
839 *norcvbuf = 0;
840 *oerrors = 0;
841 *ierrors = 0;
842 *underrun = 0;
843 *overrun = 0;
844 *framing = 0;
845 *crc = 0;
846 *carrier = 0;
847 *collisions = 0;
848 *xcollisions = 0;
849 *late = 0;
850 *defer = 0;
851 *xerrs = 0;
852 *rerrs = 0;
853 *toolong = 0;
854 *runt = 0;
855 *multixmt = 0;
856 *multircv = 0;
857 *brdcstxmt = 0;
858 *brdcstrcv = 0;
859
860 return (1);
861 }
862
863 uint32_t ch_gtm = 0; /* Default: Global Tunnel Mode off */
864 uint32_t ch_global_config = 0x07000000; /* Default: errors, warnings, status */
865 uint32_t ch_is_asic = 0; /* Default: non-ASIC */
866 uint32_t ch_link_speed = PE_LINK_SPEED_AUTONEG; /* Default: auto-negotiate */
867 uint32_t ch_num_of_ports = 1; /* Default: 1 port */
868 uint32_t ch_tp_reset_cm = 1; /* Default: reset CM memory map */
869 uint32_t ch_phy_tx_fifo = 0; /* Default: 0 phy tx fifo depth */
870 uint32_t ch_phy_rx_fifo = 0; /* Default: 0 phy rx fifo depth */
871 uint32_t ch_phy_force_master = 1; /* Default: link always master mode */
872 uint32_t ch_mc5_rtbl_size = 2048; /* Default: TCAM routing table size */
873 uint32_t ch_mc5_dbsvr_size = 128; /* Default: TCAM server size */
874 uint32_t ch_mc5_parity = 1; /* Default: parity error checking */
875 uint32_t ch_mc5_issue_syn = 0; /* Default: Allow transaction overlap */
876 uint32_t ch_packet_tracing = 0; /* Default: no packet tracing */
877 uint32_t ch_server_region_len =
878 DEFAULT_SERVER_REGION_LEN;
879 uint32_t ch_rt_region_len =
880 DEFAULT_RT_REGION_LEN;
881 uint32_t ch_offload_ip_cksum = 0; /* Default: no checksum offloading */
882 uint32_t ch_offload_udp_cksum = 1; /* Default: offload UDP checksum */
883 uint32_t ch_offload_tcp_cksum = 1; /* Default: offload TCP checksum */
884 uint32_t ch_sge_cmdq_threshold = 0; /* Default: threshold 0 */
885 uint32_t ch_sge_flq_threshold = 0; /* Default: SGE flq threshold */
886 uint32_t ch_sge_cmdq0_cnt = /* Default: cmd queue 0 size */
887 SGE_CMDQ0_CNT;
888 uint32_t ch_sge_cmdq1_cnt = /* Default: cmd queue 1 size */
889 SGE_CMDQ0_CNT;
890 uint32_t ch_sge_flq0_cnt = /* Default: free list queue-0 length */
891 SGE_FLQ0_CNT;
892 uint32_t ch_sge_flq1_cnt = /* Default: free list queue-1 length */
893 SGE_FLQ0_CNT;
894 uint32_t ch_sge_respq_cnt = /* Default: response queue size */
895 SGE_RESPQ_CNT;
896 uint32_t ch_stats = 1; /* Default: Automatic Update MAC stats */
897 uint32_t ch_tx_delay_us = 0; /* Default: no usec delay to Tx pkts */
898 int32_t ch_chip = -1; /* Default: use hardware lookup tbl */
899 uint32_t ch_exit_early = 0; /* Default: complete initialization */
900 uint32_t ch_rb_num_of_entries = 1000; /* Default: number ring buffer entries */
901 uint32_t ch_rb_size_of_entries = 64; /* Default: ring buffer entry size */
902 uint32_t ch_rb_flag = 1; /* Default: ring buffer flag */
903 uint32_t ch_type;
904 uint64_t ch_cat_opt0 = 0;
905 uint64_t ch_cat_opt1 = 0;
906 uint32_t ch_timer_delay = 0; /* Default: use value from board entry */
907
908 int
909 pe_attach(ch_t *chp)
910 {
911 int return_val = 1;
912 const struct board_info *bi;
913 uint32_t pcix_cmd;
914
915 (void) ch_set_config_data(chp);
916
917 bi = pe_sa_init(chp);
918 if (bi == 0)
919 return (1);
920
921 if (t1_init_sw_modules(chp, bi) < 0)
922 return (1);
923
924 if (pe_small_rbuf_pool_init(chp) == NULL)
925 return (1);
926
927 if (pe_big_rbuf_pool_init(chp) == NULL)
928 return (1);
929
930 /*
931 * We gain significant performance improvements when we
932 * increase the PCI's maximum memory read byte count to
933 * 2K (HW doesn't support 4K at this time) and set the PCI's
934 * maximum outstanding split transactions to 4. We want to do
935 * this for 10G. Done by software utility.
936 */
937
938 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
939 (void) t1_os_pci_read_config_4(chp, A_PCICFG_PCIX_CMD,
940 &pcix_cmd);
941 /*
942 * if the burstsize is set, then use it instead of default
943 */
944 if (chp->ch_config.burstsize_set) {
945 pcix_cmd &= ~0xc0000;
946 pcix_cmd |= (chp->ch_config.burstsize << 18);
947 }
948 /*
949 * if the split transaction count is set, then use it.
950 */
951 if (chp->ch_config.transaction_cnt_set) {
952 pcix_cmd &= ~ 0x700000;
953 pcix_cmd |= (chp->ch_config.transaction_cnt << 20);
954 }
955
956 /*
957 * set relaxed ordering flag as configured in chxge.conf
958 */
959 pcix_cmd |= (chp->ch_config.relaxed_ordering << 17);
960
961 (void) t1_os_pci_write_config_4(chp, A_PCICFG_PCIX_CMD,
962 pcix_cmd);
963 }
964
965 /*
966 * set the latency time to F8 for 10G cards.
967 * Done by software utility.
968 */
969 if (enable_latency_timer) {
970 if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
971 (void) t1_os_pci_write_config_4(chp, 0xc, 0xf800);
972 }
973 }
974
975 /*
976 * update mtu table (regs: 0x404 - 0x420) with bigger values than
977 * default.
978 */
979 update_mtu_tab(chp);
980
981 /*
982 * Clear all interrupts now. Don't enable
983 * them until later.
984 */
985 t1_interrupts_clear(chp);
986
987 /*
988 * Function succeeded.
989 */
990 return_val = 0;
991
992 return (return_val);
993 }
994
995 /*
996 * DESC: Read variables set in /etc/system and save
997 * them internally. These internal values are then
998 * used to make decisions at run-time on behavior thus
999 * allowing a certain level of customization.
1000 * OUT: p_config - pointer to config structure that
1001 * contains all of the new values.
1002 * RTN: 0 - Success;
1003 */
1004 static int
1005 ch_set_config_data(ch_t *chp)
1006 {
1007 pe_config_data_t *p_config = (pe_config_data_t *)&chp->config_data;
1008
1009 bzero(p_config, sizeof (pe_config_data_t));
1010
1011 /*
1012 * Global Tunnel Mode configuration
1013 */
1014 p_config->gtm = ch_gtm;
1015
1016 p_config->global_config = ch_global_config;
1017
1018 if (p_config->gtm)
1019 p_config->global_config |= CFGMD_TUNNEL;
1020
1021 p_config->tp_reset_cm = ch_tp_reset_cm;
1022 p_config->is_asic = ch_is_asic;
1023
1024 /*
1025 * MC5 configuration.
1026 */
1027 p_config->mc5_rtbl_size = ch_mc5_rtbl_size;
1028 p_config->mc5_dbsvr_size = ch_mc5_dbsvr_size;
1029 p_config->mc5_parity = ch_mc5_parity;
1030 p_config->mc5_issue_syn = ch_mc5_issue_syn;
1031
1032 p_config->offload_ip_cksum = ch_offload_ip_cksum;
1033 p_config->offload_udp_cksum = ch_offload_udp_cksum;
1034 p_config->offload_tcp_cksum = ch_offload_tcp_cksum;
1035
1036 p_config->packet_tracing = ch_packet_tracing;
1037
1038 p_config->server_region_len = ch_server_region_len;
1039 p_config->rt_region_len = ch_rt_region_len;
1040
1041 /*
1042 * Link configuration.
1043 *
1044 * 5 - autoneg; 2 - 1000 Mbps; 1 - 100 Mbps; 0 - 10 Mbps
1045 */
1046 p_config->link_speed = ch_link_speed;
1047 p_config->num_of_ports = ch_num_of_ports;
1048
1049 /*
1050 * Catp options
1051 */
1052 p_config->cat_opt0 = ch_cat_opt0;
1053 p_config->cat_opt1 = ch_cat_opt1;
1054
1055 /*
1056 * SGE configuration.
1057 */
1058 p_config->sge_cmdq0_cnt = ch_sge_cmdq0_cnt;
1059 p_config->sge_cmdq1_cnt = ch_sge_cmdq1_cnt;
1060 p_config->sge_flq0_cnt = ch_sge_flq0_cnt;
1061 p_config->sge_flq1_cnt = ch_sge_flq1_cnt;
1062 p_config->sge_respq_cnt = ch_sge_respq_cnt;
1063
1064 p_config->phy_rx_fifo = ch_phy_rx_fifo;
1065 p_config->phy_tx_fifo = ch_phy_tx_fifo;
1066
1067 p_config->sge_cmdq_threshold = ch_sge_cmdq_threshold;
1068
1069 p_config->sge_flq_threshold = ch_sge_flq_threshold;
1070
1071 p_config->phy_force_master = ch_phy_force_master;
1072
1073 p_config->rb_num_of_entries = ch_rb_num_of_entries;
1074
1075 p_config->rb_size_of_entries = ch_rb_size_of_entries;
1076
1077 p_config->rb_flag = ch_rb_flag;
1078
1079 p_config->exit_early = ch_exit_early;
1080
1081 p_config->chip = ch_chip;
1082
1083 p_config->stats = ch_stats;
1084
1085 p_config->tx_delay_us = ch_tx_delay_us;
1086
1087 return (0);
1088 }
1089
1090 static const struct board_info *
1091 pe_sa_init(ch_t *sa)
1092 {
1093 uint16_t device_id;
1094 uint16_t device_subid;
1095 const struct board_info *bi;
1096
1097 sa->config = sa->config_data.global_config;
1098 device_id = pci_config_get16(sa->ch_hpci, 2);
1099 device_subid = pci_config_get16(sa->ch_hpci, 0x2e);
1100
1101 bi = t1_get_board_info_from_ids(device_id, device_subid);
1102 if (bi == NULL) {
1103 cmn_err(CE_NOTE,
1104 "The adapter with device_id %d %d is not supported.\n",
1105 device_id, device_subid);
1106 return (NULL);
1107 }
1108
1109 if (t1_get_board_rev(sa, bi, &sa->params)) {
1110 cmn_err(CE_NOTE, "unknown device_id %d %d\n",
1111 device_id, device_subid);
1112 return ((const struct board_info *)NULL);
1113 }
1114
1115 return (bi);
1116 }
1117
1118 /*
1119 * allocate pool of small receive buffers (with vaddr & paddr) and
1120 * receiver buffer control structure (ch_esb_t *rbp).
1121 * XXX we should allow better tuning of the # of preallocated
1122 * free buffers against the # of freelist entries.
1123 */
1124 static int
1125 pe_small_rbuf_pool_init(ch_t *sa)
1126 {
1127 int i;
1128 ch_esb_t *rbp;
1129 extern uint32_t sge_flq0_cnt;
1130 extern uint32_t sge_flq1_cnt;
1131 int size;
1132 uint32_t j;
1133
1134 if (is_T2(sa))
1135 size = sge_flq1_cnt * fl_sz_multiplier;
1136 else
1137 size = sge_flq0_cnt * fl_sz_multiplier;
1138
1139 mutex_init(&sa->ch_small_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1140
1141 mutex_enter(&in_use_l);
1142 j = in_use_index++;
1143 if (in_use_index >= SZ_INUSE)
1144 in_use_index = 0;
1145 mutex_exit(&in_use_l);
1146
1147 sa->ch_small_owner = NULL;
1148 sa->ch_sm_index = j;
1149 sa->ch_small_esb_free = NULL;
1150 for (i = 0; i < size; i++) {
1151 rbp = ch_alloc_small_esbbuf(sa, j);
1152 if (rbp == NULL)
1153 goto error;
1154 /*
1155 * add entry to free list
1156 */
1157 rbp->cs_next = sa->ch_small_esb_free;
1158 sa->ch_small_esb_free = rbp;
1159
1160 /*
1161 * add entry to owned list
1162 */
1163 rbp->cs_owner = sa->ch_small_owner;
1164 sa->ch_small_owner = rbp;
1165 }
1166 return (1);
1167
1168 error:
1169 sa->ch_small_owner = NULL;
1170
1171 /* free whatever we've already allocated */
1172 pe_rbuf_pool_free(sa);
1173
1174 return (0);
1175 }
1176
1177 /*
1178 * allocate pool of receive buffers (with vaddr & paddr) and
1179 * receiver buffer control structure (ch_esb_t *rbp).
1180 * XXX we should allow better tuning of the # of preallocated
1181 * free buffers against the # of freelist entries.
1182 */
1183 static int
1184 pe_big_rbuf_pool_init(ch_t *sa)
1185 {
1186 int i;
1187 ch_esb_t *rbp;
1188 extern uint32_t sge_flq0_cnt;
1189 extern uint32_t sge_flq1_cnt;
1190 int size;
1191 uint32_t j;
1192
1193 if (is_T2(sa))
1194 size = sge_flq0_cnt * fl_sz_multiplier;
1195 else
1196 size = sge_flq1_cnt * fl_sz_multiplier;
1197
1198 mutex_init(&sa->ch_big_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);
1199
1200 mutex_enter(&in_use_l);
1201 j = in_use_index++;
1202 if (in_use_index >= SZ_INUSE)
1203 in_use_index = 0;
1204 mutex_exit(&in_use_l);
1205
1206 sa->ch_big_owner = NULL;
1207 sa->ch_big_index = j;
1208 sa->ch_big_esb_free = NULL;
1209 for (i = 0; i < size; i++) {
1210 rbp = ch_alloc_big_esbbuf(sa, j);
1211 if (rbp == NULL)
1212 goto error;
1213 rbp->cs_next = sa->ch_big_esb_free;
1214 sa->ch_big_esb_free = rbp;
1215
1216 /*
1217 * add entry to owned list
1218 */
1219 rbp->cs_owner = sa->ch_big_owner;
1220 sa->ch_big_owner = rbp;
1221 }
1222 return (1);
1223
1224 error:
1225 sa->ch_big_owner = NULL;
1226
1227 /* free whatever we've already allocated */
1228 pe_rbuf_pool_free(sa);
1229
1230 return (0);
1231 }
1232
1233 /*
1234 * allocate receive buffer structure and dma mapped buffer (SGE_SM_BUF_SZ bytes)
1235 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1236 * is enabled.
1237 */
1238 static ch_esb_t *
1239 ch_alloc_small_esbbuf(ch_t *sa, uint32_t i)
1240 {
1241 ch_esb_t *rbp;
1242
1243 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1244 if (rbp == NULL) {
1245 return ((ch_esb_t *)0);
1246 }
1247
1248 #if BYTE_ORDER == BIG_ENDIAN
1249 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_SMALN,
1250 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1251 #else
1252 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_SMALN,
1253 SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1254 #endif
1255
1256 if (rbp->cs_buf == NULL) {
1257 kmem_free(rbp, sizeof (ch_esb_t));
1258 return ((ch_esb_t *)0);
1259 }
1260
1261 rbp->cs_sa = sa;
1262 rbp->cs_index = i;
1263
1264 rbp->cs_frtn.free_func = (void (*)())&ch_small_rbuf_recycle;
1265 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1266
1267 return (rbp);
1268 }
1269
1270 /*
1271 * allocate receive buffer structure and dma mapped buffer (SGE_BG_BUF_SZ bytes)
1272 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
1273 * is enabled.
1274 */
1275 static ch_esb_t *
1276 ch_alloc_big_esbbuf(ch_t *sa, uint32_t i)
1277 {
1278 ch_esb_t *rbp;
1279
1280 rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
1281 if (rbp == NULL) {
1282 return ((ch_esb_t *)0);
1283 }
1284
1285 #if BYTE_ORDER == BIG_ENDIAN
1286 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_BGALN,
1287 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1288 #else
1289 rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_BGALN,
1290 SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
1291 #endif
1292
1293 if (rbp->cs_buf == NULL) {
1294 kmem_free(rbp, sizeof (ch_esb_t));
1295 return ((ch_esb_t *)0);
1296 }
1297
1298 rbp->cs_sa = sa;
1299 rbp->cs_index = i;
1300
1301 rbp->cs_frtn.free_func = (void (*)())&ch_big_rbuf_recycle;
1302 rbp->cs_frtn.free_arg = (caddr_t)rbp;
1303
1304 return (rbp);
1305 }
1306
1307 /*
1308 * free entries on the receive buffer list.
1309 */
1310 void
1311 pe_rbuf_pool_free(ch_t *sa)
1312 {
1313 ch_esb_t *rbp;
1314
1315 mutex_enter(&sa->ch_small_esbl);
1316
1317 /*
1318 * Now set-up the rest to commit suicide.
1319 */
1320 while (sa->ch_small_owner) {
1321 rbp = sa->ch_small_owner;
1322 sa->ch_small_owner = rbp->cs_owner;
1323 rbp->cs_owner = NULL;
1324 rbp->cs_flag = 1;
1325 }
1326
1327 while ((rbp = sa->ch_small_esb_free) != NULL) {
1328 /* advance head ptr to next entry */
1329 sa->ch_small_esb_free = rbp->cs_next;
1330 /* free private buffer allocated in ch_alloc_esbbuf() */
1331 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1332 /* free descriptor buffer */
1333 kmem_free(rbp, sizeof (ch_esb_t));
1334 }
1335
1336 mutex_exit(&sa->ch_small_esbl);
1337
1338 /* destroy ch_esbl lock */
1339 mutex_destroy(&sa->ch_small_esbl);
1340
1341
1342 mutex_enter(&sa->ch_big_esbl);
1343
1344 /*
1345 * Now set-up the rest to commit suicide.
1346 */
1347 while (sa->ch_big_owner) {
1348 rbp = sa->ch_big_owner;
1349 sa->ch_big_owner = rbp->cs_owner;
1350 rbp->cs_owner = NULL;
1351 rbp->cs_flag = 1;
1352 }
1353
1354 while ((rbp = sa->ch_big_esb_free) != NULL) {
1355 /* advance head ptr to next entry */
1356 sa->ch_big_esb_free = rbp->cs_next;
1357 /* free private buffer allocated in ch_alloc_esbbuf() */
1358 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1359 /* free descriptor buffer */
1360 kmem_free(rbp, sizeof (ch_esb_t));
1361 }
1362
1363 mutex_exit(&sa->ch_big_esbl);
1364
1365 /* destroy ch_esbl lock */
1366 mutex_destroy(&sa->ch_big_esbl);
1367 }
1368
1369 void
1370 ch_small_rbuf_recycle(ch_esb_t *rbp)
1371 {
1372 ch_t *sa = rbp->cs_sa;
1373
1374 if (rbp->cs_flag) {
1375 uint32_t i;
1376 /*
1377 * free private buffer allocated in ch_alloc_esbbuf()
1378 */
1379 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1380
1381 i = rbp->cs_index;
1382
1383 /*
1384 * free descriptor buffer
1385 */
1386 kmem_free(rbp, sizeof (ch_esb_t));
1387
1388 /*
1389 * decrement count of receive buffers freed by callback
1390 * We decrement here so anyone trying to do fini will
1391 * only remove the driver once the counts go to 0.
1392 */
1393 atomic_add_32(&buffers_in_use[i], -1);
1394
1395 return;
1396 }
1397
1398 mutex_enter(&sa->ch_small_esbl);
1399 rbp->cs_next = sa->ch_small_esb_free;
1400 sa->ch_small_esb_free = rbp;
1401 mutex_exit(&sa->ch_small_esbl);
1402
1403 /*
1404 * decrement count of receive buffers freed by callback
1405 */
1406 atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1407 }
1408
1409 /*
1410 * callback function from freeb() when esballoced mblk freed.
1411 */
1412 void
1413 ch_big_rbuf_recycle(ch_esb_t *rbp)
1414 {
1415 ch_t *sa = rbp->cs_sa;
1416
1417 if (rbp->cs_flag) {
1418 uint32_t i;
1419 /*
1420 * free private buffer allocated in ch_alloc_esbbuf()
1421 */
1422 ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
1423
1424 i = rbp->cs_index;
1425
1426 /*
1427 * free descriptor buffer
1428 */
1429 kmem_free(rbp, sizeof (ch_esb_t));
1430
1431 /*
1432 * decrement count of receive buffers freed by callback
1433 * We decrement here so anyone trying to do fini will
1434 * only remove the driver once the counts go to 0.
1435 */
1436 atomic_add_32(&buffers_in_use[i], -1);
1437
1438 return;
1439 }
1440
1441 mutex_enter(&sa->ch_big_esbl);
1442 rbp->cs_next = sa->ch_big_esb_free;
1443 sa->ch_big_esb_free = rbp;
1444 mutex_exit(&sa->ch_big_esbl);
1445
1446 /*
1447 * decrement count of receive buffers freed by callback
1448 */
1449 atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
1450 }
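/*
 * Sketch of the assumed recycle protocol: the receive path (sge.c) is
 * expected to wrap cs_buf in an mblk with desballoc(9F), passing cs_frtn
 * so that a later freeb()/freemsg() on that mblk invokes
 * ch_small_rbuf_recycle()/ch_big_rbuf_recycle() above and the buffer
 * returns to the free list (or is destroyed once cs_flag is set), e.g.
 *
 *	mblk_t *mp = desballoc((unsigned char *)rbp->cs_buf,
 *	    SGE_SM_BUF_SZ(sa), BPRI_MED, &rbp->cs_frtn);
 */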
1451
1452 /*
1453 * get a pre-allocated, pre-mapped receive buffer from free list.
1454 * (used by sge.c)
1455 */
1456 ch_esb_t *
1457 ch_get_small_rbuf(ch_t *sa)
1458 {
1459 ch_esb_t *rbp;
1460
1461 mutex_enter(&sa->ch_small_esbl);
1462 rbp = sa->ch_small_esb_free;
1463 if (rbp) {
1464 sa->ch_small_esb_free = rbp->cs_next;
1465 }
1466 mutex_exit(&sa->ch_small_esbl);
1467
1468 return (rbp);
1469 }
1470
1471 /*
1472 * get a pre-allocated, pre-mapped receive buffer from free list.
1473 * (used by sge.c)
1474 */
1475
1476 ch_esb_t *
1477 ch_get_big_rbuf(ch_t *sa)
1478 {
1479 ch_esb_t *rbp;
1480
1481 mutex_enter(&sa->ch_big_esbl);
1482 rbp = sa->ch_big_esb_free;
1483 if (rbp) {
1484 sa->ch_big_esb_free = rbp->cs_next;
1485 }
1486 mutex_exit(&sa->ch_big_esbl);
1487
1488 return (rbp);
1489 }
1490
1491 void
1492 pe_detach(ch_t *sa)
1493 {
1494 (void) sge_stop(sa->sge);
1495
1496 pe_free_driver_resources(sa);
1497 }
1498
1499 static void
1500 pe_free_driver_resources(ch_t *sa)
1501 {
1502 if (sa) {
1503 t1_free_sw_modules(sa);
1504
1505 /* free pool of receive buffers */
1506 pe_rbuf_pool_free(sa);
1507 }
1508 }
1509
1510 /*
1511 * Processes elmer0 external interrupts in process context.
1512 */
1513 static void
1514 ext_intr_task(ch_t *adapter)
1515 {
1516 u32 enable;
1517
1518 (void) elmer0_ext_intr_handler(adapter);
1519
1520 /* Now reenable external interrupts */
1521 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
1522 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1523 t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
1524 adapter->slow_intr_mask |= F_PL_INTR_EXT;
1525 }
1526
1527 /*
1528 * Interrupt-context handler for elmer0 external interrupts.
1529 */
1530 void
1531 t1_os_elmer0_ext_intr(ch_t *adapter)
1532 {
1533 u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
1534
1535 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
1536 t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
1537 #ifdef NOTYET
1538 schedule_work(&adapter->ext_intr_handler_task);
1539 #else
1540 ext_intr_task(adapter);
1541 #endif
1542 }
1543
1544 uint8_t *
1545 t1_get_next_mcaddr(struct t1_rx_mode *rmp)
1546 {
1547 uint8_t *addr = 0;
1548 if (rmp->mc) {
1549 addr = rmp->mc->cmc_mca;
1550 rmp->mc = rmp->mc->cmc_next;
1551 }
1552 return (addr);
1553 }
1554
1555 void
1556 pe_dma_handle_init(ch_t *chp, int cnt)
1557 {
1558 free_dh_t *dhe;
1559 #if defined(__sparc)
1560 int tcnt = cnt/2;
1561
1562 for (; cnt; cnt--) {
1563 dhe = ch_get_dvma_handle(chp);
1564 if (dhe == NULL)
1565 break;
1566 mutex_enter(&chp->ch_dh_lck);
1567 dhe->dhe_next = chp->ch_vdh;
1568 chp->ch_vdh = dhe;
1569 mutex_exit(&chp->ch_dh_lck);
1570 }
1571
1572 cnt += tcnt;
1573 #endif
1574 while (cnt--) {
1575 dhe = ch_get_dma_handle(chp);
1576 if (dhe == NULL)
1577 return;
1578 mutex_enter(&chp->ch_dh_lck);
1579 dhe->dhe_next = chp->ch_dh;
1580 chp->ch_dh = dhe;
1581 mutex_exit(&chp->ch_dh_lck);
1582 }
1583 }
1584
1585 /*
1586 * Write new values to the MTU table. Caller must validate that the new MTUs
1587 * are in ascending order. params.mtus[] is initialized by init_mtus()
1588 * called in t1_init_sw_modules().
1589 */
1590 #define MTUREG(idx) (A_TP_MTU_REG0 + (idx) * 4)
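/*
 * Worked example of the layout assumed by MTUREG(): the table is NMTUS
 * consecutive 32-bit registers starting at A_TP_MTU_REG0, so MTUREG(0)
 * is A_TP_MTU_REG0 and MTUREG(7) is A_TP_MTU_REG0 + 0x1c.
 */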
1591
1592 static void
1593 update_mtu_tab(ch_t *adapter)
1594 {
1595 int i;
1596
1597 for (i = 0; i < NMTUS; ++i) {
1598 int mtu = (unsigned int)adapter->params.mtus[i];
1599
1600 t1_write_reg_4(adapter, MTUREG(i), mtu);
1601 }
1602 }
1603
1604 static int
1605 pe_change_mtu(ch_t *chp)
1606 {
1607 struct cmac *mac = chp->port[0].mac;
1608 int ret;
1609
1610 if (!mac->ops->set_mtu) {
1611 return (EOPNOTSUPP);
1612 }
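/* 68 bytes is the minimum MTU an IPv4 host must support (RFC 791). */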
1613 if (chp->ch_mtu < 68) {
1614 return (EINVAL);
1615 }
1616 if (ret = mac->ops->set_mtu(mac, chp->ch_mtu)) {
1617 return (ret);
1618 }
1619
1620 return (0);
1621 }
1622
1623 typedef struct fake_arp {
1624 char fa_dst[6]; /* ethernet header */
1625 char fa_src[6]; /* ethernet header */
1626 ushort_t fa_typ; /* ethernet header */
1627
1628 ushort_t fa_hrd; /* arp */
1629 ushort_t fa_pro;
1630 char fa_hln;
1631 char fa_pln;
1632 ushort_t fa_op;
1633 char fa_src_mac[6];
1634 uint_t fa_src_ip;
1635 char fa_dst_mac[6];
1636 char fa_dst_ip[4];
1637 } fake_arp_t;
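/*
 * The layout above mirrors a raw Ethernet ARP frame: a 14-byte Ethernet
 * header followed by the 28-byte ARP payload.  pe_get_ip() copies the
 * frame into a local fake_arp_t before reading fa_src_ip so the load is
 * properly aligned regardless of how the mblk data was laid out.
 */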
1638
1639 /*
1640 * PR2928 & PR3309
1641 * construct packet in mblk and attach it to sge structure.
1642 */
1643 static int
1644 pe_make_fake_arp(ch_t *chp, unsigned char *arpp)
1645 {
1646 pesge *sge = chp->sge;
1647 mblk_t *bp;
1648 fake_arp_t *fap;
1649 static char buf[6] = {0, 7, 0x43, 0, 0, 0};
1650 struct cpl_tx_pkt *cpl;
1651
1652 bp = allocb(sizeof (struct fake_arp) + SZ_CPL_TX_PKT, BPRI_HI);
1653 if (bp == NULL) {
1654 return (1);
1655 }
1656 bzero(bp->b_rptr, sizeof (struct fake_arp) + SZ_CPL_TX_PKT);
1657
1658 /* fill in cpl header */
1659 cpl = (struct cpl_tx_pkt *)bp->b_rptr;
1660 cpl->opcode = CPL_TX_PKT;
1661 cpl->iff = 0; /* XXX port 0 needs fixing with NEMO */
1662 cpl->ip_csum_dis = 1; /* no IP header cksum */
1663 cpl->l4_csum_dis = 1; /* no tcp/udp cksum */
1664 cpl->vlan_valid = 0; /* no vlan */
1665
1666 fap = (fake_arp_t *)&bp->b_rptr[SZ_CPL_TX_PKT];
1667
1668 bcopy(arpp, fap, sizeof (*fap)); /* copy first arp to mblk */
1669
1670 bcopy(buf, fap->fa_dst, 6); /* overwrite dst mac */
1671 chp->ch_ip = fap->fa_src_ip; /* not used yet */
1672 bcopy(buf, fap->fa_dst_mac, 6); /* overwrite dst mac */
1673
1674 bp->b_wptr = bp->b_rptr + sizeof (struct fake_arp)+SZ_CPL_TX_PKT;
1675
1676 sge_add_fake_arp(sge, (void *)bp);
1677
1678 return (0);
1679 }
1680
1681 /*
1682 * PR2928 & PR3309
1683 * free the fake arp's mblk on sge structure.
1684 */
1685 void
1686 pe_free_fake_arp(void *arp)
1687 {
1688 mblk_t *bp = (mblk_t *)(arp);
1689
1690 freemsg(bp);
1691 }
1692
1693 /*
1694 * extract ip address of nic from first outgoing arp.
1695 */
1696 static uint32_t
1697 pe_get_ip(unsigned char *arpp)
1698 {
1699 fake_arp_t fap;
1700
1701 /*
1702 * first copy packet to buffer so we know
1703 * it will be properly aligned.
1704 */
1705 bcopy(arpp, &fap, sizeof (fap)); /* copy first arp to buffer */
1706 return (fap.fa_src_ip);
1707 }
1708
1709 /* ARGSUSED */
1710 void
1711 t1_os_link_changed(ch_t *obj, int port_id, int link_status,
1712 int speed, int duplex, int fc)
1713 {
1714 gld_mac_info_t *macinfo = obj->ch_macp;
1715 if (link_status) {
1716 gld_linkstate(macinfo, GLD_LINKSTATE_UP);
1717 /*
1718 * Link state changes should be reported to
1719 * the user whenever they occur.
1720 */
1721 cmn_err(CE_NOTE, "%s: link is up", adapter_name(obj));
1722 } else {
1723 gld_linkstate(macinfo, GLD_LINKSTATE_DOWN);
1724 /*
1725 * Link state changes should be reported to
1726 * the user whenever they occur.
1727 */
1728 cmn_err(CE_NOTE, "%s: link is down", adapter_name(obj));
1729 }
1730 }
1731