1 /* $OpenBSD: if_pcn.c,v 1.50 2024/05/24 06:02:56 jsg Exp $ */
2 /* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */
3
4 /*
5 * Copyright (c) 2001 Wasabi Systems, Inc.
6 * All rights reserved.
7 *
8 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Device driver for the AMD PCnet-PCI series of Ethernet
41 * chips:
42 *
43 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
44 * Local Bus
45 *
46 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
47 * for PCI Local Bus
48 *
49 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
50 * Ethernet Controller for PCI Local Bus
51 *
52 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
53 * with OnNow Support
54 *
55 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
56 * Ethernet Controller with Integrated PHY
57 *
58 * This also supports the virtual PCnet-PCI Ethernet interface found
59 * in VMware.
60 *
61 * TODO:
62 *
63 * * Split this into bus-specific and bus-independent portions.
64 * The core could also be used for the ILACC (Am79900) 32-bit
65 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
66 */
67
68 #include "bpfilter.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/timeout.h>
73 #include <sys/mbuf.h>
74 #include <sys/ioctl.h>
75 #include <sys/errno.h>
76 #include <sys/device.h>
77 #include <sys/queue.h>
78 #include <sys/endian.h>
79
80 #include <net/if.h>
81 #include <net/if_dl.h>
82
83 #include <netinet/in.h>
84 #include <netinet/if_ether.h>
85
86 #include <net/if_media.h>
87
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91
92 #include <machine/bus.h>
93 #include <machine/intr.h>
94
95 #include <dev/mii/miivar.h>
96
97 #include <dev/ic/am79900reg.h>
98 #include <dev/ic/lancereg.h>
99
100 #include <dev/pci/pcireg.h>
101 #include <dev/pci/pcivar.h>
102 #include <dev/pci/pcidevs.h>
103
104 /*
105 * Register definitions for the AMD PCnet-PCI series of Ethernet
106 * chips.
107 *
108 * These are only the registers that we access directly from PCI
109 * space. Everything else (accessed via the RAP + RDP/BDP) is
110 * defined in <dev/ic/lancereg.h>.
111 */
112
113 /*
114 * PCI configuration space.
115 */
116
117 #define PCN_PCI_CBIO (PCI_MAPREG_START + 0x00)
118 #define PCN_PCI_CBMEM (PCI_MAPREG_START + 0x04)
119
120 /*
121 * I/O map in Word I/O mode.
122 */
123
124 #define PCN16_APROM 0x00
125 #define PCN16_RDP 0x10
126 #define PCN16_RAP 0x12
127 #define PCN16_RESET 0x14
128 #define PCN16_BDP 0x16
129
130 /*
131 * I/O map in DWord I/O mode.
132 */
133
134 #define PCN32_APROM 0x00
135 #define PCN32_RDP 0x10
136 #define PCN32_RAP 0x14
137 #define PCN32_RESET 0x18
138 #define PCN32_BDP 0x1c
139
140 /*
141 * Transmit descriptor list size. This is arbitrary, but allocate
142 * enough descriptors for 128 pending transmissions, and 4 segments
143 * per packet. This MUST work out to a power of 2.
144 *
145 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
146 *
147 * So we play a little trick here. We give each packet up to 16
148 * DMA segments, but only allocate the max of 512 descriptors. The
149 * transmit logic can deal with this, we just are hoping to sneak by.
150 */
151 #define PCN_NTXSEGS 16
152
153 #define PCN_TXQUEUELEN 128
154 #define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1)
155 #define PCN_NTXDESC 512
156 #define PCN_NTXDESC_MASK (PCN_NTXDESC - 1)
157 #define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK)
158 #define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK)
159
160 /* Tx interrupt every N + 1 packets. */
161 #define PCN_TXINTR_MASK 7
162
163 /*
164 * Receive descriptor list size. We have one Rx buffer per incoming
165 * packet, so this logic is a little simpler.
166 */
167 #define PCN_NRXDESC 128
168 #define PCN_NRXDESC_MASK (PCN_NRXDESC - 1)
169 #define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK)
170
171 /*
172 * Control structures are DMA'd to the PCnet chip. We allocate them in
173 * a single clump that maps to a single DMA segment to make several things
174 * easier.
175 */
176 struct pcn_control_data {
177 /* The transmit descriptors. */
178 struct letmd pcd_txdescs[PCN_NTXDESC];
179
180 /* The receive descriptors. */
181 struct lermd pcd_rxdescs[PCN_NRXDESC];
182
183 /* The init block. */
184 struct leinit pcd_initblock;
185 };
186
187 #define PCN_CDOFF(x) offsetof(struct pcn_control_data, x)
188 #define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)])
189 #define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)])
190 #define PCN_CDINITOFF PCN_CDOFF(pcd_initblock)
191
192 /*
193 * Software state for transmit jobs.
194 */
195 struct pcn_txsoft {
196 struct mbuf *txs_mbuf; /* head of our mbuf chain */
197 bus_dmamap_t txs_dmamap; /* our DMA map */
198 int txs_firstdesc; /* first descriptor in packet */
199 int txs_lastdesc; /* last descriptor in packet */
200 };
201
202 /*
203 * Software state for receive jobs.
204 */
205 struct pcn_rxsoft {
206 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
207 bus_dmamap_t rxs_dmamap; /* our DMA map */
208 };
209
210 /*
211 * Description of Rx FIFO watermarks for various revisions.
212 */
213 static const char * const pcn_79c970_rcvfw[] = {
214 "16 bytes",
215 "64 bytes",
216 "128 bytes",
217 NULL,
218 };
219
220 static const char * const pcn_79c971_rcvfw[] = {
221 "16 bytes",
222 "64 bytes",
223 "112 bytes",
224 NULL,
225 };
226
227 /*
228 * Description of Tx start points for various revisions.
229 */
230 static const char * const pcn_79c970_xmtsp[] = {
231 "8 bytes",
232 "64 bytes",
233 "128 bytes",
234 "248 bytes",
235 };
236
237 static const char * const pcn_79c971_xmtsp[] = {
238 "20 bytes",
239 "64 bytes",
240 "128 bytes",
241 "248 bytes",
242 };
243
244 static const char * const pcn_79c971_xmtsp_sram[] = {
245 "44 bytes",
246 "64 bytes",
247 "128 bytes",
248 "store-and-forward",
249 };
250
251 /*
252 * Description of Tx FIFO watermarks for various revisions.
253 */
254 static const char * const pcn_79c970_xmtfw[] = {
255 "16 bytes",
256 "64 bytes",
257 "128 bytes",
258 NULL,
259 };
260
261 static const char * const pcn_79c971_xmtfw[] = {
262 "16 bytes",
263 "64 bytes",
264 "108 bytes",
265 NULL,
266 };
267
268 /*
269 * Software state per device.
270 */
271 struct pcn_softc {
272 struct device sc_dev; /* generic device information */
273 bus_space_tag_t sc_st; /* bus space tag */
274 bus_space_handle_t sc_sh; /* bus space handle */
275 bus_dma_tag_t sc_dmat; /* bus DMA tag */
276 struct arpcom sc_arpcom; /* Ethernet common data */
277
278 /* Points to our media routines, etc. */
279 const struct pcn_variant *sc_variant;
280
281 void *sc_ih; /* interrupt cookie */
282
283 struct mii_data sc_mii; /* MII/media information */
284
285 struct timeout sc_tick_timeout; /* tick timeout */
286
287 bus_dmamap_t sc_cddmamap; /* control data DMA map */
288 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
289
290 /* Software state for transmit and receive descriptors. */
291 struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
292 struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
293
294 /* Control data structures */
295 struct pcn_control_data *sc_control_data;
296 #define sc_txdescs sc_control_data->pcd_txdescs
297 #define sc_rxdescs sc_control_data->pcd_rxdescs
298 #define sc_initblock sc_control_data->pcd_initblock
299
300 const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */
301 int sc_rcvfw;
302
303 const char * const *sc_xmtsp_desc; /* Tx start point info */
304 int sc_xmtsp;
305
306 const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */
307 int sc_xmtfw;
308
309 int sc_flags; /* misc. flags; see below */
310 int sc_swstyle; /* the software style in use */
311
312 int sc_txfree; /* number of free Tx descriptors */
313 int sc_txnext; /* next ready Tx descriptor */
314
315 int sc_txsfree; /* number of free Tx jobs */
316 int sc_txsnext; /* next free Tx job */
317 int sc_txsdirty; /* dirty Tx jobs */
318
319 int sc_rxptr; /* next ready Rx descriptor/job */
320
321 uint32_t sc_csr5; /* prototype CSR5 register */
322 uint32_t sc_mode; /* prototype MODE register */
323 };
324
325 /* sc_flags */
326 #define PCN_F_HAS_MII 0x0001 /* has MII */
327
328 #define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x)))
329 #define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x)))
330 #define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF)
331
332 #define PCN_CDTXSYNC(sc, x, n, ops) \
333 do { \
334 int __x, __n; \
335 \
336 __x = (x); \
337 __n = (n); \
338 \
339 /* If it will wrap around, sync to the end of the ring. */ \
340 if ((__x + __n) > PCN_NTXDESC) { \
341 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
342 PCN_CDTXOFF(__x), sizeof(struct letmd) * \
343 (PCN_NTXDESC - __x), (ops)); \
344 __n -= (PCN_NTXDESC - __x); \
345 __x = 0; \
346 } \
347 \
348 /* Now sync whatever is left. */ \
349 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
350 PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \
351 } while (/*CONSTCOND*/0)
352
353 #define PCN_CDRXSYNC(sc, x, ops) \
354 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
355 PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
356
357 #define PCN_CDINITSYNC(sc, ops) \
358 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
359 PCN_CDINITOFF, sizeof(struct leinit), (ops))
360
361 #define PCN_INIT_RXDESC(sc, x) \
362 do { \
363 struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
364 struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \
365 struct mbuf *__m = __rxs->rxs_mbuf; \
366 \
367 /* \
368 * Note: We scoot the packet forward 2 bytes in the buffer \
369 * so that the payload after the Ethernet header is aligned \
370 * to a 4-byte boundary. \
371 */ \
372 __m->m_data = __m->m_ext.ext_buf + 2; \
373 \
374 if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \
375 __rmd->rmd2 = \
376 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
377 __rmd->rmd0 = 0; \
378 } else { \
379 __rmd->rmd2 = 0; \
380 __rmd->rmd0 = \
381 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
382 } \
383 __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \
384 (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \
385 PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
386 } while(/*CONSTCOND*/0)
387
388 void pcn_start(struct ifnet *);
389 void pcn_watchdog(struct ifnet *);
390 int pcn_ioctl(struct ifnet *, u_long, caddr_t);
391 int pcn_init(struct ifnet *);
392 void pcn_stop(struct ifnet *, int);
393
394 void pcn_reset(struct pcn_softc *);
395 void pcn_rxdrain(struct pcn_softc *);
396 int pcn_add_rxbuf(struct pcn_softc *, int);
397 void pcn_tick(void *);
398
399 void pcn_spnd(struct pcn_softc *);
400
401 void pcn_set_filter(struct pcn_softc *);
402
403 int pcn_intr(void *);
404 void pcn_txintr(struct pcn_softc *);
405 int pcn_rxintr(struct pcn_softc *);
406
407 int pcn_mii_readreg(struct device *, int, int);
408 void pcn_mii_writereg(struct device *, int, int, int);
409 void pcn_mii_statchg(struct device *);
410
411 void pcn_79c970_mediainit(struct pcn_softc *);
412 int pcn_79c970_mediachange(struct ifnet *);
413 void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
414
415 void pcn_79c971_mediainit(struct pcn_softc *);
416 int pcn_79c971_mediachange(struct ifnet *);
417 void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
418
419 /*
420 * Description of a PCnet-PCI variant. Used to select media access
421 * method, mostly, and to print a nice description of the chip.
422 */
423 static const struct pcn_variant {
424 const char *pcv_desc;
425 void (*pcv_mediainit)(struct pcn_softc *);
426 uint16_t pcv_chipid;
427 } pcn_variants[] = {
428 { "Am79c970",
429 pcn_79c970_mediainit,
430 PARTID_Am79c970 },
431
432 { "Am79c970A",
433 pcn_79c970_mediainit,
434 PARTID_Am79c970A },
435
436 { "Am79c971",
437 pcn_79c971_mediainit,
438 PARTID_Am79c971 },
439
440 { "Am79c972",
441 pcn_79c971_mediainit,
442 PARTID_Am79c972 },
443
444 { "Am79c973",
445 pcn_79c971_mediainit,
446 PARTID_Am79c973 },
447
448 { "Am79c975",
449 pcn_79c971_mediainit,
450 PARTID_Am79c975 },
451
452 { "Am79c976",
453 pcn_79c971_mediainit,
454 PARTID_Am79c976 },
455
456 { "Am79c978",
457 pcn_79c971_mediainit,
458 PARTID_Am79c978 },
459
460 { "Unknown",
461 pcn_79c971_mediainit,
462 0 },
463 };
464
465 int pcn_copy_small = 0;
466
467 int pcn_match(struct device *, void *, void *);
468 void pcn_attach(struct device *, struct device *, void *);
469
470 const struct cfattach pcn_ca = {
471 sizeof(struct pcn_softc), pcn_match, pcn_attach,
472 };
473
474 const struct pci_matchid pcn_devices[] = {
475 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI },
476 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI }
477 };
478
479 struct cfdriver pcn_cd = {
480 NULL, "pcn", DV_IFNET
481 };
482
483 /*
484 * Routines to read and write the PCnet-PCI CSR/BCR space.
485 */
486
487 static __inline uint32_t
pcn_csr_read(struct pcn_softc * sc,int reg)488 pcn_csr_read(struct pcn_softc *sc, int reg)
489 {
490
491 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
492 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
493 }
494
495 static __inline void
pcn_csr_write(struct pcn_softc * sc,int reg,uint32_t val)496 pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
497 {
498
499 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
500 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
501 }
502
503 static __inline uint32_t
pcn_bcr_read(struct pcn_softc * sc,int reg)504 pcn_bcr_read(struct pcn_softc *sc, int reg)
505 {
506
507 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
508 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
509 }
510
511 static __inline void
pcn_bcr_write(struct pcn_softc * sc,int reg,uint32_t val)512 pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
513 {
514
515 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
516 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
517 }
518
519 static const struct pcn_variant *
pcn_lookup_variant(uint16_t chipid)520 pcn_lookup_variant(uint16_t chipid)
521 {
522 const struct pcn_variant *pcv;
523
524 for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
525 if (chipid == pcv->pcv_chipid)
526 return (pcv);
527 }
528
529 /*
530 * This covers unknown chips, which we simply treat like
531 * a generic PCnet-FAST.
532 */
533 return (pcv);
534 }
535
536 int
pcn_match(struct device * parent,void * match,void * aux)537 pcn_match(struct device *parent, void *match, void *aux)
538 {
539 struct pci_attach_args *pa = aux;
540
541 /*
542 * IBM makes a PCI variant of this card which shows up as a
543 * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25)
544 * this card is truly a pcn card, so we have a special case match for
545 * it.
546 */
547 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
548 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
549 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
550 return(1);
551
552 return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
553 nitems(pcn_devices)));
554 }
555
/*
 * pcn_attach:
 *
 *	Attach a PCnet-PCI interface: map the device registers, read
 *	the station address, identify the chip variant, establish the
 *	interrupt, allocate and load the DMA control structures, and
 *	attach the network interface.  On failure, resources already
 *	acquired are released via the fail_* labels in reverse order.
 */
void
pcn_attach(struct device *parent, struct device *self, void *aux)
{
	struct pcn_softc *sc = (struct pcn_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	uint32_t chipid, reg;
	uint8_t enaddr[ETHER_ADDR_LEN];

	timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);

	/*
	 * Map the device.  Try both the I/O and memory BARs; memory
	 * space is preferred when both succeed.
	 */
	ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL, 0) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode, if needed. */
	pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Reset the chip to a known state. This also puts the
	 * chip into 32-bit mode.
	 */
	pcn_reset(sc);

#if !defined(PCN_NO_PROM)

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
		    PCN32_APROM + i);
#else
	/*
	 * The PROM is not used; instead we assume that the MAC address
	 * has been programmed into the device's physical address
	 * registers by the boot firmware
	 */

	/* CSR12-14 each hold two bytes of the station address. */
	for (i=0; i < 3; i++) {
		uint32_t val;
		val = pcn_csr_read(sc, LE_CSR12 + i);
		enaddr[2*i] = val & 0x0ff;
		enaddr[2*i+1] = (val >> 8) & 0x0ff;
	}
#endif

	/*
	 * Now that the device is mapped, attempt to figure out what
	 * kind of chip we have. Note that IDL has all 32 bits of
	 * the chip ID when we're in 32-bit mode.
	 */
	chipid = pcn_csr_read(sc, LE_CSR88);
	sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The whole clump (Tx/Rx descriptors plus the
	 * init block) lives in one DMA segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct pcn_control_data), 1,
	    sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
	    0)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    PCN_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < PCN_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
	    CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));

	/* Initialize our media structures. */
	(*sc->sc_variant->pcv_mediainit)(sc);

	/*
	 * Initialize FIFO watermark info.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
		sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
		break;

	default:
		sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
		/*
		 * Read BCR25 to determine how much SRAM is
		 * on the board. If > 0, then we the chip
		 * uses different Start Point thresholds.
		 *
		 * Note BCR25 and BCR26 are loaded from the
		 * EEPROM on RST, and unaffected by S_RESET,
		 * so we don't really have to worry about
		 * them except for this.
		 */
		reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
		if (reg != 0)
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
		else
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
		break;
	}

	/*
	 * Set up defaults -- see the tables above for what these
	 * values mean.
	 *
	 * XXX How should we tune RCVFW and XMTFW?
	 */
	sc->sc_rcvfw = 1;	/* minimum for full-duplex */
	sc->sc_xmtsp = 1;
	sc->sc_xmtfw = 0;

	/* Fill in the ifnet and attach the interface. */
	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pcn_ioctl;
	ifp->if_start = pcn_start;
	ifp->if_watchdog = pcn_watchdog;
	ifq_init_maxlen(&ifp->if_snd, PCN_NTXDESC -1);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < PCN_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct pcn_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
}
801
802 /*
803 * pcn_start: [ifnet interface function]
804 *
805 * Start packet transmission on the interface.
806 */
void
pcn_start(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct pcn_txsoft *txs;
	bus_dmamap_t dmamap;
	int nexttx, lasttx = -1, ofree, seg;

	/* Nothing to do if the interface is down or already busy. */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Stop when we run out of Tx jobs, or when there may not
		 * be enough descriptors for a maximally-fragmented packet
		 * (PCN_NTXSEGS segments plus one for slack).
		 */
		if (sc->sc_txsfree == 0 ||
		    sc->sc_txfree < (PCN_NTXSEGS + 1)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Grab a packet off the queue. */
		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the mbuf chain into the DMA map.  On EFBIG (too
		 * many segments), try once more after defragmenting the
		 * chain; any other failure drops the packet.
		 */
		switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT)) {
		case 0:
			break;
		case EFBIG:
			if (m_defrag(m0, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			     BUS_DMA_NOWAIT) == 0)
				break;

			/* FALLTHROUGH */
		default:
			m_freem(m0);
			continue;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.  The two branches
		 * differ only in which tmd word carries the buffer
		 * address: tmd2 for software style PCNETPCI3, tmd0
		 * otherwise.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.	 That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 = 0;
				sc->sc_txdescs[nexttx].tmd2 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		} else {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.	 That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd2 = 0;
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		}

		KASSERT(lasttx != -1);
		/* Interrupt on the packet, if appropriate. */
		if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
			sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);

		/*
		 * Set `start of packet' and `end of packet' appropriately.
		 * OWN on the first descriptor is set last, which hands the
		 * whole chain to the chip in one step.
		 */
		sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
		sc->sc_txdescs[sc->sc_txnext].tmd1 |=
		    htole32(LE_T1_OWN|LE_T1_STP);

		/* Sync the descriptors we're using. */
		PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
959
960 /*
961 * pcn_watchdog: [ifnet interface function]
962 *
963 * Watchdog timer handler.
964 */
965 void
pcn_watchdog(struct ifnet * ifp)966 pcn_watchdog(struct ifnet *ifp)
967 {
968 struct pcn_softc *sc = ifp->if_softc;
969
970 /*
971 * Since we're not interrupting every packet, sweep
972 * up before we report an error.
973 */
974 pcn_txintr(sc);
975
976 if (sc->sc_txfree != PCN_NTXDESC) {
977 printf("%s: device timeout (txfree %d txsfree %d)\n",
978 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
979 ifp->if_oerrors++;
980
981 /* Reset the interface. */
982 (void) pcn_init(ifp);
983 }
984
985 /* Try to get more packets going. */
986 pcn_start(ifp);
987 }
988
989 /*
990 * pcn_ioctl: [ifnet interface function]
991 *
992 * Handle control requests from the operator.
993 */
994 int
pcn_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)995 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
996 {
997 struct pcn_softc *sc = ifp->if_softc;
998 struct ifreq *ifr = (struct ifreq *) data;
999 int s, error = 0;
1000
1001 s = splnet();
1002
1003 switch (cmd) {
1004 case SIOCSIFADDR:
1005 ifp->if_flags |= IFF_UP;
1006 if (!(ifp->if_flags & IFF_RUNNING))
1007 pcn_init(ifp);
1008 break;
1009
1010 case SIOCSIFFLAGS:
1011 if (ifp->if_flags & IFF_UP) {
1012 if (ifp->if_flags & IFF_RUNNING)
1013 error = ENETRESET;
1014 else
1015 pcn_init(ifp);
1016 } else {
1017 if (ifp->if_flags & IFF_RUNNING)
1018 pcn_stop(ifp, 1);
1019 }
1020 break;
1021
1022 case SIOCSIFMEDIA:
1023 case SIOCGIFMEDIA:
1024 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1025 break;
1026
1027 default:
1028 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1029 }
1030
1031 if (error == ENETRESET) {
1032 if (ifp->if_flags & IFF_RUNNING)
1033 error = pcn_init(ifp);
1034 else
1035 error = 0;
1036 }
1037
1038 splx(s);
1039 return (error);
1040 }
1041
1042 /*
1043 * pcn_intr:
1044 *
1045 * Interrupt service routine.
1046 */
int
pcn_intr(void *arg)
{
	struct pcn_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t csr0;
	int wantinit, handled = 0;

	/*
	 * Keep servicing events until CSR0 shows no pending interrupt,
	 * or a condition arises that requires a full reinitialization
	 * (wantinit != 0).
	 */
	for (wantinit = 0; wantinit == 0;) {
		csr0 = pcn_csr_read(sc, LE_CSR0);
		if ((csr0 & LE_C0_INTR) == 0)
			break;

		/* ACK the bits and re-enable interrupts. */
		pcn_csr_write(sc, LE_CSR0, csr0 &
		    (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
		    LE_C0_TINT|LE_C0_IDON));

		handled = 1;

		/* Rx first; pcn_rxintr() may itself request a reinit. */
		if (csr0 & LE_C0_RINT)
			wantinit = pcn_rxintr(sc);

		if (csr0 & LE_C0_TINT)
			pcn_txintr(sc);

		if (csr0 & LE_C0_ERR) {
			if (csr0 & LE_C0_BABL)
				ifp->if_oerrors++;
			if (csr0 & LE_C0_MISS)
				ifp->if_ierrors++;
			if (csr0 & LE_C0_MERR) {
				/* Memory error is fatal: reinit below. */
				printf("%s: memory error\n",
				    sc->sc_dev.dv_xname);
				wantinit = 1;
				break;
			}
		}

		/* Receiver or transmitter shut itself off: reinit. */
		if ((csr0 & LE_C0_RXON) == 0) {
			printf("%s: receiver disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			wantinit = 1;
		}

		if ((csr0 & LE_C0_TXON) == 0) {
			printf("%s: transmitter disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			pcn_init(ifp);

		/* Try to get more packets going. */
		pcn_start(ifp);
	}

	return (handled);
}
1111
1112 /*
1113 * pcn_spnd:
1114 *
1115 * Suspend the chip.
1116 */
1117 void
pcn_spnd(struct pcn_softc * sc)1118 pcn_spnd(struct pcn_softc *sc)
1119 {
1120 int i;
1121
1122 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
1123
1124 for (i = 0; i < 10000; i++) {
1125 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
1126 return;
1127 delay(5);
1128 }
1129
1130 printf("%s: WARNING: chip failed to enter suspended state\n",
1131 sc->sc_dev.dv_xname);
1132 }
1133
1134 /*
1135 * pcn_txintr:
1136 *
1137 * Helper; handle transmit interrupts.
1138 */
void
pcn_txintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct pcn_txsoft *txs;
	uint32_t tmd1, tmd2, tmd;
	int i, j;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
	    i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Stop at the first frame whose last descriptor the chip
		 * still owns -- it has not been fully transmitted yet. */
		tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
		if (tmd1 & LE_T1_OWN)
			break;

		/*
		 * Slightly annoying -- we have to loop through the
		 * descriptors we've used looking for ERR, since it
		 * can appear on any descriptor in the chain.
		 */
		for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
			tmd = letoh32(sc->sc_txdescs[j].tmd1);
			if (tmd & LE_T1_ERR) {
				ifp->if_oerrors++;
				/* SSTYLE 3 keeps the error bits in tmd0;
				 * other styles keep them in tmd2. */
				if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
					tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
				else
					tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
				if (tmd2 & LE_T2_UFLO) {
					/*
					 * FIFO underrun: bump the transmit
					 * start point (threshold) if we can,
					 * then suspend/resume the chip so the
					 * new CSR80 value takes effect.
					 */
					if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
						sc->sc_xmtsp++;
						printf("%s: transmit "
						    "underrun; new threshold: "
						    "%s\n",
						    sc->sc_dev.dv_xname,
						    sc->sc_xmtsp_desc[
						    sc->sc_xmtsp]);
						pcn_spnd(sc);
						pcn_csr_write(sc, LE_CSR80,
						    LE_C80_RCVFW(sc->sc_rcvfw) |
						    LE_C80_XMTSP(sc->sc_xmtsp) |
						    LE_C80_XMTFW(sc->sc_xmtfw));
						pcn_csr_write(sc, LE_CSR5,
						    sc->sc_csr5);
					} else {
						printf("%s: transmit "
						    "underrun\n",
						    sc->sc_dev.dv_xname);
					}
				} else if (tmd2 & LE_T2_BUFF) {
					printf("%s: transmit buffer error\n",
					    sc->sc_dev.dv_xname);
				}
				if (tmd2 & LE_T2_LCOL)
					ifp->if_collisions++;
				if (tmd2 & LE_T2_RTRY)
					ifp->if_collisions += 16;
				goto next_packet;
			}
			if (j == txs->txs_lastdesc)
				break;
		}
		/*
		 * No error: tally collisions.  Note that at this point
		 * tmd holds the last descriptor's tmd1 (the loop above
		 * exits with j == txs_lastdesc), so tmd == tmd1 here.
		 */
		if (tmd1 & LE_T1_ONE)
			ifp->if_collisions++;
		else if (tmd & LE_T1_MORE) {
			/* Real number is unknown. */
			ifp->if_collisions += 2;
		}
	next_packet:
		/* Reclaim the descriptors and free the mbuf chain. */
		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == PCN_TXQUEUELEN)
		ifp->if_timer = 0;

	/* We freed descriptors; kick the send queue if it was stalled. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1237
1238 /*
1239 * pcn_rxintr:
1240 *
1241 * Helper; handle receive interrupts.
1242 */
int
pcn_rxintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct pcn_rxsoft *rxs;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t rmd1;
	int i, len;
	int rv = 0;

	/* Walk the ring from the last processed slot until we hit a
	 * descriptor the chip still owns. */
	for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rmd1 = letoh32(sc->sc_rxdescs[i].rmd1);

		if (rmd1 & LE_R1_OWN)
			break;

		/*
		 * Check for errors and make sure the packet fit into
		 * a single buffer.  We have structured this block of
		 * code the way it is in order to compress it into
		 * one test in the common case (no error).
		 */
		if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
		    (LE_R1_STP|LE_R1_ENP))) {
			/* Make sure the packet is in a single buffer. */
			if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
			    (LE_R1_STP|LE_R1_ENP)) {
				printf("%s: packet spilled into next buffer\n",
				    sc->sc_dev.dv_xname);
				rv = 1;	/* pcn_intr() will re-init */
				goto done;
			}

			/*
			 * If the packet had an error, simply recycle the
			 * buffer.
			 */
			if (rmd1 & LE_R1_ERR) {
				ifp->if_ierrors++;
				/*
				 * If we got an overflow error, chances
				 * are there will be a CRC error.  In
				 * this case, just print the overflow
				 * error, and skip the others.
				 */
				if (rmd1 & LE_R1_OFLO)
					printf("%s: overflow error\n",
					    sc->sc_dev.dv_xname);
				else {
#define	PRINTIT(x, str)							\
					if (rmd1 & (x))			\
						printf("%s: %s\n",	\
						    sc->sc_dev.dv_xname, str);
					PRINTIT(LE_R1_FRAM, "framing error");
					PRINTIT(LE_R1_CRC, "CRC error");
					PRINTIT(LE_R1_BUFF, "buffer error");
				}
#undef PRINTIT
				/* Hand the buffer back to the chip. */
				PCN_INIT_RXDESC(sc, i);
				continue;
			}
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  The byte count lives
		 * in rmd0 for SSTYLE 3 and in rmd2 for the other styles.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
			len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
		else
			len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;

		/*
		 * The LANCE family includes the CRC with every packet;
		 * trim it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* Offset by 2 so the IP header is aligned. */
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			PCN_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (pcn_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Drop the packet and reuse the old buffer. */
				ifp->if_ierrors++;
				PCN_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
 done:
	/* Pass everything we collected up the stack. */
	if_input(ifp, &ml);
	return (rv);
}
1375
1376 /*
1377 * pcn_tick:
1378 *
1379 * One second timer, used to tick the MII.
1380 */
1381 void
pcn_tick(void * arg)1382 pcn_tick(void *arg)
1383 {
1384 struct pcn_softc *sc = arg;
1385 int s;
1386
1387 s = splnet();
1388 mii_tick(&sc->sc_mii);
1389 splx(s);
1390
1391 timeout_add_sec(&sc->sc_tick_timeout, 1);
1392 }
1393
1394 /*
1395 * pcn_reset:
1396 *
1397 * Perform a soft reset on the PCnet-PCI.
1398 */
void
pcn_reset(struct pcn_softc *sc)
{

	/*
	 * The PCnet-PCI chip is reset by reading from the
	 * RESET register.  Note that while the NE2100 LANCE
	 * boards require a write after the read, the PCnet-PCI
	 * chips do not require this.
	 *
	 * Since we don't know if we're in 16-bit or 32-bit
	 * mode right now, issue both (it's safe) in the
	 * hopes that one will succeed.
	 */
	(void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
	(void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);

	/* Wait 1ms for it to finish. */
	delay(1000);

	/*
	 * Select 32-bit I/O mode by issuing a 32-bit write to the
	 * RDP (register data port).  Since the RAP is 0 after a reset,
	 * writing a 0 to RDP is safe (since it simply clears CSR0).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
}
1426
1427 /*
1428 * pcn_init: [ifnet interface function]
1429 *
1430 * Initialize the interface. Must be called at splnet().
1431 */
int
pcn_init(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct pcn_rxsoft *rxs;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	pcn_stop(ifp, 0);

	/* Reset the chip to a known state. */
	pcn_reset(sc);

	/*
	 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
	 * else.
	 *
	 * XXX It'd be really nice to use SSTYLE 2 on all the chips,
	 * because the structure layout is compatible with ILACC,
	 * but the burst mode is only available in SSTYLE 3, and
	 * burst mode should provide some performance enhancement.
	 */
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
	else
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
	pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = PCN_NTXDESC;
	sc->sc_txnext = 0;

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < PCN_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = PCN_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < PCN_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = pcn_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				pcn_rxdrain(sc);
				goto out;
			}
		} else
			PCN_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Initialize MODE for the initialization block. */
	sc->sc_mode = 0;

	/*
	 * If we have MII, simply select MII in the MODE register,
	 * and clear ASEL.  Otherwise, let ASEL stand (for now),
	 * and leave PORTSEL alone (it is ignored when ASEL is set).
	 */
	if (sc->sc_flags & PCN_F_HAS_MII) {
		pcn_bcr_write(sc, LE_BCR2,
		    pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
		sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);

		/*
		 * Disable MII auto-negotiation.  We handle that in
		 * our own MII layer.
		 */
		pcn_bcr_write(sc, LE_BCR32,
		    pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
	}

	/* Set the multicast filter in the init block. */
	pcn_set_filter(sc);

	/*
	 * Set the Tx and Rx descriptor ring addresses in the init
	 * block, the TLEN and RLEN other fields of the init block
	 * MODE register.  The ring sizes are encoded as log2 values
	 * (ffs(n) - 1 for a power-of-two n) in the top bits.
	 */
	sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
	sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
	sc->sc_initblock.init_mode = htole32(sc->sc_mode |
	    ((ffs(PCN_NTXDESC) - 1) << 28) |
	    ((ffs(PCN_NRXDESC) - 1) << 20));

	/* Set the station address in the init block. */
	sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
	    (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
	sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
	    (enaddr[5] << 8));

	/* Initialize CSR3. */
	pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);

	/* Initialize CSR4. */
	pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
	    LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);

	/* Initialize CSR5. */
	sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
	pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);

	/*
	 * If we have an Am79c971 or greater, initialize CSR7.
	 *
	 * XXX Might be nice to use the MII auto-poll interrupt someday.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		/* Not available on these chips. */
		break;

	default:
		pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
		break;
	}

	/*
	 * On the Am79c970A and greater, initialize BCR18 to
	 * enable burst mode.
	 *
	 * Also enable the "no underflow" option on the Am79c971 and
	 * higher, which prevents the chip from generating transmit
	 * underflows, yet still provides decent performance.  Note if
	 * chip is not connected to external SRAM, then we still have
	 * to handle underflow errors (the NOUFLO bit is ignored in
	 * that case).
	 */
	reg = pcn_bcr_read(sc, LE_BCR18);
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
		break;

	case PARTID_Am79c970A:
		reg |= LE_B18_BREADE|LE_B18_BWRITE;
		break;

	default:
		reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
		break;
	}
	pcn_bcr_write(sc, LE_BCR18, reg);

	/*
	 * Initialize CSR80 (FIFO thresholds for Tx and Rx).
	 */
	pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
	    LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));

	/*
	 * Send the init block to the chip, and wait for it
	 * to be processed.
	 */
	PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
	pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
	pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
	pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
	delay(100);
	for (i = 0; i < 10000; i++) {
		if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
			break;
		delay(10);
	}
	PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
	if (i == 10000) {
		printf("%s: timeout processing init block\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto out;
	}

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change_cb)(ifp);

	/* Enable interrupts and external activity (and ACK IDON). */
	pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);

	if (sc->sc_flags & PCN_F_HAS_MII) {
		/* Start the one second MII clock. */
		timeout_add_sec(&sc->sc_tick_timeout, 1);
	}

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1641
1642 /*
1643 * pcn_rxdrain:
1644 *
1645 * Drain the receive queue.
1646 */
1647 void
pcn_rxdrain(struct pcn_softc * sc)1648 pcn_rxdrain(struct pcn_softc *sc)
1649 {
1650 struct pcn_rxsoft *rxs;
1651 int i;
1652
1653 for (i = 0; i < PCN_NRXDESC; i++) {
1654 rxs = &sc->sc_rxsoft[i];
1655 if (rxs->rxs_mbuf != NULL) {
1656 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1657 m_freem(rxs->rxs_mbuf);
1658 rxs->rxs_mbuf = NULL;
1659 }
1660 }
1661 }
1662
1663 /*
1664 * pcn_stop: [ifnet interface function]
1665 *
1666 * Stop transmission on the interface.
1667 */
1668 void
pcn_stop(struct ifnet * ifp,int disable)1669 pcn_stop(struct ifnet *ifp, int disable)
1670 {
1671 struct pcn_softc *sc = ifp->if_softc;
1672 struct pcn_txsoft *txs;
1673 int i;
1674
1675 if (sc->sc_flags & PCN_F_HAS_MII) {
1676 /* Stop the one second clock. */
1677 timeout_del(&sc->sc_tick_timeout);
1678
1679 /* Down the MII. */
1680 mii_down(&sc->sc_mii);
1681 }
1682
1683 /* Mark the interface as down and cancel the watchdog timer. */
1684 ifp->if_flags &= ~IFF_RUNNING;
1685 ifq_clr_oactive(&ifp->if_snd);
1686 ifp->if_timer = 0;
1687
1688 /* Stop the chip. */
1689 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
1690
1691 /* Release any queued transmit buffers. */
1692 for (i = 0; i < PCN_TXQUEUELEN; i++) {
1693 txs = &sc->sc_txsoft[i];
1694 if (txs->txs_mbuf != NULL) {
1695 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1696 m_freem(txs->txs_mbuf);
1697 txs->txs_mbuf = NULL;
1698 }
1699 }
1700
1701 if (disable)
1702 pcn_rxdrain(sc);
1703 }
1704
1705 /*
1706 * pcn_add_rxbuf:
1707 *
1708 * Add a receive buffer to the indicated descriptor.
1709 */
1710 int
pcn_add_rxbuf(struct pcn_softc * sc,int idx)1711 pcn_add_rxbuf(struct pcn_softc *sc, int idx)
1712 {
1713 struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
1714 struct mbuf *m;
1715 int error;
1716
1717 MGETHDR(m, M_DONTWAIT, MT_DATA);
1718 if (m == NULL)
1719 return (ENOBUFS);
1720
1721 MCLGET(m, M_DONTWAIT);
1722 if ((m->m_flags & M_EXT) == 0) {
1723 m_freem(m);
1724 return (ENOBUFS);
1725 }
1726
1727 if (rxs->rxs_mbuf != NULL)
1728 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1729
1730 rxs->rxs_mbuf = m;
1731
1732 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1733 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1734 BUS_DMA_READ|BUS_DMA_NOWAIT);
1735 if (error) {
1736 printf("%s: can't load rx DMA map %d, error = %d\n",
1737 sc->sc_dev.dv_xname, idx, error);
1738 panic("pcn_add_rxbuf");
1739 }
1740
1741 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1742 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1743
1744 PCN_INIT_RXDESC(sc, idx);
1745
1746 return (0);
1747 }
1748
1749 /*
1750 * pcn_set_filter:
1751 *
1752 * Set up the receive filter.
1753 */
1754 void
pcn_set_filter(struct pcn_softc * sc)1755 pcn_set_filter(struct pcn_softc *sc)
1756 {
1757 struct arpcom *ac = &sc->sc_arpcom;
1758 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1759 struct ether_multi *enm;
1760 struct ether_multistep step;
1761 uint32_t crc;
1762
1763 ifp->if_flags &= ~IFF_ALLMULTI;
1764
1765 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1766 ifp->if_flags |= IFF_ALLMULTI;
1767 if (ifp->if_flags & IFF_PROMISC)
1768 sc->sc_mode |= LE_C15_PROM;
1769 sc->sc_initblock.init_ladrf[0] =
1770 sc->sc_initblock.init_ladrf[1] =
1771 sc->sc_initblock.init_ladrf[2] =
1772 sc->sc_initblock.init_ladrf[3] = 0xffff;
1773 } else {
1774 sc->sc_initblock.init_ladrf[0] =
1775 sc->sc_initblock.init_ladrf[1] =
1776 sc->sc_initblock.init_ladrf[2] =
1777 sc->sc_initblock.init_ladrf[3] = 0;
1778
1779 /*
1780 * Set up the multicast address filter by passing all multicast
1781 * addresses through a CRC generator, and then using the high
1782 * order 6 bits as an index into the 64-bit logical address
1783 * filter. The high order bits select the word, while the rest
1784 * of the bits select the bit within the word.
1785 */
1786 ETHER_FIRST_MULTI(step, ac, enm);
1787 while (enm != NULL) {
1788 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1789
1790 /* Just want the 6 most significant bits. */
1791 crc >>= 26;
1792
1793 /* Set the corresponding bit in the filter. */
1794 sc->sc_initblock.init_ladrf[crc >> 4] |=
1795 htole16(1 << (crc & 0xf));
1796
1797 ETHER_NEXT_MULTI(step, enm);
1798 }
1799 }
1800 }
1801
1802 /*
1803 * pcn_79c970_mediainit:
1804 *
1805 * Initialize media for the Am79c970.
1806 */
1807 void
pcn_79c970_mediainit(struct pcn_softc * sc)1808 pcn_79c970_mediainit(struct pcn_softc *sc)
1809 {
1810 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
1811 pcn_79c970_mediastatus);
1812
1813 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
1814 PORTSEL_AUI, NULL);
1815 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1816 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
1817 PORTSEL_AUI, NULL);
1818
1819 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
1820 PORTSEL_10T, NULL);
1821 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1822 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
1823 PORTSEL_10T, NULL);
1824
1825 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
1826 0, NULL);
1827 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1828 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
1829 0, NULL);
1830
1831 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1832 }
1833
1834 /*
1835 * pcn_79c970_mediastatus: [ifmedia interface function]
1836 *
1837 * Get the current interface media status (Am79c970 version).
1838 */
1839 void
pcn_79c970_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1840 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1841 {
1842 struct pcn_softc *sc = ifp->if_softc;
1843
1844 /*
1845 * The currently selected media is always the active media.
1846 * Note: We have no way to determine what media the AUTO
1847 * process picked.
1848 */
1849 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
1850 }
1851
1852 /*
1853 * pcn_79c970_mediachange: [ifmedia interface function]
1854 *
1855 * Set hardware to newly-selected media (Am79c970 version).
1856 */
1857 int
pcn_79c970_mediachange(struct ifnet * ifp)1858 pcn_79c970_mediachange(struct ifnet *ifp)
1859 {
1860 struct pcn_softc *sc = ifp->if_softc;
1861 uint32_t reg;
1862
1863 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
1864 /*
1865 * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL.
1866 */
1867 reg = pcn_bcr_read(sc, LE_BCR2);
1868 reg |= LE_B2_ASEL;
1869 pcn_bcr_write(sc, LE_BCR2, reg);
1870 } else {
1871 /*
1872 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
1873 */
1874 reg = pcn_bcr_read(sc, LE_BCR2);
1875 reg &= ~LE_B2_ASEL;
1876 pcn_bcr_write(sc, LE_BCR2, reg);
1877
1878 reg = pcn_csr_read(sc, LE_CSR15);
1879 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
1880 LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
1881 pcn_csr_write(sc, LE_CSR15, reg);
1882 }
1883
1884 if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
1885 reg = LE_B9_FDEN;
1886 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
1887 reg |= LE_B9_AUIFD;
1888 pcn_bcr_write(sc, LE_BCR9, reg);
1889 } else
1890 pcn_bcr_write(sc, LE_BCR9, 0);
1891
1892 return (0);
1893 }
1894
1895 /*
1896 * pcn_79c971_mediainit:
1897 *
1898 * Initialize media for the Am79c971.
1899 */
void
pcn_79c971_mediainit(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* We have MII. */
	sc->sc_flags |= PCN_F_HAS_MII;

	/*
	 * The built-in 10BASE-T interface is mapped to the MII
	 * on the PCNet-FAST.  Unfortunately, there's no EEPROM
	 * word that tells us which PHY to use.
	 * This driver used to ignore all but the first PHY to
	 * answer, but this code was removed to support multiple
	 * external PHYs.  As the default instance will be the first
	 * one to answer, no harm is done by letting the possibly
	 * non-connected internal PHY show up.
	 */

	/* Initialize our media structures and probe the MII. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pcn_mii_readreg;
	sc->sc_mii.mii_writereg = pcn_mii_writereg;
	sc->sc_mii.mii_statchg = pcn_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
	    pcn_79c971_mediastatus);

	/* Probe all PHY addresses; attach whatever answers. */
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: expose a "none" medium only. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
1935
1936 /*
1937 * pcn_79c971_mediastatus: [ifmedia interface function]
1938 *
1939 * Get the current interface media status (Am79c971 version).
1940 */
1941 void
pcn_79c971_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1942 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1943 {
1944 struct pcn_softc *sc = ifp->if_softc;
1945
1946 mii_pollstat(&sc->sc_mii);
1947 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1948 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1949 }
1950
1951 /*
1952 * pcn_79c971_mediachange: [ifmedia interface function]
1953 *
1954 * Set hardware to newly-selected media (Am79c971 version).
1955 */
1956 int
pcn_79c971_mediachange(struct ifnet * ifp)1957 pcn_79c971_mediachange(struct ifnet *ifp)
1958 {
1959 struct pcn_softc *sc = ifp->if_softc;
1960
1961 if (ifp->if_flags & IFF_UP)
1962 mii_mediachg(&sc->sc_mii);
1963 return (0);
1964 }
1965
1966 /*
1967 * pcn_mii_readreg: [mii interface function]
1968 *
1969 * Read a PHY register on the MII.
1970 */
1971 int
pcn_mii_readreg(struct device * self,int phy,int reg)1972 pcn_mii_readreg(struct device *self, int phy, int reg)
1973 {
1974 struct pcn_softc *sc = (void *) self;
1975 uint32_t rv;
1976
1977 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
1978 rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
1979 if (rv == 0xffff)
1980 return (0);
1981
1982 return (rv);
1983 }
1984
1985 /*
1986 * pcn_mii_writereg: [mii interface function]
1987 *
1988 * Write a PHY register on the MII.
1989 */
1990 void
pcn_mii_writereg(struct device * self,int phy,int reg,int val)1991 pcn_mii_writereg(struct device *self, int phy, int reg, int val)
1992 {
1993 struct pcn_softc *sc = (void *) self;
1994
1995 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
1996 pcn_bcr_write(sc, LE_BCR34, val);
1997 }
1998
1999 /*
2000 * pcn_mii_statchg: [mii interface function]
2001 *
2002 * Callback from MII layer when media changes.
2003 */
2004 void
pcn_mii_statchg(struct device * self)2005 pcn_mii_statchg(struct device *self)
2006 {
2007 struct pcn_softc *sc = (void *) self;
2008
2009 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2010 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
2011 else
2012 pcn_bcr_write(sc, LE_BCR9, 0);
2013 }
2014