1 /* $NetBSD: if_xge.c,v 1.35 2022/09/24 18:12:43 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
5 * All rights reserved.
6 *
7 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * SUNET, Swedish University Computer Network.
21 * 4. The name of SUNET may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 /*
38 * Device driver for the S2io Xframe Ten Gigabit Ethernet controller.
39 *
40 * TODO (in no specific order):
41 * HW VLAN support.
42 * IPv6 HW cksum.
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: if_xge.c,v 1.35 2022/09/24 18:12:43 thorpej Exp $");
47
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/kernel.h>
53 #include <sys/proc.h>
54 #include <sys/socket.h>
55 #include <sys/device.h>
56
57 #include <net/if.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_ether.h>
61 #include <net/bpf.h>
62
63 #include <sys/bus.h>
64 #include <sys/intr.h>
65 #include <machine/endian.h>
66
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69
70 #include <dev/pci/pcivar.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcidevs.h>
73
74 #include <dev/pci/if_xgereg.h>
75
76 /*
77 * Some tunable constants, tune with care!
78 */
79 #define RX_MODE RX_MODE_1 /* Receive mode (buffer usage, see below) */
80 #define NRXDESCS 1016 /* # of receive descriptors (requested) */
81 #define NTXDESCS 8192 /* Number of transmit descriptors */
82 #define NTXFRAGS 100 /* Max fragments per packet */
83 #define XGE_EVENT_COUNTERS /* Instrumentation */
84
85 /*
86 * Receive buffer modes; 1, 3 or 5 buffers.
87 */
88 #define RX_MODE_1 1
89 #define RX_MODE_3 3
90 #define RX_MODE_5 5
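/*
 * In 1-buffer mode a whole frame lands in a single cluster mbuf; the
 * multi-buffer modes let the chip split headers and payload into
 * separate buffers.  Note that the 3-buffer code paths below are not
 * implemented (they #error out if selected).
 */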
91
92 /*
93 * Use clever macros to avoid a bunch of #ifdef's.
94 */
95 #define XCONCAT3(x, y, z) x ## y ## z
96 #define CONCAT3(x, y, z) XCONCAT3(x, y, z)
97 #define NDESC_BUFMODE CONCAT3(NDESC_, RX_MODE, BUFMODE)
98 #define rxd_4k CONCAT3(rxd, RX_MODE, _4k)
99 #define rxdesc ___CONCAT(rxd, RX_MODE)
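/*
 * Example expansion with RX_MODE set to RX_MODE_1:
 *	NDESC_BUFMODE -> NDESC_1BUFMODE
 *	rxd_4k        -> rxd1_4k
 *	rxdesc        -> rxd1
 * (the per-mode names are assumed to come from if_xgereg.h)
 */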
100
101 #define NEXTTX(x) (((x)+1) % NTXDESCS)
102 #define NRXFRAGS RX_MODE /* hardware imposed frags */
103 #define NRXPAGES ((NRXDESCS/NDESC_BUFMODE)+1)
104 #define NRXREAL (NRXPAGES*NDESC_BUFMODE)
105 #define RXMAPSZ (NRXPAGES*PAGE_SIZE)
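/*
 * NRXPAGES is the number of 4k descriptor pages needed to hold at least
 * NRXDESCS descriptors; NRXREAL is the number of descriptors actually
 * used, which may be slightly larger than what was requested.
 */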
106
107 #ifdef XGE_EVENT_COUNTERS
108 #define XGE_EVCNT_INCR(ev) (ev)->ev_count++
109 #else
110 #define XGE_EVCNT_INCR(ev) /* nothing */
111 #endif
112
113 /*
114 * Magic values to work around a bug where the MAC address can't be read
115 * correctly.  They come from the Linux driver.
116 */
117 static uint64_t fix_mac[] = {
118 0x0060000000000000ULL, 0x0060600000000000ULL,
119 0x0040600000000000ULL, 0x0000600000000000ULL,
120 0x0020600000000000ULL, 0x0060600000000000ULL,
121 0x0020600000000000ULL, 0x0060600000000000ULL,
122 0x0020600000000000ULL, 0x0060600000000000ULL,
123 0x0020600000000000ULL, 0x0060600000000000ULL,
124 0x0020600000000000ULL, 0x0060600000000000ULL,
125 0x0020600000000000ULL, 0x0060600000000000ULL,
126 0x0020600000000000ULL, 0x0060600000000000ULL,
127 0x0020600000000000ULL, 0x0060600000000000ULL,
128 0x0020600000000000ULL, 0x0060600000000000ULL,
129 0x0020600000000000ULL, 0x0060600000000000ULL,
130 0x0020600000000000ULL, 0x0000600000000000ULL,
131 0x0040600000000000ULL, 0x0060600000000000ULL,
132 };
133
134
135 struct xge_softc {
136 device_t sc_dev;
137 struct ethercom sc_ethercom;
138 #define sc_if sc_ethercom.ec_if
139 bus_dma_tag_t sc_dmat;
140 bus_space_tag_t sc_st;
141 bus_space_handle_t sc_sh;
142 bus_space_tag_t sc_txt;
143 bus_space_handle_t sc_txh;
144 void *sc_ih;
145
146 struct ifmedia xena_media;
147 pcireg_t sc_pciregs[16];
148
149 /* Transmit structures */
150 struct txd *sc_txd[NTXDESCS]; /* transmit frags array */
151 bus_addr_t sc_txdp[NTXDESCS]; /* bus address of transmit frags */
152 bus_dmamap_t sc_txm[NTXDESCS]; /* transmit frags map */
153 struct mbuf *sc_txb[NTXDESCS]; /* transmit mbuf pointer */
154 int sc_nexttx, sc_lasttx;
155 bus_dmamap_t sc_txmap; /* transmit descriptor map */
156
157 /* Receive data */
158 bus_dmamap_t sc_rxmap; /* receive descriptor map */
159 struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
160 bus_dmamap_t sc_rxm[NRXREAL]; /* receive buffer map */
161 struct mbuf *sc_rxb[NRXREAL]; /* mbufs on receive descriptors */
162 int sc_nextrx; /* next descriptor to check */
163
164 #ifdef XGE_EVENT_COUNTERS
165 struct evcnt sc_intr; /* # of interrupts */
166 struct evcnt sc_txintr; /* # of transmit interrupts */
167 struct evcnt sc_rxintr; /* # of receive interrupts */
168 struct evcnt sc_txqe; /* # of xmit intrs when board queue empty */
169 #endif
170 };
171
172 static int xge_match(device_t parent, cfdata_t cf, void *aux);
173 static void xge_attach(device_t parent, device_t self, void *aux);
174 static int xge_alloc_txmem(struct xge_softc *);
175 static int xge_alloc_rxmem(struct xge_softc *);
176 static void xge_start(struct ifnet *);
177 static void xge_stop(struct ifnet *, int);
178 static int xge_add_rxbuf(struct xge_softc *, int);
179 static void xge_mcast_filter(struct xge_softc *sc);
180 static int xge_setup_xgxs(struct xge_softc *sc);
181 static int xge_ioctl(struct ifnet *ifp, u_long cmd, void *data);
182 static int xge_init(struct ifnet *ifp);
183 static void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
184 static int xge_xgmii_mediachange(struct ifnet *);
185 static int xge_intr(void *);
186
187 /*
188 * Helpers to address registers.
189 */
190 #define PIF_WCSR(csr, val) pif_wcsr(sc, csr, val)
191 #define PIF_RCSR(csr) pif_rcsr(sc, csr)
192 #define TXP_WCSR(csr, val) txp_wcsr(sc, csr, val)
193 #define PIF_WKEY(csr, val) pif_wkey(sc, csr, val)
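/*
 * The chip's CSRs are 64 bits wide and are accessed as two 32-bit
 * bus_space operations, low word first.  pif_wkey() rewrites
 * RMAC_CFG_KEY before each half, presumably because the key unlocks
 * only a single write.
 */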
194
195 static inline void
196 pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
197 {
198 uint32_t lval, hval;
199
200 lval = val&0xffffffff;
201 hval = val>>32;
202 bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
203 bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
204 }
205
206 static inline uint64_t
207 pif_rcsr(struct xge_softc *sc, bus_size_t csr)
208 {
209 uint64_t val, val2;
210 val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
211 val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
212 val |= (val2 << 32);
213 return val;
214 }
215
216 static inline void
217 txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
218 {
219 uint32_t lval, hval;
220
221 lval = val&0xffffffff;
222 hval = val>>32;
223 bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
224 bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
225 }
226
227
228 static inline void
229 pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
230 {
231 uint32_t lval, hval;
232
233 lval = val&0xffffffff;
234 hval = val>>32;
235 PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
236 bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
237 PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
238 bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
239 }
240
241
242 CFATTACH_DECL_NEW(xge, sizeof(struct xge_softc),
243 xge_match, xge_attach, NULL, NULL);
244
245 #define XNAME device_xname(sc->sc_dev)
246
247 #define XGE_RXSYNC(desc, what) \
248 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
249 (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
250 (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
251 #define XGE_RXD(desc) &sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
252 r4_rxd[desc%NDESC_BUFMODE]
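/*
 * XGE_RXD() returns a pointer to receive descriptor `desc' within its
 * 4k descriptor page; XGE_RXSYNC() syncs only that descriptor's slice
 * of the single receive descriptor DMA map.
 */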
253
254 /*
255 * Non-tunable constants.
256 */
257 #define XGE_MAX_MTU 9600
258 #define XGE_IP_MAXPACKET 65535 /* same as IP_MAXPACKET */
259
260 static int
261 xge_match(device_t parent, cfdata_t cf, void *aux)
262 {
263 struct pci_attach_args *pa = aux;
264
265 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_S2IO &&
266 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_S2IO_XFRAME)
267 return 1;
268
269 return 0;
270 }
271
272 void
273 xge_attach(device_t parent, device_t self, void *aux)
274 {
275 struct pci_attach_args *pa = aux;
276 struct xge_softc *sc;
277 struct ifnet *ifp;
278 pcireg_t memtype;
279 pci_intr_handle_t ih;
280 const char *intrstr = NULL;
281 pci_chipset_tag_t pc = pa->pa_pc;
282 uint8_t enaddr[ETHER_ADDR_LEN];
283 uint64_t val;
284 int i;
285 char intrbuf[PCI_INTRSTR_LEN];
286
287 sc = device_private(self);
288 sc->sc_dev = self;
289
290 if (pci_dma64_available(pa))
291 sc->sc_dmat = pa->pa_dmat64;
292 else
293 sc->sc_dmat = pa->pa_dmat;
294
295 /* Get BAR0 address */
296 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
297 if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
298 &sc->sc_st, &sc->sc_sh, 0, 0)) {
299 aprint_error("%s: unable to map PIF BAR registers\n", XNAME);
300 return;
301 }
302
303 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
304 if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
305 &sc->sc_txt, &sc->sc_txh, 0, 0)) {
306 aprint_error("%s: unable to map TXP BAR registers\n", XNAME);
307 return;
308 }
309
310 /* Save PCI config space */
311 for (i = 0; i < 64; i += 4)
312 sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
313
314 #if BYTE_ORDER == LITTLE_ENDIAN
315 val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
316 val &= ~(TxF_R_SE | RxF_W_SE);
317 PIF_WCSR(SWAPPER_CTRL, val);
318 PIF_WCSR(SWAPPER_CTRL, val);
319 #elif BYTE_ORDER == BIG_ENDIAN
320 /* do nothing */
321 #else
322 #error bad endianness!
323 #endif
324
325 if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
326 aprint_error("%s: failed configuring endian, %llx != %llx!\n",
327 XNAME, (unsigned long long)val, SWAPPER_MAGIC);
328 return;
329 }
330
331 /*
332 * The MAC address may be all FF's, which is not good.
333 * Resolve it by writing some magic values to GPIO_CONTROL and
334 * forcing a chip reset so the serial EEPROM is read in again.
335 */
336 for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
337 PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
338 PIF_RCSR(GPIO_CONTROL);
339 }
340
341 /*
342 * Reset the chip and restore the PCI registers.
343 */
344 PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
345 DELAY(500000);
346 for (i = 0; i < 64; i += 4)
347 pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);
348
349 /*
350 * Restore the byte order registers.
351 */
352 #if BYTE_ORDER == LITTLE_ENDIAN
353 val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
354 val &= ~(TxF_R_SE | RxF_W_SE);
355 PIF_WCSR(SWAPPER_CTRL, val);
356 PIF_WCSR(SWAPPER_CTRL, val);
357 #elif BYTE_ORDER == BIG_ENDIAN
358 /* do nothing */
359 #else
360 #error bad endianness!
361 #endif
362
363 if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
364 aprint_error("%s: failed configuring endian2, %llx != %llx!\n",
365 XNAME, (unsigned long long)val, SWAPPER_MAGIC);
366 return;
367 }
368
369 /*
370 * XGXS initialization.
371 */
372 /* 29, reset */
373 PIF_WCSR(SW_RESET, 0);
374 DELAY(500000);
375
376 /* 30, configure XGXS transceiver */
377 xge_setup_xgxs(sc);
378
379 /* 33, program MAC address (not needed here) */
380 /* Get ethernet address */
381 PIF_WCSR(RMAC_ADDR_CMD_MEM,
382 RMAC_ADDR_CMD_MEM_STR | RMAC_ADDR_CMD_MEM_OFF(0));
383 while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
384 ;
385 val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
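	/* Address is in the upper six bytes, most significant byte first. */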
386 for (i = 0; i < ETHER_ADDR_LEN; i++)
387 enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
388
389 /*
390 * Get memory for transmit descriptor lists.
391 */
392 if (xge_alloc_txmem(sc)) {
393 aprint_error("%s: failed allocating txmem.\n", XNAME);
394 return;
395 }
396
397 /* 9 and 10 - set FIFO number/prio */
398 PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
399 PIF_WCSR(TX_FIFO_P1, 0ULL);
400 PIF_WCSR(TX_FIFO_P2, 0ULL);
401 PIF_WCSR(TX_FIFO_P3, 0ULL);
402
403 /* 11, XXX set round-robin prio? */
404
405 /* 12, enable transmit FIFO */
406 val = PIF_RCSR(TX_FIFO_P0);
407 val |= TX_FIFO_ENABLE;
408 PIF_WCSR(TX_FIFO_P0, val);
409
410 /* 13, disable some error checks */
411 PIF_WCSR(TX_PA_CFG,
412 TX_PA_CFG_IFR | TX_PA_CFG_ISO | TX_PA_CFG_ILC | TX_PA_CFG_ILE);
413
414 /*
415 * Create transmit DMA maps.
416 * Make them large for TSO.
417 */
418 for (i = 0; i < NTXDESCS; i++) {
419 if (bus_dmamap_create(sc->sc_dmat, XGE_IP_MAXPACKET,
420 NTXFRAGS, MCLBYTES, 0, 0, &sc->sc_txm[i])) {
421 aprint_error("%s: cannot create TX DMA maps\n", XNAME);
422 return;
423 }
424 }
425
426 sc->sc_lasttx = NTXDESCS-1;
427
428 /*
429 * RxDMA initialization.
430 * Only use one out of 8 possible receive queues.
431 */
432 if (xge_alloc_rxmem(sc)) { /* allocate rx descriptor memory */
433 aprint_error("%s: failed allocating rxmem\n", XNAME);
434 return;
435 }
436
437 /* Create receive buffer DMA maps */
438 for (i = 0; i < NRXREAL; i++) {
439 if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
440 NRXFRAGS, MCLBYTES, 0, 0, &sc->sc_rxm[i])) {
441 aprint_error("%s: cannot create RX DMA maps\n", XNAME);
442 return;
443 }
444 }
445
446 /* allocate mbufs to receive descriptors */
447 for (i = 0; i < NRXREAL; i++)
448 if (xge_add_rxbuf(sc, i))
449 panic("out of mbufs too early");
450
451 /* 14, setup receive ring priority */
452 PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */
453
454 /* 15, setup receive ring round-robin calendar */
455 PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
456 PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
457 PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
458 PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
459 PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);
460
461 /* 16, write receive ring start address */
462 PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
463 /* PRC_RXD0_[1-7] are not used */
464
465 /* 17, Setup alarm registers */
466 PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */
467
468 /* 18, init receive ring controller */
469 #if RX_MODE == RX_MODE_1
470 val = RING_MODE_1;
471 #elif RX_MODE == RX_MODE_3
472 val = RING_MODE_3;
473 #else /* RX_MODE == RX_MODE_5 */
474 val = RING_MODE_5;
475 #endif
476 PIF_WCSR(PRC_CTRL_0, RC_IN_SVC | val);
477 /* leave 1-7 disabled */
478 /* XXXX snoop configuration? */
479
480 /* 19, set chip memory assigned to the queue */
481 PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64)); /* all 64M to queue 0 */
482
483 /* 20, setup RLDRAM parameters */
484 /* do not touch it for now */
485
486 /* 21, setup pause frame thresholds */
487 /* do not touch the defaults */
488 /* XXX - must 0xff be written as stated in the manual? */
489
490 /* 22, configure RED */
491 /* we do not want to drop packets, so ignore */
492
493 /* 23, initiate RLDRAM */
494 val = PIF_RCSR(MC_RLDRAM_MRS);
495 val |= MC_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
496 PIF_WCSR(MC_RLDRAM_MRS, val);
497 DELAY(1000);
498
499 /*
500 * Setup interrupt policies.
501 */
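	/*
	 * The TTI/RTI timer and utilization values are written through a
	 * command memory interface: load the data registers, strobe the
	 * command register and busy-wait until the chip clears the strobe.
	 */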
502 /* 40, Transmit interrupts */
503 PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
504 TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
505 PIF_WCSR(TTI_DATA2_MEM,
506 TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
507 PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
508 while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
509 ;
510
511 /* 41, Receive interrupts */
512 PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
513 RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
514 PIF_WCSR(RTI_DATA2_MEM,
515 RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
516 PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
517 while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
518 ;
519
520 /*
521 * Setup media stuff.
522 */
523 sc->sc_ethercom.ec_ifmedia = &sc->xena_media;
524 ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
525 xge_ifmedia_status);
526 ifmedia_add(&sc->xena_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
527 ifmedia_set(&sc->xena_media, IFM_ETHER | IFM_10G_LR);
528
529 aprint_normal("%s: Ethernet address %s\n", XNAME,
530 ether_sprintf(enaddr));
531
532 ifp = &sc->sc_ethercom.ec_if;
533 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
534 ifp->if_baudrate = 10000000000LL;
535 ifp->if_init = xge_init;
536 ifp->if_stop = xge_stop;
537 ifp->if_softc = sc;
538 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
539 ifp->if_ioctl = xge_ioctl;
540 ifp->if_start = xge_start;
541 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(NTXDESCS - 1, IFQ_MAXLEN));
542 IFQ_SET_READY(&ifp->if_snd);
543
544 /*
545 * Offloading capabilities.
546 */
547 sc->sc_ethercom.ec_capabilities |=
548 ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
549 ifp->if_capabilities |=
550 IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
551 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
552 IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx | IFCAP_TSOv4;
553
554 /*
555 * Attach the interface.
556 */
557 if_attach(ifp);
558 if_deferred_start_init(ifp, NULL);
559 ether_ifattach(ifp, enaddr);
560
561 /*
562 * Setup interrupt vector before initializing.
563 */
564 if (pci_intr_map(pa, &ih)) {
565 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
566 return;
567 }
568 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
569 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, xge_intr, sc,
570 device_xname(self));
571 if (sc->sc_ih == NULL) {
572 aprint_error_dev(sc->sc_dev,
573 "unable to establish interrupt at %s\n",
574 intrstr ? intrstr : "<unknown>");
575 return;
576 }
577 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
578
579 #ifdef XGE_EVENT_COUNTERS
580 evcnt_attach_dynamic(&sc->sc_intr, EVCNT_TYPE_MISC,
581 NULL, XNAME, "intr");
582 evcnt_attach_dynamic(&sc->sc_txintr, EVCNT_TYPE_MISC,
583 NULL, XNAME, "txintr");
584 evcnt_attach_dynamic(&sc->sc_rxintr, EVCNT_TYPE_MISC,
585 NULL, XNAME, "rxintr");
586 evcnt_attach_dynamic(&sc->sc_txqe, EVCNT_TYPE_MISC,
587 NULL, XNAME, "txqe");
588 #endif
589 }
590
591 void
592 xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
593 {
594 struct xge_softc *sc = ifp->if_softc;
595 uint64_t reg;
596
597 ifmr->ifm_status = IFM_AVALID;
598 ifmr->ifm_active = IFM_ETHER | IFM_10G_LR;
599
600 reg = PIF_RCSR(ADAPTER_STATUS);
601 if ((reg & (RMAC_REMOTE_FAULT | RMAC_LOCAL_FAULT)) == 0)
602 ifmr->ifm_status |= IFM_ACTIVE;
603 }
604
605 int
606 xge_xgmii_mediachange(struct ifnet *ifp)
607 {
608 return 0;
609 }
610
611 static void
612 xge_enable(struct xge_softc *sc)
613 {
614 uint64_t val;
615
616 /* 2, enable adapter */
617 val = PIF_RCSR(ADAPTER_CONTROL);
618 val |= ADAPTER_EN;
619 PIF_WCSR(ADAPTER_CONTROL, val);
620
621 /* 3, light the card enable led */
622 val = PIF_RCSR(ADAPTER_CONTROL);
623 val |= LED_ON;
624 PIF_WCSR(ADAPTER_CONTROL, val);
625 printf("%s: link up\n", XNAME);
626
627 }
628
629 int
630 xge_init(struct ifnet *ifp)
631 {
632 struct xge_softc *sc = ifp->if_softc;
633 uint64_t val;
634
635 if (ifp->if_flags & IFF_RUNNING)
636 return 0;
637
638 /* 31+32, setup MAC config */
639 PIF_WKEY(MAC_CFG, TMAC_EN | RMAC_EN | TMAC_APPEND_PAD |
640 RMAC_STRIP_FCS | RMAC_BCAST_EN | RMAC_DISCARD_PFRM | RMAC_PROM_EN);
641
642 DELAY(1000);
643
644 /* 54, ensure that the adapter is 'quiescent' */
645 val = PIF_RCSR(ADAPTER_STATUS);
646 if ((val & QUIESCENT) != QUIESCENT) {
647 char buf[200];
648 printf("%s: adapter not quiescent, aborting\n", XNAME);
649 val = (val & QUIESCENT) ^ QUIESCENT;
650 snprintb(buf, sizeof buf, QUIESCENT_BMSK, val);
651 printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
652 return 1;
653 }
654
655 /* 56, enable the transmit laser */
656 val = PIF_RCSR(ADAPTER_CONTROL);
657 val |= EOI_TX_ON;
658 PIF_WCSR(ADAPTER_CONTROL, val);
659
660 xge_enable(sc);
661 /*
662 * Enable all interrupts
663 */
664 PIF_WCSR(TX_TRAFFIC_MASK, 0);
665 PIF_WCSR(RX_TRAFFIC_MASK, 0);
666 PIF_WCSR(GENERAL_INT_MASK, 0);
667 PIF_WCSR(TXPIC_INT_MASK, 0);
668 PIF_WCSR(RXPIC_INT_MASK, 0);
669 PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
670 PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
671
672
673 /* Done... */
674 ifp->if_flags |= IFF_RUNNING;
675 ifp->if_flags &= ~IFF_OACTIVE;
676
677 return 0;
678 }
679
680 static void
681 xge_stop(struct ifnet *ifp, int disable)
682 {
683 struct xge_softc *sc = ifp->if_softc;
684 uint64_t val;
685
686 val = PIF_RCSR(ADAPTER_CONTROL);
687 val &= ~ADAPTER_EN;
688 PIF_WCSR(ADAPTER_CONTROL, val);
689
690 while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
691 ;
692 }
693
694 int
695 xge_intr(void *pv)
696 {
697 struct xge_softc *sc = pv;
698 struct txd *txd;
699 struct ifnet *ifp = &sc->sc_if;
700 bus_dmamap_t dmp;
701 uint64_t val;
702 int i, lasttx, plen;
703
704 val = PIF_RCSR(GENERAL_INT_STATUS);
705 if (val == 0)
706 return 0; /* no interrupt here */
707
708 XGE_EVCNT_INCR(&sc->sc_intr);
709
710 PIF_WCSR(GENERAL_INT_STATUS, val);
711
712 if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
713 /* Wait for quiescence */
714 printf("%s: link down\n", XNAME);
715 while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
716 ;
717 PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
718
719 val = PIF_RCSR(ADAPTER_STATUS);
720 if ((val & (RMAC_REMOTE_FAULT | RMAC_LOCAL_FAULT)) == 0)
721 xge_enable(sc); /* Only if link restored */
722 }
723
724 if ((val = PIF_RCSR(TX_TRAFFIC_INT))) {
725 XGE_EVCNT_INCR(&sc->sc_txintr);
726 PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
727 }
728 /*
729 * Collect sent packets.  Walk forward from the last collected
 * descriptor and stop at the first descriptor the chip still owns.
730 */
731 lasttx = sc->sc_lasttx;
732 while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
733 txd = sc->sc_txd[i];
734 dmp = sc->sc_txm[i];
735
736 bus_dmamap_sync(sc->sc_dmat, dmp, 0,
737 dmp->dm_mapsize,
738 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
739
740 if (txd->txd_control1 & TXD_CTL1_OWN) {
741 bus_dmamap_sync(sc->sc_dmat, dmp, 0,
742 dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
743 break;
744 }
745 bus_dmamap_unload(sc->sc_dmat, dmp);
746 m_freem(sc->sc_txb[i]);
747 if_statinc(ifp, if_opackets);
748 sc->sc_lasttx = i;
749 }
750 if (i == sc->sc_nexttx) {
751 XGE_EVCNT_INCR(&sc->sc_txqe);
752 }
753
754 if (sc->sc_lasttx != lasttx)
755 ifp->if_flags &= ~IFF_OACTIVE;
756
757 /* Try to get more packets on the wire */
758 if_schedule_deferred_start(ifp);
759
760 if ((val = PIF_RCSR(RX_TRAFFIC_INT))) {
761 XGE_EVCNT_INCR(&sc->sc_rxintr);
762 PIF_WCSR(RX_TRAFFIC_INT, val); /* Clear interrupt bits */
763 }
764
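	/*
	 * Process received frames until we hit a descriptor that is still
	 * owned by the chip, i.e. not yet filled in.
	 */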
765 for (;;) {
766 struct rxdesc *rxd;
767 struct mbuf *m;
768
769 XGE_RXSYNC(sc->sc_nextrx,
770 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
771
772 rxd = XGE_RXD(sc->sc_nextrx);
773 if (rxd->rxd_control1 & RXD_CTL1_OWN) {
774 XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
775 break;
776 }
777
778 /* Got a packet */
779 m = sc->sc_rxb[sc->sc_nextrx];
780 #if RX_MODE == RX_MODE_1
781 plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
782 #elif RX_MODE == RX_MODE_3
783 #error Fix rxmodes in xge_intr
784 #elif RX_MODE == RX_MODE_5
785 plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
786 plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
787 plen += m->m_next->m_next->m_len =
788 RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
789 plen += m->m_next->m_next->m_next->m_len =
790 RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
791 plen += m->m_next->m_next->m_next->m_next->m_len =
792 RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
793 #endif
794 m_set_rcvif(m, ifp);
795 m->m_pkthdr.len = plen;
796
797 val = rxd->rxd_control1;
798
799 if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
800 /* Failed, recycle this mbuf */
801 #if RX_MODE == RX_MODE_1
802 rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
803 rxd->rxd_control1 = RXD_CTL1_OWN;
804 #elif RX_MODE == RX_MODE_3
805 #elif RX_MODE == RX_MODE_5
806 #endif
807 XGE_RXSYNC(sc->sc_nextrx,
808 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
809 if_statinc(ifp, if_ierrors);
810 break;
811 }
812
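		/*
		 * A checksum field of 0xffff is treated as "verified good";
		 * anything else marks the packet as bad.
		 */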
813 if (RXD_CTL1_PROTOS(val) & (RXD_CTL1_P_IPv4|RXD_CTL1_P_IPv6)) {
814 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
815 if (RXD_CTL1_L3CSUM(val) != 0xffff)
816 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
817 }
818 if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP) {
819 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4 | M_CSUM_TCPv6;
820 if (RXD_CTL1_L4CSUM(val) != 0xffff)
821 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
822 }
823 if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP) {
824 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4 | M_CSUM_UDPv6;
825 if (RXD_CTL1_L4CSUM(val) != 0xffff)
826 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
827 }
828
829 if_percpuq_enqueue(ifp->if_percpuq, m);
830
831 if (++sc->sc_nextrx == NRXREAL)
832 sc->sc_nextrx = 0;
833
834 }
835
836 return 0;
837 }
838
839 int
840 xge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
841 {
842 struct xge_softc *sc = ifp->if_softc;
843 struct ifreq *ifr = (struct ifreq *) data;
844 int s, error = 0;
845
846 s = splnet();
847
848 switch (cmd) {
849 case SIOCSIFMTU:
850 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > XGE_MAX_MTU)
851 error = EINVAL;
852 else if ((error = ifioctl_common(ifp, cmd, data))
853 == ENETRESET) {
854 PIF_WCSR(RMAC_MAX_PYLD_LEN,
855 RMAC_PYLD_LEN(ifr->ifr_mtu));
856 error = 0;
857 }
858 break;
859
860 default:
861 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
862 break;
863
864 error = 0;
865
866 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
867 ;
868 else if (ifp->if_flags & IFF_RUNNING) {
869 /* Change multicast list */
870 xge_mcast_filter(sc);
871 }
872 break;
873 }
874
875 splx(s);
876 return error;
877 }
878
879 void
880 xge_mcast_filter(struct xge_softc *sc)
881 {
882 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
883 struct ethercom *ec = &sc->sc_ethercom;
884 struct ether_multi *enm;
885 struct ether_multistep step;
886 int i, numaddr = 1; /* first slot used for card unicast address */
887 uint64_t val;
888
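	/*
	 * Program each multicast address into the RMAC address memory.
	 * If an address range is requested, or the table fills up, fall
	 * back to accepting all multicast frames instead.
	 */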
889 ETHER_LOCK(ec);
890 ETHER_FIRST_MULTI(step, ec, enm);
891 while (enm != NULL) {
892 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
893 /* Ranges are not supported; fall back to allmulti */
894 ETHER_UNLOCK(ec);
895 goto allmulti;
896 }
897 if (numaddr == MAX_MCAST_ADDR) {
898 ETHER_UNLOCK(ec);
899 goto allmulti;
900 }
901 for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
902 val <<= 8;
903 val |= enm->enm_addrlo[i];
904 }
905 PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
906 PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
907 PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE |
908 RMAC_ADDR_CMD_MEM_STR | RMAC_ADDR_CMD_MEM_OFF(numaddr));
909 while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
910 ;
911 numaddr++;
912 ETHER_NEXT_MULTI(step, enm);
913 }
914 ETHER_UNLOCK(ec);
915 /* set the remaining entries to the broadcast address */
916 for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
917 PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
918 PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
919 PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE |
920 RMAC_ADDR_CMD_MEM_STR | RMAC_ADDR_CMD_MEM_OFF(i));
921 while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
922 ;
923 }
924 ifp->if_flags &= ~IFF_ALLMULTI;
925 return;
926
927 allmulti:
928 /* Just receive everything with the multicast bit set */
929 ifp->if_flags |= IFF_ALLMULTI;
930 PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
931 PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
932 PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE |
933 RMAC_ADDR_CMD_MEM_STR | RMAC_ADDR_CMD_MEM_OFF(1));
934 while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
935 ;
936 }
937
938 void
939 xge_start(struct ifnet *ifp)
940 {
941 struct xge_softc *sc = ifp->if_softc;
942 struct txd *txd = NULL; /* XXX - gcc */
943 bus_dmamap_t dmp;
944 struct mbuf *m;
945 uint64_t par, lcr;
946 int nexttx = 0, ntxd, error, i;
947
948 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
949 return;
950
951 par = lcr = 0;
952 for (;;) {
953 IFQ_POLL(&ifp->if_snd, m);
954 if (m == NULL)
955 break; /* out of packets */
956
957 if (sc->sc_nexttx == sc->sc_lasttx)
958 break; /* No more space */
959
960 nexttx = sc->sc_nexttx;
961 dmp = sc->sc_txm[nexttx];
962
963 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
964 BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
965 printf("%s: bus_dmamap_load_mbuf error %d\n",
966 XNAME, error);
967 break;
968 }
969 IFQ_DEQUEUE(&ifp->if_snd, m);
970
971 bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
972 BUS_DMASYNC_PREWRITE);
973
974 txd = sc->sc_txd[nexttx];
975 sc->sc_txb[nexttx] = m;
976 for (i = 0; i < dmp->dm_nsegs; i++) {
977 if (dmp->dm_segs[i].ds_len == 0)
978 continue;
979 txd->txd_control1 = dmp->dm_segs[i].ds_len;
980 txd->txd_control2 = 0;
981 txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
982 txd++;
983 }
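		/* ntxd = index of the last fragment descriptor used */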
984 ntxd = txd - sc->sc_txd[nexttx] - 1;
985 txd = sc->sc_txd[nexttx];
986 txd->txd_control1 |= TXD_CTL1_OWN | TXD_CTL1_GCF;
987 txd->txd_control2 = TXD_CTL2_UTIL;
988 if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
989 txd->txd_control1 |= TXD_CTL1_MSS(m->m_pkthdr.segsz);
990 txd->txd_control1 |= TXD_CTL1_LSO;
991 }
992
993 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
994 txd->txd_control2 |= TXD_CTL2_CIPv4;
995 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
996 txd->txd_control2 |= TXD_CTL2_CTCP;
997 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
998 txd->txd_control2 |= TXD_CTL2_CUDP;
999 txd[ntxd].txd_control1 |= TXD_CTL1_GCL;
1000
1001 bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
1002 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1003
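		/*
		 * Hand the fragment list to the transmit FIFO: TXDL_PAR gets
		 * the bus address of the first descriptor, TXDL_LCR the
		 * descriptor count and the first/last-list flags.
		 */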
1004 par = sc->sc_txdp[nexttx];
1005 lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
1006 if (m->m_pkthdr.csum_flags & M_CSUM_TSOv4)
1007 lcr |= TXDL_SFF;
1008 TXP_WCSR(TXDL_PAR, par);
1009 TXP_WCSR(TXDL_LCR, lcr);
1010
1011 bpf_mtap(ifp, m, BPF_D_OUT);
1012
1013 sc->sc_nexttx = NEXTTX(nexttx);
1014 }
1015 }
1016
1017 /*
1018 * Allocate DMA memory for transmit descriptor fragments.
1019 * Only one map is used for all descriptors.
1020 */
1021 int
1022 xge_alloc_txmem(struct xge_softc *sc)
1023 {
1024 struct txd *txp;
1025 bus_dma_segment_t seg;
1026 bus_addr_t txdp;
1027 void *kva;
1028 int i, rseg, state;
1029
1030 #define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
1031 state = 0;
1032 if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
1033 &seg, 1, &rseg, BUS_DMA_NOWAIT))
1034 goto err;
1035 state++;
1036 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
1037 BUS_DMA_NOWAIT))
1038 goto err;
1039
1040 state++;
1041 if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
1042 BUS_DMA_NOWAIT, &sc->sc_txmap))
1043 goto err;
1044 state++;
1045 if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
1046 kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
1047 goto err;
1048
1049 /* setup transmit array pointers */
1050 txp = (struct txd *)kva;
1051 txdp = seg.ds_addr;
1052 for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
1053 sc->sc_txd[i] = txp;
1054 sc->sc_txdp[i] = txdp;
1055 txp += NTXFRAGS;
1056 txdp += (NTXFRAGS * sizeof(struct txd));
1057 }
1058
1059 return 0;
1060
1061 err:
1062 if (state > 2)
1063 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
1064 if (state > 1)
1065 bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
1066 if (state > 0)
1067 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1068 return ENOBUFS;
1069 }
1070
1071 /*
1072 * Allocate DMA memory for the receive descriptors;
1073 * only one map is used for all descriptors.
1074 * Link the receive descriptor pages together.
1075 */
1076 int
1077 xge_alloc_rxmem(struct xge_softc *sc)
1078 {
1079 struct rxd_4k *rxpp;
1080 bus_dma_segment_t seg;
1081 void *kva;
1082 int i, rseg, state;
1083
1084 /* sanity check */
1085 if (sizeof(struct rxd_4k) != XGE_PAGE) {
1086 printf("bad compiler struct alignment, %d != %d\n",
1087 (int)sizeof(struct rxd_4k), XGE_PAGE);
1088 return EINVAL;
1089 }
1090
1091 state = 0;
1092 if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
1093 &seg, 1, &rseg, BUS_DMA_NOWAIT))
1094 goto err;
1095 state++;
1096 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
1097 BUS_DMA_NOWAIT))
1098 goto err;
1099
1100 state++;
1101 if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
1102 BUS_DMA_NOWAIT, &sc->sc_rxmap))
1103 goto err;
1104 state++;
1105 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
1106 kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
1107 goto err;
1108
1109 /* setup receive page link pointers */
1110 for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
1111 sc->sc_rxd_4k[i] = rxpp;
1112 rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
1113 (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
1114 }
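	/* Close the ring: the last page links back to the first one. */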
1115 sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
1116 (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
1117
1118 return 0;
1119
1120 err:
1121 if (state > 2)
1122 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
1123 if (state > 1)
1124 bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
1125 if (state > 0)
1126 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1127 return ENOBUFS;
1128 }
1129
1130
1131 /*
1132 * Add a new mbuf chain to descriptor id.
1133 */
1134 int
1135 xge_add_rxbuf(struct xge_softc *sc, int id)
1136 {
1137 struct rxdesc *rxd;
1138 struct mbuf *m[5];
1139 int page, desc, error;
1140 #if RX_MODE == RX_MODE_5
1141 int i;
1142 #endif
1143
1144 page = id/NDESC_BUFMODE;
1145 desc = id%NDESC_BUFMODE;
1146
1147 rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];
1148
1149 /*
1150 * Allocate mbufs.
1151 * In 5-buffer mode five mbufs and two clusters are used;
1152 * the hardware puts the (ethernet, ip, tcp/udp) headers in
1153 * their own buffers and the clusters are only used for data.
1154 */
1155 #if RX_MODE == RX_MODE_1
1156 MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1157 if (m[0] == NULL)
1158 return ENOBUFS;
1159 MCLGET(m[0], M_DONTWAIT);
1160 if ((m[0]->m_flags & M_EXT) == 0) {
1161 m_freem(m[0]);
1162 return ENOBUFS;
1163 }
1164 m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
1165 #elif RX_MODE == RX_MODE_3
1166 #error missing rxmode 3.
1167 #elif RX_MODE == RX_MODE_5
1168 MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1169 for (i = 1; i < 5; i++) {
1170 MGET(m[i], M_DONTWAIT, MT_DATA);
1171 }
1172 if (m[3])
1173 MCLGET(m[3], M_DONTWAIT);
1174 if (m[4])
1175 MCLGET(m[4], M_DONTWAIT);
1176 if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
1177 ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
1178 /* Out of something */
1179 for (i = 0; i < 5; i++)
1180 if (m[i] != NULL)
1181 m_free(m[i]);
1182 return ENOBUFS;
1183 }
1184 /* Link'em together */
1185 m[0]->m_next = m[1];
1186 m[1]->m_next = m[2];
1187 m[2]->m_next = m[3];
1188 m[3]->m_next = m[4];
1189 #else
1190 #error bad mode RX_MODE
1191 #endif
1192
1193 if (sc->sc_rxb[id])
1194 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
1195 sc->sc_rxb[id] = m[0];
1196
1197 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
1198 BUS_DMA_READ | BUS_DMA_NOWAIT);
1199 if (error)
1200 return error;
1201 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
1202 sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);
1203
1204 #if RX_MODE == RX_MODE_1
1205 rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
1206 rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1207 rxd->rxd_control1 = RXD_CTL1_OWN;
1208 #elif RX_MODE == RX_MODE_3
1209 #elif RX_MODE == RX_MODE_5
1210 rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
1211 rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
1212 rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1213 rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
1214 rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
1215 rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
1216 rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
1217 rxd->rxd_control1 = RXD_CTL1_OWN;
1218 #endif
1219
1220 XGE_RXSYNC(id, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1221 return 0;
1222 }
1223
1224 /*
1225 * These magic values come from the FreeBSD driver.
1226 */
1227 int
1228 xge_setup_xgxs(struct xge_softc *sc)
1229 {
1230 /* The magic numbers are described in the user's guide */
1231
1232 /* Writing to MDIO 0x8000 (Global Config 0) */
1233 PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
1234 PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
1235 PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);
1236
1237 /* Writing to MDIO 0x8000 (Global Config 1) */
1238 PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
1239 PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
1240 PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);
1241
1242 /* Reset the Gigablaze */
1243 PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
1244 PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
1245 PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);
1246
1247 /* read the pole settings */
1248 PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
1249 PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
1250 PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);
1251
1252 PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
1253 PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
1254 PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);
1255
1256 PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
1257 PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
1258 PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);
1259
1260 /* Workaround for TX Lane XAUI initialization error.
1261 Read Xpak PHY register 24 for XAUI lane status */
1262 PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
1263 PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
1264 PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);
1265
1266 /*
1267 * Reading the MDIO control with value 0x1804001c0F001c
1268 * means the TxLanes were already in sync.
1269 * Reading the MDIO control with value 0x1804000c0x001c
1270 * means some TxLanes are not in sync, where x is a 4-bit
1271 * value representing each lane.
1272 */
1273 #if 0
1274 val = PIF_RCSR(MDIO_CONTROL);
1275 if (val != 0x1804001c0F001cULL) {
1276 printf("%s: MDIO_CONTROL: %llx != %llx\n",
1277 XNAME, val, 0x1804001c0F001cULL);
1278 return 1;
1279 }
1280 #endif
1281
1282 /* Set and remove the DTE XS INTLoopBackN */
1283 PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
1284 PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
1285 PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
1286 PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
1287 PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);
1288
1289 #if 0
1290 /* Reading the DTX control register should be 0x5152040001c */
1291 val = PIF_RCSR(DTX_CONTROL);
1292 if (val != 0x5152040001cULL) {
1293 printf("%s: DTX_CONTROL: %llx != %llx\n",
1294 XNAME, val, 0x5152040001cULL);
1295 return 1;
1296 }
1297 #endif
1298
1299 PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
1300 PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
1301 PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);
1302
1303 #if 0
1304 /* Reading the MDIO control should be 0x1804001c0f001c */
1305 val = PIF_RCSR(MDIO_CONTROL);
1306 if (val != 0x1804001c0f001cULL) {
1307 printf("%s: MDIO_CONTROL2: %llx != %llx\n",
1308 XNAME, val, 0x1804001c0f001cULL);
1309 return 1;
1310 }
1311 #endif
1312 return 0;
1313 }
1314