xref: /plan9/sys/src/9/kw/ether1116.c (revision 7ae8f4532bb599d9c475208824b53a0495a01a84)
1 /*
2  * marvell kirkwood gigabit ethernet (88e1116 and 88e1121) driver
3  * (as found in the sheevaplug, openrd and guruplug).
4  * the main difference is the flavour of phy kludgery necessary.
5  *
6  * from /public/doc/marvell/88f61xx.kirkwood.pdf,
7  *	/public/doc/marvell/88e1116.pdf, and
8  *	/public/doc/marvell/88e1121r.pdf.
9  */
10 
11 #include "u.h"
12 #include "../port/lib.h"
13 #include "mem.h"
14 #include "dat.h"
15 #include "fns.h"
16 #include "io.h"
17 #include "../port/error.h"
18 #include "../port/netif.h"
19 
20 #include "etherif.h"
21 #include "ethermii.h"
22 #include "../ip/ip.h"
23 
24 #define	MIIDBG	if(0)iprint
25 
26 #define WINATTR(v)      (((v) & MASK(8)) << 8)
27 #define WINSIZE(v)      (((v)/(64*1024) - 1) << 16)
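/*
 * Illustrative sketch, not part of the driver: WINSIZE() encodes a window
 * length as "number of 64KB units, minus one" in bits 16..31, and WINATTR()
 * puts the window attribute byte in bits 8..15.  The values below are
 * hypothetical; MBx is a local stand-in assumed equal to MB (1024*1024).
 * Guarded so it never compiles into the kernel.
 */
#ifdef notdef
static void
winencodingexample(void)
{
	enum { MBx = 1024*1024 };

	/* 256MB / 64KB = 4096 units, so the size field holds 4095<<16 */
	assert(WINSIZE(256*MBx) == 0x0fff0000);
	/* a hypothetical attribute byte of 0x0e lands in bits 8..15 */
	assert(WINATTR(0x0e) == 0x0e00);
}
#endif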
28 
29 enum {
30 	Nrx		= 512,
31 	Ntx		= 32,
32 	Nrxblks		= 1024,
33 	Rxblklen	= 2+1522,  /* ifc. supplies first 2 bytes as padding */
34 
35 	Maxrxintrsec	= 20*1000,	/* max. rx intrs. / sec */
36 	Etherstuck	= 70,	/* must send or receive a packet in this many sec.s */
37 
38 	Descralign	= 16,
39 	Bufalign	= 8,
40 
41 	Pass		= 1,		/* accept packets */
42 
43 	Qno		= 0,		/* do everything on queue zero */
44 };
45 
46 typedef struct Ctlr Ctlr;
47 typedef struct Gbereg Gbereg;
48 typedef struct Mibstats Mibstats;
49 typedef struct Rx Rx;
50 typedef struct Tx Tx;
51 
52 static struct {
53 	Lock;
54 	Block	*head;
55 } freeblocks;
56 
57 /* hardware receive buffer descriptor */
58 struct Rx {
59 	ulong	cs;
60 	ulong	countsize;	/* bytes, buffer size */
61 	ulong	buf;		/* phys. addr. of packet buffer */
62 	ulong	next;		/* phys. addr. of next Rx */
63 };
64 
65 /* hardware transmit buffer descriptor */
66 struct Tx {
67 	ulong	cs;
68 	ulong	countchk;	/* bytes, checksum */
69 	ulong	buf;		/* phys. addr. of packet buffer */
70 	ulong	next;		/* phys. addr. of next Tx */
71 };
72 
73 /* fixed by hw; part of Gberegs */
74 struct Mibstats {
75 	union {
76 		uvlong	rxby;		/* good bytes rcv'd */
77 		struct {
78 			ulong	rxbylo;
79 			ulong	rxbyhi;
80 		};
81 	};
82 	ulong	badrxby;		/* bad bytes rcv'd */
83 	ulong	mactxerr;		/* tx err pkts */
84 	ulong	rxpkt;			/* good pkts rcv'd */
85 	ulong	badrxpkt;		/* bad pkts rcv'd */
86 	ulong	rxbcastpkt;		/* b'cast pkts rcv'd */
87 	ulong	rxmcastpkt;		/* m'cast pkts rcv'd */
88 
89 	ulong	rx64;			/* pkts <= 64 bytes */
90 	ulong	rx65_127;		/* pkts 65-127 bytes */
91 	ulong	rx128_255;		/* pkts 128-255 bytes */
92 	ulong	rx256_511;		/* pkts 256-511 bytes */
93 	ulong	rx512_1023;		/* pkts 512-1023 bytes */
94 	ulong	rx1024_max;		/* pkts >= 1024 bytes */
95 
96 	union {
97 		uvlong	txby;		/* good bytes sent */
98 		struct {
99 			ulong	txbylo;
100 			ulong	txbyhi;
101 		};
102 	};
103 	ulong	txpkt;			/* good pkts sent */
104 	/* half-duplex: pkts dropped due to excessive collisions */
105 	ulong	txcollpktdrop;
106 	ulong	txmcastpkt;		/* m'cast pkts sent */
107 	ulong	txbcastpkt;		/* b'cast pkts sent */
108 
109 	ulong	badmacctlpkts;		/* bad mac ctl pkts */
110 	ulong	txflctl;		/* flow-control pkts sent */
111 	ulong	rxflctl;		/* good flow-control pkts rcv'd */
112 	ulong	badrxflctl;		/* bad flow-control pkts rcv'd */
113 
114 	ulong	rxundersized;		/* runts */
115 	ulong	rxfrags;		/* fragments rcv'd */
116 	ulong	rxtoobig;		/* oversized pkts rcv'd */
117 	ulong	rxjabber;		/* jabber pkts rcv'd */
118 	ulong	rxerr;			/* rx error events */
119 	ulong	crcerr;			/* crc error events */
120 	ulong	collisions;		/* collision events */
121 	ulong	latecoll;		/* late collisions */
122 };
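/*
 * The rxby/txby unions above rely on this port running little-endian:
 * rxbylo then aliases the low 32 bits of the 64-bit accumulator and rxbyhi
 * the high 32 bits, mirroring the order in which Mibstats is embedded in
 * Gbereg below.  A minimal sketch of that aliasing, guarded so it never
 * compiles into the kernel:
 */
#ifdef notdef
static void
mibunionexample(void)
{
	Mibstats s;

	memset(&s, 0, sizeof s);
	s.rxbylo = 0x89abcdef;
	s.rxbyhi = 0x01234567;
	assert(s.rxby == ((uvlong)0x01234567<<32 | 0x89abcdef));
}
#endif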
123 
124 struct Ctlr {
125 	Lock;
126 	Ether	*ether;
127 	Gbereg	*reg;
128 
129 	Lock	initlock;
130 	int	init;
131 
132 	Rx	*rx;		/* receive descriptors */
133 	Block	*rxb[Nrx];	/* blocks belonging to the descriptors */
134 	int	rxhead;		/* descr ethernet will write to next */
135 	int	rxtail;		/* next descr that might need a buffer */
136 	Rendez	rrendez;	/* interrupt wakes up read process */
137 	int	haveinput;
138 
139 	Tx	*tx;
140 	Block	*txb[Ntx];
141 	int	txhead;		/* next descr we can use for new packet */
142 	int	txtail;		/* next descr to reclaim on tx complete */
143 
144 	Mii	*mii;
145 	int	port;
146 
147 	/* stats */
148 	ulong	intrs;
149 	ulong	newintrs;
150 	ulong	txunderrun;
151 	ulong	txringfull;
152 	ulong	rxdiscard;
153 	ulong	rxoverrun;
154 	ulong	nofirstlast;
155 
156 	Mibstats;
157 };
158 
159 #define	Rxqon(q)	(1<<(q))
160 #define	Txqon(q)	(1<<(q))
161 
162 enum {
163 	/* euc bits */
164 	Portreset	= 1 << 20,
165 
166 	/* sdma config, sdc bits */
167 	Burst1		= 0,
168 	Burst2,
169 	Burst4,
170 	Burst8,
171 	Burst16,
172 	SDCrifb		= 1<<0,		/* rx intr on pkt boundaries */
173 #define SDCrxburst(v)	((v)<<1)
174 	SDCrxnobyteswap	= 1<<4,
175 	SDCtxnobyteswap	= 1<<5,
176 	SDCswap64byte	= 1<<6,
177 #define SDCtxburst(v)	((v)<<22)
178 	/* rx intr ipg (inter packet gap) */
179 #define SDCipgintrx(v)	(((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7))
180 
181 	/* portcfg bits */
182 	PCFGupromisc		= 1<<0,	/* unicast promiscuous mode */
183 #define Rxqdefault(q)	((q)<<1)
184 #define Rxqarp(q)	((q)<<4)
185 	PCFGbcrejectnoiparp	= 1<<7,
186 	PCFGbcrejectip		= 1<<8,
187 	PCFGbcrejectarp		= 1<<9,
188 	PCFGamnotxes		= 1<<12, /* auto mode, no summary update on tx */
189 	PCFGtcpq	= 1<<14,	/* capture tcp frames to tcpq */
190 	PCFGudpq	= 1<<15,	/* capture udp frames to udpq */
191 #define	Rxqtcp(q)	((q)<<16)
192 #define	Rxqudp(q)	((q)<<19)
193 #define	Rxqbpdu(q)	((q)<<22)
194 	PCFGrxcs	= 1<<25,	/* rx tcp checksum mode with header */
195 
196 	/* portcfgx bits */
197 	PCFGXspanq	= 1<<1,
198 	PCFGXcrcoff	= 1<<2,		/* no ethernet crc */
199 
200 	/* port serial control0, psc0 bits */
201 	PSC0porton		= 1<<0,
202 	PSC0forcelinkup		= 1<<1,
203 	PSC0an_dplxoff		= 1<<2,	/* an_ = auto. negotiate */
204 	PSC0an_flctloff		= 1<<3,
205 	PSC0an_pauseadv		= 1<<4,
206 	PSC0nofrclinkdown	= 1<<10,
207 	PSC0an_spdoff		= 1<<13,
208 	PSC0dteadv		= 1<<14,	/* dte advertise */
209 
210 	/* max. input pkt size */
211 #define PSC0mru(v)	((v)<<17)
212 	PSC0mrumask	= PSC0mru(MASK(3)),
213 	PSC0mru1518	= 0,		/* 1500 + 2*6 (addrs) + 2 + 4 (crc) */
214 	PSC0mru1522,			/* 1518 + 4(vlan tags) */
215 	PSC0mru1552,			/* `baby giant' */
216 	PSC0mru9022,			/* `jumbo' */
217 	PSC0mru9192,			/* bigger jumbo */
218 	PSC0mru9700,			/* still bigger jumbo */
219 
220 	PSC0fd_frc		= 1<<21,	/* force full duplex */
221 	PSC0flctlfrc		= 1<<22,
222 	PSC0gmiispd_gbfrc	= 1<<23,
223 	PSC0miispdfrc100mbps	= 1<<24,
224 
225 	/* port status 0, ps0 bits */
226 	PS0linkup	= 1<<1,
227 	PS0fd		= 1<<2,			/* full duplex */
228 	PS0flctl	= 1<<3,
229 	PS0gmii_gb	= 1<<4,
230 	PS0mii100mbps	= 1<<5,
231 	PS0txbusy	= 1<<7,
232 	PS0txfifoempty	= 1<<10,
233 	PS0rxfifo1empty	= 1<<11,
234 	PS0rxfifo2empty	= 1<<12,
235 
236 	/* port serial control 1, psc1 bits */
237 	PSC1loopback	= 1<<1,
238 	PSC1mii		= 0<<2,
239 	PSC1rgmii	= 1<<3,			/* enable RGMII */
240 	PSC1portreset	= 1<<4,
241 	PSC1clockbypass	= 1<<5,
242 	PSC1iban	= 1<<6,
243 	PSC1iban_bypass	= 1<<7,
244 	PSC1iban_restart= 1<<8,
245 	PSC1_gbonly	= 1<<11,
246 	PSC1encolonbp	= 1<<15, /* "collision during back-pressure mib counting" */
247 	PSC1coldomlimmask= MASK(6)<<16,
248 #define PSC1coldomlim(v) (((v) & MASK(6))<<16)
249 	PSC1miiallowoddpreamble	= 1<<22,
250 
251 	/* port status 1, ps1 bits */
252 	PS1rxpause	= 1<<0,
253 	PS1txpause	= 1<<1,
254 	PS1pressure	= 1<<2,
255 	PS1syncfail10ms	= 1<<3,
256 	PS1an_done	= 1<<4,
257 	PS1inbandan_bypassed	= 1<<5,
258 	PS1serdesplllocked	= 1<<6,
259 	PS1syncok	= 1<<7,
260 	PS1nosquelch	= 1<<8,
261 
262 	/* irq bits */
263 	/* rx buf returned to cpu ownership, or frame reception finished */
264 	Irx		= 1<<0,
265 	Iextend		= 1<<1,		/* IEsum of irqe set */
266 #define Irxbufferq(q)	(1<<((q)+2))	/* rx buf returned to cpu ownership */
267 	Irxerr		= 1<<10,	/* input ring full, usually */
268 #define Irxerrq(q)	(1<<((q)+11))
269 #define Itxendq(q)	(1<<((q)+19))	/* tx dma stopped for q */
270 	Isum		= 1<<31,
271 
272 	/* irq extended, irqe bits */
273 #define	IEtxbufferq(q)	(1<<((q)+0))	/* tx buf returned to cpu ownership */
274 #define	IEtxerrq(q)	(1<<((q)+8))
275 	IEphystschg	= 1<<16,
276 	IEptp		= 1<<17,
277 	IErxoverrun	= 1<<18,
278 	IEtxunderrun	= 1<<19,
279 	IElinkchg	= 1<<20,
280 	IEintaddrerr	= 1<<23,
281 	IEprbserr	= 1<<25,
282 	IEsum		= 1<<31,
283 
284 	/* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
285 #define TFUTipginttx(v)	(((v) & MASK(16))<<4)
286 
287 	/* minimal frame size, mfs */
288 	MFS40by	= 10<<2,
289 	MFS44by	= 11<<2,
290 	MFS48by	= 12<<2,
291 	MFS52by	= 13<<2,
292 	MFS56by	= 14<<2,
293 	MFS60by	= 15<<2,
294 	MFS64by	= 16<<2,
295 
296 	/* receive descriptor status */
297 	RCSmacerr	= 1<<0,
298 	RCSmacmask	= 3<<1,
299 	RCSmacce	= 0<<1,
300 	RCSmacor	= 1<<1,
301 	RCSmacmf	= 2<<1,
302 	RCSl4chkshift	= 3,
303 	RCSl4chkmask	= MASK(16),
304 	RCSvlan		= 1<<17,
305 	RCSbpdu		= 1<<18,
306 	RCSl4mask	= 3<<21,
307 	RCSl4tcp4	= 0<<21,
308 	RCSl4udp4	= 1<<21,
309 	RCSl4other	= 2<<21,
310 	RCSl4rsvd	= 3<<21,
311 	RCSl2ev2	= 1<<23,
312 	RCSl3ip4	= 1<<24,
313 	RCSip4headok	= 1<<25,
314 	RCSlast		= 1<<26,
315 	RCSfirst	= 1<<27,
316 	RCSunknownaddr	= 1<<28,
317 	RCSenableintr	= 1<<29,
318 	RCSl4chkok	= 1<<30,
319 	RCSdmaown	= 1<<31,
320 
321 	/* transmit descriptor status */
322 	TCSmacerr	= 1<<0,
323 	TCSmacmask	= 3<<1,
324 	TCSmaclc	= 0<<1,
325 	TCSmacur	= 1<<1,
326 	TCSmacrl	= 2<<1,
327 	TCSllc		= 1<<9,
328 	TCSl4chkmode	= 1<<10,
329 	TCSipv4hdlenshift= 11,
330 	TCSvlan		= 1<<15,
331 	TCSl4type	= 1<<16,
332 	TCSgl4chk	= 1<<17,
333 	TCSgip4chk	= 1<<18,
334 	TCSpadding	= 1<<19,
335 	TCSlast		= 1<<20,
336 	TCSfirst	= 1<<21,
337 	TCSenableintr	= 1<<23,
338 	TCSautomode	= 1<<30,
339 	TCSdmaown	= 1<<31,
340 };
341 
342 enum {
343 	/* SMI regs */
344 	PhysmiTimeout	= 10000,	/* what units? in ms. */
345 	Physmidataoff	= 0,		/* Data */
346 	Physmidatamask	= 0xffff<<Physmidataoff,
347 
348 	Physmiaddroff 	= 16,		/* PHY device addr */
349 	Physmiaddrmask	= 0x1f << Physmiaddroff,
350 
351 	Physmiop	= 26,
352 	Physmiopmask	= 3<<Physmiop,
353 	PhysmiopWr	= 0<<Physmiop,
354 	PhysmiopRd	= 1<<Physmiop,
355 
356 	PhysmiReadok	= 1<<27,
357 	PhysmiBusy	= 1<<28,
358 
359 	SmiRegaddroff	= 21,		/* PHY device register addr */
360 	SmiRegaddrmask	= 0x1f << SmiRegaddroff,
361 };
362 
363 struct Gbereg {
364 	ulong	phy;			/* PHY address */
365 	ulong	smi;			/* serial mgmt. interface */
366 	ulong	euda;			/* ether default address */
367 	ulong	eudid;			/* ether default id */
368 	uchar	_pad0[0x80-0x10];
369 
370 	/* dma stuff */
371 	ulong	euirq;			/* interrupt cause */
372 	ulong	euirqmask;		/* interrupt mask */
373 	uchar	_pad1[0x94-0x88];
374 	ulong	euea;			/* error address */
375 	ulong	euiae;			/* internal error address */
376 	uchar	_pad2[0xb0-0x9c];
377 	ulong	euc;			/* control */
378 	uchar	_pad3[0x200-0xb4];
379 	struct {
380 		ulong	base;		/* window base */
381 		ulong	size;		/* window size */
382 	} base[6];
383 	uchar	_pad4[0x280-0x230];
384 	ulong	harr[4];		/* high address remap */
385 	ulong	bare;			/* base address enable */
386 	ulong	epap;			/* port access protect */
387 	uchar	_pad5[0x400-0x298];
388 
389 	ulong	portcfg;		/* port configuration */
390 	ulong	portcfgx;		/* port config. extend */
391 	ulong	mii;			/* mii serial parameters */
392 	ulong	_pad6;
393 	ulong	evlane;			/* vlan ether type */
394 	ulong	macal;			/* mac address low */
395 	ulong	macah;			/* mac address high */
396 	ulong	sdc;			/* sdma config. */
397 	ulong	dscp[7];		/* ip diff. serv. code point -> pri */
398 	ulong	psc0;			/* port serial control 0 */
399 	ulong	vpt2p;			/* vlan priority tag -> pri */
400 	ulong	ps0;			/* ether port status 0 */
401 	ulong	tqc;			/* transmit queue command */
402 	ulong	psc1;			/* port serial control 1 */
403 	ulong	ps1;			/* ether port status 1 */
404 	ulong	mvhdr;			/* marvell header */
405 	ulong	_pad8[2];
406 
407 	/* interrupts */
408 	ulong	irq;			/* interrupt cause; some rw0c bits */
409 	ulong	irqe;			/* " " extended; some rw0c bits */
410 	ulong	irqmask;		/* interrupt mask (actually enable) */
411 	ulong	irqemask;		/* " " extended */
412 
413 	ulong	_pad9;
414 	ulong	pxtfut;			/* port tx fifo urgent threshold */
415 	ulong	_pad10;
416 	ulong	pxmfs;			/* port rx minimum frame size */
417 	ulong	_pad11;
418 
419 	/*
420 	 * # of input frames discarded by addr filtering or lack of resources;
421 	 * zeroed upon read.
422 	 */
423 	ulong	pxdfc;			/* port rx discard frame counter */
424 	ulong	pxofc;			/* port overrun frame counter */
425 	ulong	_pad12[2];
426 	ulong	piae;			/* port internal address error */
427 	uchar	_pad13[0x4bc-0x498];
428 	ulong	etherprio;		/* ether type priority */
429 	uchar	_pad14[0x4dc-0x4c0];
430 	ulong	tqfpc;			/* tx queue fixed priority config. */
431 	ulong	pttbrc;			/* port tx token-bucket rate config. */
432 	ulong	tqc1;			/* tx queue command 1 */
433 	ulong	pmtu;			/* port maximum transmit unit */
434 	ulong	pmtbs;			/* port maximum token bucket size */
435 	uchar	_pad15[0x600-0x4f0];
436 
437 	struct {
438 		ulong	_pad[3];
439 		ulong	r;		/* phys. addr.: cur. rx desc. ptrs */
440 	} crdp[8];
441 	ulong	rqc;			/* rx queue command */
442 	ulong	tcsdp;			/* phys. addr.: cur. tx desc. ptr */
443 	uchar	_pad16[0x6c0-0x688];
444 
445 	ulong	tcqdp[8];		/* phys. addr.: cur. tx q. desc. ptr */
446 	uchar	_pad17[0x700-0x6e0];
447 
448 	struct {
449 		ulong	tbctr;		/* queue tx token-bucket counter */
450 		ulong	tbcfg;		/* tx queue token-bucket config. */
451 		ulong	acfg;		/* tx queue arbiter config. */
452 		ulong	_pad;
453 	} tq[8];
454 	ulong	pttbc;			/* port tx token-bucket counter */
455 	uchar	_pad18[0x7a8-0x784];
456 
457 	ulong	ipg2;			/* tx queue ipg */
458 	ulong	_pad19[3];
459 	ulong	ipg3;
460 	ulong	_pad20;
461 	ulong	htlp;			/* high token in low packet */
462 	ulong	htap;			/* high token in async packet */
463 	ulong	ltap;			/* low token in async packet */
464 	ulong	_pad21;
465 	ulong	ts;			/* tx speed */
466 	uchar	_pad22[0x1000-0x7d4];
467 
468 	/* mac mib counters: statistics */
469 	Mibstats;
470 	uchar	_pad23[0x1400-0x1080];
471 
472 	/* multicast filtering; each byte: Qno<<1 | Pass */
473 	ulong	dfsmt[64];	/* dest addr filter special m'cast table */
474 	ulong	dfomt[64];	/* dest addr filter other m'cast table */
475 	/* unicast filtering */
476 	ulong	dfut[4];		/* dest addr filter unicast table */
477 };
478 
479 static Ctlr *ctlrs[MaxEther];
480 static uchar zeroea[Eaddrlen];
481 
482 static void getmibstats(Ctlr *);
483 
484 static void
485 rxfreeb(Block *b)
486 {
487 	/* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
488 	_xinc(&b->ref);
489 	b->wp = b->rp =
490 		(uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
491 	assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
492 	b->free = rxfreeb;
493 
494 	ilock(&freeblocks);
495 	b->next = freeblocks.head;
496 	freeblocks.head = b;
497 	iunlock(&freeblocks);
498 }
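/*
 * Receive buffer lifecycle, as implied by rxfreeb() above and the code
 * below: blocks come from the private freeblocks list (rxallocb), are
 * handed to the hardware by rxreplenish(), travel up the stack via
 * etheriq() in receive(), and when the stack finally calls freeb() the
 * b->free hook brings them back onto freeblocks instead of releasing them,
 * so the pool keeps its fixed size of Nrxblks buffers.
 */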
499 
500 static Block *
501 rxallocb(void)
502 {
503 	Block *b;
504 
505 	ilock(&freeblocks);
506 	b = freeblocks.head;
507 	if(b != nil) {
508 		freeblocks.head = b->next;
509 		b->next = nil;
510 		b->free = rxfreeb;
511 	}
512 	iunlock(&freeblocks);
513 	return b;
514 }
515 
516 static void
517 rxkick(Ctlr *ctlr)
518 {
519 	Gbereg *reg = ctlr->reg;
520 
521 	if (reg->crdp[Qno].r == 0)
522 		reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
523 	if ((reg->rqc & 0xff) == 0)		/* all queues are stopped? */
524 		reg->rqc = Rxqon(Qno);		/* restart */
525 	coherence();
526 }
527 
528 static void
529 txkick(Ctlr *ctlr)
530 {
531 	Gbereg *reg = ctlr->reg;
532 
533 	if (reg->tcqdp[Qno] == 0)
534 		reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
535 	if ((reg->tqc & 0xff) == 0)		/* all q's stopped? */
536 		reg->tqc = Txqon(Qno);		/* restart */
537 	coherence();
538 }
539 
540 static void
541 rxreplenish(Ctlr *ctlr)
542 {
543 	Rx *r;
544 	Block *b;
545 
546 	while(ctlr->rxb[ctlr->rxtail] == nil) {
547 		b = rxallocb();
548 		if(b == nil) {
549 			iprint("#l%d: rxreplenish out of buffers\n",
550 				ctlr->ether->ctlrno);
551 			break;
552 		}
553 
554 		ctlr->rxb[ctlr->rxtail] = b;
555 
556 		/* set up uncached receive descriptor */
557 		r = &ctlr->rx[ctlr->rxtail];
558 		assert(((uintptr)r & (Descralign - 1)) == 0);
559 		r->countsize = ROUNDUP(Rxblklen, 8);
560 		r->buf = PADDR(b->rp);
561 		coherence();
562 
563 		/* and fire */
564 		r->cs = RCSdmaown | RCSenableintr;
565 		coherence();
566 
567 		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
568 	}
569 }
570 
571 static void
572 dump(uchar *bp, long max)
573 {
574 	if (max > 64)
575 		max = 64;
576 	for (; max > 0; max--, bp++)
577 		iprint("%02.2ux ", *bp);
578 	print("...\n");
579 }
580 
581 static void
582 etheractive(Ether *ether)
583 {
584 	ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
585 }
586 
587 static void
588 ethercheck(Ether *ether)
589 {
590 	if (ether->starttime != 0 &&
591 	    TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck) {
592 		etheractive(ether);
593 		if (ether->ctlrno == 0)	/* only complain about main ether */
594 			iprint("#l%d: ethernet stuck\n", ether->ctlrno);
595 	}
596 }
597 
598 static void
599 receive(Ether *ether)
600 {
601 	int i;
602 	ulong n;
603 	Block *b;
604 	Ctlr *ctlr = ether->ctlr;
605 	Rx *r;
606 
607 	ethercheck(ether);
608 	for (i = Nrx-2; i > 0; i--) {
609 		r = &ctlr->rx[ctlr->rxhead];	/* *r is uncached */
610 		assert(((uintptr)r & (Descralign - 1)) == 0);
611 		if(r->cs & RCSdmaown)		/* descriptor busy? */
612 			break;
613 
614 		b = ctlr->rxb[ctlr->rxhead];	/* got input buffer? */
615 		if (b == nil)
616 			panic("ether1116: nil ctlr->rxb[ctlr->rxhead] "
617 				"in receive");
618 		ctlr->rxb[ctlr->rxhead] = nil;
619 		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);
620 
621 		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
622 			ctlr->nofirstlast++;	/* partial packet */
623 			freeb(b);
624 			continue;
625 		}
626 		if(r->cs & RCSmacerr) {
627 			freeb(b);
628 			continue;
629 		}
630 
631 		n = r->countsize >> 16;		/* TODO includes 2 pad bytes? */
632 		assert(n >= 2 && n < 2048);
633 
634 		/* clear any cached packet or part thereof */
635 		l2cacheuinvse(b->rp, n+2);
636 		cachedinvse(b->rp, n+2);
637 		b->wp = b->rp + n;
638 		/*
639 		 * skip hardware padding intended to align ipv4 address
640 		 * in memory (mv-s104860-u0 §8.3.4.1)
641 		 */
642 		b->rp += 2;
643 		etheriq(ether, b, 1);
644 		etheractive(ether);
645 		if (i % (Nrx / 2) == 0) {
646 			rxreplenish(ctlr);
647 			rxkick(ctlr);
648 		}
649 	}
650 	rxreplenish(ctlr);
651 	rxkick(ctlr);
652 }
653 
654 static void
655 txreplenish(Ether *ether)			/* free transmitted packets */
656 {
657 	Ctlr *ctlr;
658 
659 	ctlr = ether->ctlr;
660 	while(ctlr->txtail != ctlr->txhead) {
661 		/* ctlr->tx is uncached */
662 		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)
663 			break;
664 		if(ctlr->txb[ctlr->txtail] == nil)
665 			panic("no block for sent packet?!");
666 		freeb(ctlr->txb[ctlr->txtail]);
667 		ctlr->txb[ctlr->txtail] = nil;
668 
669 		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
670 		etheractive(ether);
671 	}
672 }
673 
674 /*
675  * transmit strategy: fill the output ring as far as possible,
676  * perhaps leaving a few spare; kick off the output and take
677  * an interrupt only when the transmit queue is empty.
678  */
679 static void
680 transmit(Ether *ether)
681 {
682 	int i, kick, len;
683 	Block *b;
684 	Ctlr *ctlr = ether->ctlr;
685 	Gbereg *reg = ctlr->reg;
686 	Tx *t;
687 
688 	ethercheck(ether);
689 	ilock(ctlr);
690 	txreplenish(ether);			/* reap old packets */
691 
692 	/* queue new packets; use at most half the tx descs to avoid livelock */
693 	kick = 0;
694 	for (i = Ntx/2 - 2; i > 0; i--) {
695 		t = &ctlr->tx[ctlr->txhead];	/* *t is uncached */
696 		assert(((uintptr)t & (Descralign - 1)) == 0);
697 		if(t->cs & TCSdmaown) {		/* descriptor busy? */
698 			ctlr->txringfull++;
699 			break;
700 		}
701 
702 		b = qget(ether->oq);		/* outgoing packet? */
703 		if (b == nil)
704 			break;
705 		len = BLEN(b);
706 		if(len < ether->minmtu || len > ether->maxmtu) {
707 			freeb(b);
708 			continue;
709 		}
710 		ctlr->txb[ctlr->txhead] = b;
711 
712 		/* make sure the whole packet is in memory */
713 		cachedwbse(b->rp, len);
714 		l2cacheuwbse(b->rp, len);
715 
716 		/* set up the transmit descriptor */
717 		t->buf = PADDR(b->rp);
718 		t->countchk = len << 16;
719 		coherence();
720 
721 		/* and fire */
722 		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
723 			TCSenableintr;
724 		coherence();
725 
726 		kick++;
727 		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
728 	}
729 	if (kick) {
730 		txkick(ctlr);
731 
732 		reg->irqmask  |= Itxendq(Qno);
733 		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
734 	}
735 	iunlock(ctlr);
736 }
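/*
 * A small sketch, not part of the driver, of the ring arithmetic behind
 * transmit(): with head/tail advanced by NEXT() (assumed to be the usual
 * (i+1)%n increment in this port), the number of descriptors queued but
 * not yet reclaimed is (txhead - txtail + Ntx) % Ntx, and the loop above
 * queues at most Ntx/2 - 2 new descriptors (14 with Ntx = 32) per call,
 * which is how it keeps half the ring free and avoids livelock.
 */
#ifdef notdef
static int
txinflight(Ctlr *ctlr)
{
	return (ctlr->txhead - ctlr->txtail + Ntx) % Ntx;
}
#endif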
737 
738 static void
739 dumprxdescs(Ctlr *ctlr)
740 {
741 	int i;
742 	Gbereg *reg = ctlr->reg;
743 
744 	iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
745 		ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
746 	for (i = 0; i < Nrx; i++) {
747 		iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
748 		delay(50);
749 	}
750 	for (i = 0; i < Nrx; i++) {
751 		iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
752 			i, &ctlr->rx[i], ctlr->rx[i].cs,
753 			ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
754 			ctlr->rx[i].next);
755 		delay(50);
756 	}
757 	delay(1000);
758 }
759 
760 static int
761 gotinput(void* ctlr)
762 {
763 	return ((Ctlr*)ctlr)->haveinput != 0;
764 }
765 
766 /*
767  * process any packets in the input ring.
768  * also sum mib stats frequently to avoid the overflow
769  * mentioned in the errata.
770  */
771 static void
772 rcvproc(void* arg)
773 {
774 	Ctlr *ctlr;
775 	Ether *ether;
776 
777 	ether = arg;
778 	ctlr = ether->ctlr;
779 	for(;;){
780 		tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
781 		ilock(ctlr);
782 		getmibstats(ctlr);
783 		if (ctlr->haveinput) {
784 			ctlr->haveinput = 0;
785 			iunlock(ctlr);
786 			receive(ether);
787 		} else
788 			iunlock(ctlr);
789 	}
790 }
791 
792 static void
793 interrupt(Ureg*, void *arg)
794 {
795 	ulong irq, irqe, handled;
796 	Ether *ether = arg;
797 	Ctlr *ctlr = ether->ctlr;
798 	Gbereg *reg = ctlr->reg;
799 
800 	handled = 0;
801 	irq = reg->irq;
802 	irqe = reg->irqe;
803 	reg->irqe = 0;				/* extinguish intr causes */
804 	reg->irq = 0;				/* extinguish intr causes */
805 	ethercheck(ether);
806 
807 	if(irq & (Irx | Irxbufferq(Qno))) {
808 		/*
809 		 * letting a kproc process the input takes far less real time
810 		 * than doing it all at interrupt level.
811 		 */
812 		ctlr->haveinput = 1;
813 		wakeup(&ctlr->rrendez);
814 		irq &= ~(Irx | Irxbufferq(Qno));
815 		handled++;
816 	} else
817 		rxkick(ctlr);
818 
819 	if(irq & Itxendq(Qno)) {		/* transmit ring empty? */
820 		reg->irqmask  &= ~Itxendq(Qno);	/* prevent more interrupts */
821 		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
822 		transmit(ether);
823 		irq &= ~Itxendq(Qno);
824 		handled++;
825 	}
826 
827 	if(irqe & IEsum) {
828 		/*
829 		 * IElinkchg appears to only be set when unplugging.
830 		 * autonegotiation is likely not done yet, so linkup not valid,
831 		 * thus we note the link change here, and check for
832 		 * that and autonegotiation done below.
833 		 */
834 		if(irqe & IEphystschg) {
835 			ether->link = (reg->ps0 & PS0linkup) != 0;
836 			ether->linkchg = 1;
837 		}
838 		if(irqe & IEtxerrq(Qno))
839 			ether->oerrs++;
840 		if(irqe & IErxoverrun)
841 			ether->overflows++;
842 		if(irqe & IEtxunderrun)
843 			ctlr->txunderrun++;
844 		if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
845 		    IEtxunderrun))
846 			handled++;
847 	}
848 	if (irq & Isum) {
849 		if (irq & Irxerr) {  /* nil desc. ptr. or desc. owned by cpu */
850 			ether->buffs++;		/* approx. error */
851 
852 			/* if the input ring is full, drain it */
853 			ctlr->haveinput = 1;
854 			wakeup(&ctlr->rrendez);
855 		}
856 		if(irq & (Irxerr | Irxerrq(Qno)))
857 			handled++;
858 		irq  &= ~(Irxerr | Irxerrq(Qno));
859 	}
860 
861 	if(ether->linkchg && (reg->ps1 & PS1an_done)) {
862 		handled++;
863 		ether->link = (reg->ps0 & PS0linkup) != 0;
864 		ether->linkchg = 0;
865 	}
866 	ctlr->newintrs++;
867 
868 	if (!handled) {
869 		irq  &= ~Isum;
870 		irqe &= ~IEtxbufferq(Qno);
871 		if (irq == 0 && irqe == 0) {
872 			/* seems to be triggered by continuous output */
873 			// iprint("ether1116: spurious interrupt\n");
874 		} else
875 			iprint("ether1116: interrupt cause unknown; "
876 				"irq %#lux irqe %#lux\n", irq, irqe);
877 	}
878 	intrclear(Irqlo, ether->irq);
879 }
880 
881 void
882 promiscuous(void *arg, int on)
883 {
884 	Ether *ether = arg;
885 	Ctlr *ctlr = ether->ctlr;
886 	Gbereg *reg = ctlr->reg;
887 
888 	ilock(ctlr);
889 	ether->prom = on;
890 	if(on)
891 		reg->portcfg |= PCFGupromisc;
892 	else
893 		reg->portcfg &= ~PCFGupromisc;
894 	iunlock(ctlr);
895 }
896 
897 void
898 multicast(void *, uchar *, int)
899 {
900 	/* nothing to do; we always accept multicast */
901 }
902 
903 static void quiesce(Gbereg *reg);
904 
905 static void
906 shutdown(Ether *ether)
907 {
908 	int i;
909 	Ctlr *ctlr = ether->ctlr;
910 	Gbereg *reg = ctlr->reg;
911 
912 	ilock(ctlr);
913 	quiesce(reg);
914 	reg->euc |= Portreset;
915 	coherence();
916 	iunlock(ctlr);
917 	delay(100);
918 	ilock(ctlr);
919 	reg->euc &= ~Portreset;
920 	coherence();
921 	delay(20);
922 
923 	reg->psc0 = 0;			/* no PSC0porton */
924 	reg->psc1 |= PSC1portreset;
925 	coherence();
926 	delay(50);
927 	reg->psc1 &= ~PSC1portreset;
928 	coherence();
929 
930 	for (i = 0; i < nelem(reg->tcqdp); i++)
931 		reg->tcqdp[i] = 0;
932 	for (i = 0; i < nelem(reg->crdp); i++)
933 		reg->crdp[i].r = 0;
934 	coherence();
935 
936 	iunlock(ctlr);
937 }
938 
939 enum {
940 	CMjumbo,
941 };
942 
943 static Cmdtab ctlmsg[] = {
944 	CMjumbo,	"jumbo",	2,
945 };
946 
947 long
948 ctl(Ether *e, void *p, long n)
949 {
950 	Cmdbuf *cb;
951 	Cmdtab *ct;
952 	Ctlr *ctlr = e->ctlr;
953 	Gbereg *reg = ctlr->reg;
954 
955 	cb = parsecmd(p, n);
956 	if(waserror()) {
957 		free(cb);
958 		nexterror();
959 	}
960 
961 	ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
962 	switch(ct->index) {
963 	case CMjumbo:
964 		if(strcmp(cb->f[1], "on") == 0) {
965 			/* incoming packet queue doesn't expect jumbo frames */
966 			error("jumbo disabled");
967 			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
968 				PSC0mru(PSC0mru9022);
969 			e->maxmtu = 9022;
970 		} else if(strcmp(cb->f[1], "off") == 0) {
971 			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
972 				PSC0mru(PSC0mru1522);
973 			e->maxmtu = ETHERMAXTU;
974 		} else
975 			error(Ebadctl);
976 		break;
977 	default:
978 		error(Ebadctl);
979 		break;
980 	}
981 	free(cb);
982 	poperror();
983 	return n;
984 }
985 
986 /*
987  * phy/mii goo
988  */
989 
990 static int
991 smibusywait(Gbereg *reg, ulong waitbit)
992 {
993 	ulong timeout, smi_reg;
994 
995 	timeout = PhysmiTimeout;
996 	/* wait till the SMI is not busy */
997 	do {
998 		/* read smi register */
999 		smi_reg = reg->smi;
1000 		if (timeout-- == 0) {
1001 			MIIDBG("SMI busy timeout\n");
1002 			return -1;
1003 		}
1004 //		delay(1);
1005 	} while (smi_reg & waitbit);
1006 	return 0;
1007 }
1008 
1009 static int
1010 miird(Mii *mii, int pa, int ra)
1011 {
1012 	ulong smi_reg, timeout;
1013 	Gbereg *reg;
1014 
1015 	reg = ((Ctlr*)mii->ctlr)->reg;
1016 
1017 	/* check params */
1018 	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
1019 	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
1020 		return -1;
1021 
1022 	smibusywait(reg, PhysmiBusy);
1023 
1024 	/* fill the phy address and register offset and read opcode */
1025 	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
1026 	coherence();
1027 
1028 	/* wait til read value is ready */
1029 	timeout = PhysmiTimeout;
1030 	do {
1031 		smi_reg = reg->smi;
1032 		if (timeout-- == 0) {
1033 			MIIDBG("SMI read-valid timeout\n");
1034 			return -1;
1035 		}
1036 	} while (!(smi_reg & PhysmiReadok));
1037 
1038 	/* Wait for the data to update in the SMI register */
1039 	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
1040 		;
1041 	return reg->smi & Physmidatamask;
1042 }
1043 
1044 static int
1045 miiwr(Mii *mii, int pa, int ra, int v)
1046 {
1047 	Gbereg *reg;
1048 	ulong smi_reg;
1049 
1050 	reg = ((Ctlr*)mii->ctlr)->reg;
1051 
1052 	/* check params */
1053 	if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
1054 	    ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
1055 		return -1;
1056 
1057 	smibusywait(reg, PhysmiBusy);
1058 
1059 	/* fill the phy address and register offset and read opcode */
1060 	smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
1061 	reg->smi = smi_reg & ~PhysmiopRd;
1062 	coherence();
1063 	return 0;
1064 }
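/*
 * Illustrative sketch, not part of the driver: how the smi fields from the
 * enum above pack into one register write.  The phy address (8) and the
 * register value (0x1140) are hypothetical.  Guarded so it never compiles
 * into the kernel.
 */
#ifdef notdef
static void
smipackingexample(void)
{
	ulong w, r;

	/* write 0x1140 to register 0 of phy 8 */
	w = 0x1140 << Physmidataoff | 8 << Physmiaddroff | 0 << SmiRegaddroff |
		PhysmiopWr;
	assert(w == 0x00081140);
	/* read register 0 of phy 8: same addressing, read opcode, no data */
	r = 8 << Physmiaddroff | 0 << SmiRegaddroff | PhysmiopRd;
	assert(r == 0x04080000);
}
#endif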
1065 
1066 #define MIIMODEL(idr2)	(((idr2) >> 4) & MASK(6))
1067 
1068 enum {
1069 	Hacknone,
1070 	Hackdual,
1071 
1072 	Ouimarvell	= 0x005043,
1073 
1074 	/* idr2 mii/phy model numbers */
1075 	Phy1000		= 0x00,		/* 88E1000 Gb */
1076 	Phy1011		= 0x02,		/* 88E1011 Gb */
1077 	Phy1000_3	= 0x03,		/* 88E1000 Gb */
1078 	Phy1000s	= 0x04,		/* 88E1000S Gb */
1079 	Phy1000_5	= 0x05,		/* 88E1000 Gb */
1080 	Phy1000_6	= 0x06,		/* 88E1000 Gb */
1081 	Phy3082		= 0x08,		/* 88E3082 10/100 */
1082 	Phy1112		= 0x09,		/* 88E1112 Gb */
1083 	Phy1121r	= 0x0b,		/* says the 1121r manual */
1084 	Phy1149		= 0x0b,		/* 88E1149 Gb */
1085 	Phy1111		= 0x0c,		/* 88E1111 Gb */
1086 	Phy1116		= 0x21,		/* 88E1116 Gb */
1087 	Phy1116r	= 0x24,		/* 88E1116R Gb */
1088 	Phy1118		= 0x22,		/* 88E1118 Gb */
1089 	Phy3016		= 0x26,		/* 88E3016 10/100 */
1090 };
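/*
 * A worked example, not taken from any datasheet, of the Phyidr1/Phyidr2
 * decoding done in mymii() below, using a hypothetical Marvell-style id
 * pair Phyidr1 = 0x0141, Phyidr2 = 0x0e14:
 *	oui   = (0x0141 & 0x3FFF)<<6 | 0x0e14>>10 = 0x5040 | 3 = 0x005043 (Ouimarvell)
 *	model = MIIMODEL(0x0e14) = (0x0e14>>4) & MASK(6) = 0x21 (Phy1116)
 *	rev   = 0x0e14 & MASK(4) = 4
 */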
1091 
1092 static int hackflavour;
1093 
1094 /*
1095  * on openrd, ether0's phy has address 8, ether1's is ether0's 24.
1096  * on guruplug, ether0's is phy 0 and ether1's is ether0's phy 1.
1097  */
1098 int
1099 mymii(Mii* mii, int mask)
1100 {
1101 	Ctlr *ctlr;
1102 	MiiPhy *miiphy;
1103 	int bit, ctlrno, oui, model, phyno, r, rmask;
1104 	static int dualport, phyidx;
1105 	static int phynos[NMiiPhy];
1106 
1107 	ctlr = mii->ctlr;
1108 	ctlrno = ctlr->ether->ctlrno;
1109 
1110 	/* first pass: figure out what kind of phy(s) we have. */
1111 	dualport = 0;
1112 	if (ctlrno == 0) {
1113 		for(phyno = 0; phyno < NMiiPhy; phyno++){
1114 			bit = 1<<phyno;
1115 			if(!(mask & bit) || mii->mask & bit)
1116 				continue;
1117 			if(mii->mir(mii, phyno, Bmsr) == -1)
1118 				continue;
1119 			r = mii->mir(mii, phyno, Phyidr1);
1120 			oui = (r & 0x3FFF)<<6;
1121 			r = mii->mir(mii, phyno, Phyidr2);
1122 			oui |= r>>10;
1123 			model = MIIMODEL(r);
1124 			if (oui == 0xfffff && model == 0x3f)
1125 				continue;
1126 			MIIDBG("ctlrno %d phy %d oui %#ux model %#ux\n",
1127 				ctlrno, phyno, oui, model);
1128 			if (oui == Ouimarvell &&
1129 			    (model == Phy1121r || model == Phy1116r))
1130 				++dualport;
1131 			phynos[phyidx++] = phyno;
1132 		}
1133 		hackflavour = dualport == 2 && phyidx == 2? Hackdual: Hacknone;
1134 		MIIDBG("ether1116: %s-port phy\n",
1135 			hackflavour == Hackdual? "dual": "single");
1136 	}
1137 
1138 	/*
1139 	 * Probe through mii for PHYs in mask;
1140 	 * return the mask of those found in the current probe.
1141 	 * If the PHY has not already been probed, update
1142 	 * the Mii information.
1143 	 */
1144 	rmask = 0;
1145 	if (hackflavour == Hackdual && ctlrno < phyidx) {
1146 		/*
1147 		 * openrd, guruplug or the like: use ether0's phys.
1148 		 * this is a nasty hack, but so is the hardware.
1149 		 */
1150 		MIIDBG("ctlrno %d using ctlrno 0's phyno %d\n",
1151 			ctlrno, phynos[ctlrno]);
1152 		ctlr->mii = mii = ctlrs[0]->mii;
1153 		mask = 1 << phynos[ctlrno];
1154 		mii->mask = ~mask;
1155 	}
1156 	for(phyno = 0; phyno < NMiiPhy; phyno++){
1157 		bit = 1<<phyno;
1158 		if(!(mask & bit))
1159 			continue;
1160 		if(mii->mask & bit){
1161 			rmask |= bit;
1162 			continue;
1163 		}
1164 		if(mii->mir(mii, phyno, Bmsr) == -1)
1165 			continue;
1166 		r = mii->mir(mii, phyno, Phyidr1);
1167 		oui = (r & 0x3FFF)<<6;
1168 		r = mii->mir(mii, phyno, Phyidr2);
1169 		oui |= r>>10;
1170 		if(oui == 0xFFFFF || oui == 0)
1171 			continue;
1172 
1173 		if((miiphy = malloc(sizeof(MiiPhy))) == nil)
1174 			continue;
1175 		miiphy->mii = mii;
1176 		miiphy->oui = oui;
1177 		miiphy->phyno = phyno;
1178 
1179 		miiphy->anar = ~0;
1180 		miiphy->fc = ~0;
1181 		miiphy->mscr = ~0;
1182 
1183 		mii->phy[phyno] = miiphy;
1184 		if(ctlrno == 0 || (hackflavour != Hackdual && mii->curphy == nil))
1185 			mii->curphy = miiphy;
1186 		mii->mask |= bit;
1187 		mii->nphy++;
1188 
1189 		rmask |= bit;
1190 	}
1191 	return rmask;
1192 }
1193 
1194 static int
1195 kirkwoodmii(Ether *ether)
1196 {
1197 	int i;
1198 	Ctlr *ctlr;
1199 	MiiPhy *phy;
1200 
1201 	MIIDBG("mii\n");
1202 	ctlr = ether->ctlr;
1203 	if((ctlr->mii = malloc(sizeof(Mii))) == nil)
1204 		return -1;
1205 	ctlr->mii->ctlr = ctlr;
1206 	ctlr->mii->mir = miird;
1207 	ctlr->mii->miw = miiwr;
1208 
1209 	if(mymii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
1210 		print("#l%d: ether1116: init mii failure\n", ether->ctlrno);
1211 		free(ctlr->mii);
1212 		ctlr->mii = nil;
1213 		return -1;
1214 	}
1215 
1216 	/* oui 005043 is marvell */
1217 	MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
1218 	// TODO: does this make sense? shouldn't each phy be initialised?
1219 	if((ctlr->ether->ctlrno == 0 || hackflavour != Hackdual) &&
1220 	    miistatus(ctlr->mii) < 0){
1221 		miireset(ctlr->mii);
1222 		MIIDBG("miireset\n");
1223 		if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
1224 			iprint("miiane failed\n");
1225 			return -1;
1226 		}
1227 		MIIDBG("miistatus\n");
1228 		miistatus(ctlr->mii);
1229 		if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
1230 			for(i = 0; ; i++){
1231 				if(i > 600){
1232 					iprint("ether1116: autonegotiation failed\n");
1233 					break;
1234 				}
1235 				if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
1236 					break;
1237 				delay(10);
1238 			}
1239 			if(miistatus(ctlr->mii) < 0)
1240 				iprint("miistatus failed\n");
1241 		}else{
1242 			iprint("ether1116: no link\n");
1243 			phy->speed = 10;	/* simple default */
1244 		}
1245 	}
1246 
1247 	ether->mbps = phy->speed;
1248 	MIIDBG("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
1249 		ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
1250 	MIIDBG("mii done\n");
1251 	return 0;
1252 }
1253 
1254 enum {						/* PHY register pages */
1255 	Pagcopper,
1256 	Pagfiber,
1257 	Pagrgmii,
1258 	Pagled,
1259 	Pagrsvd1,
1260 	Pagvct,
1261 	Pagtest,
1262 	Pagrsvd2,
1263 	Pagfactest,
1264 };
1265 
1266 static void
1267 miiregpage(Mii *mii, ulong dev, ulong page)
1268 {
1269 	miiwr(mii, dev, Eadr, page);
1270 }
1271 
1272 static int
1273 miiphyinit(Mii *mii)
1274 {
1275 	ulong dev;
1276 	Ctlr *ctlr;
1277 	Gbereg *reg;
1278 
1279 	ctlr = (Ctlr*)mii->ctlr;
1280 	reg = ctlr->reg;
1281 	dev = reg->phy;
1282 	MIIDBG("phy dev addr %lux\n", dev);
1283 
1284 	/* leds link & activity */
1285 	miiregpage(mii, dev, Pagled);
1286 	/* low 4 bits == 1: on - link, blink - activity, off - no link */
1287 	miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);
1288 
1289 	miiregpage(mii, dev, Pagrgmii);
1290 	miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
1291 	/* must now do a software reset, says the manual */
1292 	miireset(ctlr->mii);
1293 
1294 	/* enable RGMII delay on Tx and Rx for CPU port */
1295 	miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);	/* sic: the second Rxtiming was presumably meant to be Txtiming */
1296 	/* must now do a software reset, says the manual */
1297 	miireset(ctlr->mii);
1298 
1299 	miiregpage(mii, dev, Pagcopper);
1300 	miiwr(mii, dev, Scr,
1301 		(miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
1302 
1303 	return 0;
1304 }
1305 
1306 /*
1307  * initialisation
1308  */
1309 
1310 static void
1311 quiesce(Gbereg *reg)
1312 {
1313 	ulong v;
1314 
1315 	v = reg->tqc;
1316 	if (v & 0xFF)
1317 		reg->tqc = v << 8;		/* stop active channels */
1318 	v = reg->rqc;
1319 	if (v & 0xFF)
1320 		reg->rqc = v << 8;		/* stop active channels */
1321 	/* wait for all queues to stop */
1322 	while (reg->tqc & 0xFF || reg->rqc & 0xFF)
1323 		;
1324 }
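/*
 * As quiesce() implies, the low byte of tqc/rqc holds per-queue enable
 * bits and the next byte the matching disable bits, so writing the active
 * mask shifted left by 8 requests a stop of exactly the running queues.
 * For example, if only queue 0 is running (rqc & 0xFF == 0x01), writing
 * 0x0100 asks it to stop; the loop above then waits for the enable bits
 * to clear.
 */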
1325 
1326 static void
1327 p16(uchar *p, ulong v)		/* convert big-endian short to bytes */
1328 {
1329 	*p++ = v>>8;
1330 	*p   = v;
1331 }
1332 
1333 static void
1334 p32(uchar *p, ulong v)		/* convert big-endian long to bytes */
1335 {
1336 	*p++ = v>>24;
1337 	*p++ = v>>16;
1338 	*p++ = v>>8;
1339 	*p   = v;
1340 }
1341 
1342 /*
1343  * set ether->ea from hw mac address,
1344  * configure unicast filtering to accept it.
1345  */
1346 void
1347 archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
1348 {
1349 	uchar *ea;
1350 	ulong nibble, ucreg, tbloff, regoff;
1351 
1352 	ea = ether->ea;
1353 	p32(ea,   reg->macah);
1354 	p16(ea+4, reg->macal);
1355 	if (memcmp(ea, zeroea, sizeof zeroea) == 0 && ether->ctlrno > 0) {
1356 		/* hack: use ctlr[0]'s + ctlrno */
1357 		memmove(ea, ctlrs[0]->ether->ea, Eaddrlen);
1358 		ea[Eaddrlen-1] += ether->ctlrno;
1359 		reg->macah = ea[0] << 24 | ea[1] << 16 | ea[2] << 8 | ea[3];
1360 		reg->macal = ea[4] <<  8 | ea[5];
1361 		coherence();
1362 	}
1363 
1364 	/* accept frames on ea */
1365 	nibble = ea[5] & 0xf;
1366 	tbloff = nibble / 4;
1367 	regoff = nibble % 4;
1368 
1369 	regoff *= 8;
1370 	ucreg = reg->dfut[tbloff] & (0xff << regoff);
1371 	ucreg |= (rxqno << 1 | Pass) << regoff;
1372 	reg->dfut[tbloff] = ucreg;
1373 
1374 	/* accept all multicast too.  set up special & other tables. */
1375 	memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
1376 	memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
1377 	coherence();
1378 }
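/*
 * A worked example, not part of the driver, of the unicast filter indexing
 * in archetheraddr() for a hypothetical station address ending in 0x4b:
 * nibble = 0x4b & 0xf = 11, tbloff = 11/4 = 2, regoff = (11%4)*8 = 24, so
 * the queue/Pass byte for that address lives in bits 24..31 of dfut[2].
 * The same packing as a helper, guarded so it never compiles in:
 */
#ifdef notdef
static ulong
dfutentry(uchar lastbyte, int rxqno)
{
	ulong nibble, regoff;

	nibble = lastbyte & 0xf;
	regoff = (nibble % 4) * 8;
	/* caller would merge this into reg->dfut[nibble / 4] */
	return (rxqno << 1 | Pass) << regoff;
}
#endif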
1379 
1380 static void
1381 cfgdramacc(Gbereg *reg)
1382 {
1383 	memset(reg->harr, 0, sizeof reg->harr);
1384 	memset(reg->base, 0, sizeof reg->base);
1385 
1386 	reg->bare = MASK(6) - MASK(2);	/* disable wins 2-5 */
1387 	/* this doesn't make any sense, but it's required */
1388 	reg->epap = 3 << 2 | 3;		/* full access for wins 0 & 1 */
1389 //	reg->epap = 0;		/* no access on access violation for all wins */
1390 	coherence();
1391 
1392 	reg->base[0].base = PHYSDRAM | WINATTR(Attrcs0) | Targdram;
1393 	reg->base[0].size = WINSIZE(256*MB);
1394 	reg->base[1].base = (PHYSDRAM + 256*MB) | WINATTR(Attrcs1) | Targdram;
1395 	reg->base[1].size = WINSIZE(256*MB);
1396 	coherence();
1397 }
1398 
1399 static void
1400 ctlralloc(Ctlr *ctlr)
1401 {
1402 	int i;
1403 	Block *b;
1404 	Rx *r;
1405 	Tx *t;
1406 
1407 	ilock(&freeblocks);
1408 	for(i = 0; i < Nrxblks; i++) {
1409 		b = iallocb(Rxblklen+Bufalign-1);
1410 		if(b == nil) {
1411 			iprint("ether1116: no memory for rx buffers\n");
1412 			break;
1413 		}
1414 		assert(b->ref == 1);
1415 		b->wp = b->rp = (uchar*)
1416 			((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
1417 		assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
1418 		b->free = rxfreeb;
1419 		b->next = freeblocks.head;
1420 		freeblocks.head = b;
1421 	}
1422 	iunlock(&freeblocks);
1423 
1424 	/*
1425 	 * allocate uncached rx ring descriptors because rings are shared
1426 	 * with the ethernet controller and more than one fits in a cache line.
1427 	 */
1428 	ctlr->rx = ucallocalign(Nrx * sizeof(Rx), Descralign, 0);
1429 	if(ctlr->rx == nil)
1430 		panic("ether1116: no memory for rx ring");
1431 	for(i = 0; i < Nrx; i++) {
1432 		r = &ctlr->rx[i];
1433 		assert(((uintptr)r & (Descralign - 1)) == 0);
1434 		r->cs = 0;	/* owned by software until r->buf is non-nil */
1435 		r->buf = 0;
1436 		r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
1437 		ctlr->rxb[i] = nil;
1438 	}
1439 	ctlr->rxtail = ctlr->rxhead = 0;
1440 	rxreplenish(ctlr);
1441 
1442 	/* allocate uncached tx ring descriptors */
1443 	ctlr->tx = ucallocalign(Ntx * sizeof(Tx), Descralign, 0);
1444 	if(ctlr->tx == nil)
1445 		panic("ether1116: no memory for tx ring");
1446 	for(i = 0; i < Ntx; i++) {
1447 		t = &ctlr->tx[i];
1448 		assert(((uintptr)t & (Descralign - 1)) == 0);
1449 		t->cs = 0;
1450 		t->buf = 0;
1451 		t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
1452 		ctlr->txb[i] = nil;
1453 	}
1454 	ctlr->txtail = ctlr->txhead = 0;
1455 }
1456 
1457 static void
1458 ctlrinit(Ether *ether)
1459 {
1460 	int i;
1461 	Ctlr *ctlr = ether->ctlr;
1462 	Gbereg *reg = ctlr->reg;
1463 	static char name[KNAMELEN];
1464 	static Ctlr fakectlr;		/* bigger than 4K; keep off the stack */
1465 
1466 	for (i = 0; i < nelem(reg->tcqdp); i++)
1467 		reg->tcqdp[i] = 0;
1468 	for (i = 0; i < nelem(reg->crdp); i++)
1469 		reg->crdp[i].r = 0;
1470 	coherence();
1471 
1472 	cfgdramacc(reg);
1473 	ctlralloc(ctlr);
1474 
1475 	reg->tcqdp[Qno]  = PADDR(&ctlr->tx[ctlr->txhead]);
1476 	reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
1477 	coherence();
1478 
1479 //	dumprxdescs(ctlr);
1480 
1481 	/* clear stats by reading them into fake ctlr */
1482 	getmibstats(&fakectlr);
1483 
1484 	reg->pxmfs = MFS40by;			/* allow runts in */
1485 
1486 	/*
1487 	 * ipg's (inter packet gaps) for interrupt coalescing,
1488 	 * values in units of 64 clock cycles.  A full-sized
1489 	 * packet (1514 bytes) takes just over 12µs to transmit.
1490 	 */
1491 	if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
1492 		panic("rx coalescing value %d too big for short",
1493 			CLOCKFREQ/(Maxrxintrsec*64));
1494 	reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
1495 		SDCrxnobyteswap | SDCtxnobyteswap |
1496 		SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
1497 	reg->pxtfut = 0;	/* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */
1498 
1499 	/* allow just these interrupts */
1500 	/* guruplug generates Irxerr interrupts continually */
1501 	reg->irqmask = Isum | Irx | Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
1502 	reg->irqemask = IEsum | IEtxerrq(Qno) | IEphystschg | IErxoverrun |
1503 		IEtxunderrun;
1504 
1505 	reg->irqe = 0;
1506 	reg->euirqmask = 0;
1507 	coherence();
1508 	reg->irq = 0;
1509 	reg->euirq = 0;
1510 	/* send errors to end of memory */
1511 //	reg->euda = PHYSDRAM + 512*MB - 8*1024;
1512 	reg->euda = 0;
1513 	reg->eudid = Attrcs1 << 4 | Targdram;
1514 
1515 //	archetheraddr(ether, ctlr->reg, Qno);	/* 2nd location */
1516 
1517 	reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
1518 	reg->portcfgx = 0;
1519 	coherence();
1520 
1521 	/*
1522 	 * start the controller running.
1523 	 * turn the port on, kick the receiver.
1524 	 */
1525 
1526 	reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
1527 	/* do this only when the controller is quiescent */
1528 	reg->psc0 = PSC0porton | PSC0an_flctloff |
1529 		PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
1530 	coherence();
1531 	for (i = 0; i < 4000; i++)		/* magic delay */
1532 		;
1533 
1534 	ether->link = (reg->ps0 & PS0linkup) != 0;
1535 
1536 	/* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
1537 	reg->pmtu = 0;
1538 	etheractive(ether);
1539 
1540 	snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
1541 	kproc(name, rcvproc, ether);
1542 
1543 	reg->rqc = Rxqon(Qno);
1544 	coherence();
1545 }
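/*
 * For reference, not from the manual: the rx coalescing value fed to
 * SDCipgintrx() in ctlrinit() is CLOCKFREQ/(Maxrxintrsec*64).  Assuming
 * the usual 200MHz kirkwood TCLK for CLOCKFREQ, that is
 * 200000000/(20000*64) = 156 units of 64 clocks, i.e. roughly 50µs between
 * receive interrupts, comfortably inside the 16-bit limit that the panic
 * above checks for.
 */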
1546 
1547 static void
1548 attach(Ether* ether)
1549 {
1550 	Ctlr *ctlr = ether->ctlr;
1551 
1552 	lock(&ctlr->initlock);
1553 	if(ctlr->init == 0) {
1554 		ctlrinit(ether);
1555 		ctlr->init = 1;
1556 	}
1557 	unlock(&ctlr->initlock);
1558 }
1559 
1560 /*
1561  * statistics goo.
1562  * mib registers clear on read.
1563  */
1564 
1565 static void
1566 getmibstats(Ctlr *ctlr)
1567 {
1568 	Gbereg *reg = ctlr->reg;
1569 
1570 	/*
1571 	 * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
1572 	 * can't be read correctly, so read the low long frequently
1573 	 * (every 30 seconds or less), thus avoiding overflow into high long.
1574 	 */
1575 	ctlr->rxby	+= reg->rxbylo;
1576 	ctlr->txby	+= reg->txbylo;
1577 
1578 	ctlr->badrxby	+= reg->badrxby;
1579 	ctlr->mactxerr	+= reg->mactxerr;
1580 	ctlr->rxpkt	+= reg->rxpkt;
1581 	ctlr->badrxpkt	+= reg->badrxpkt;
1582 	ctlr->rxbcastpkt+= reg->rxbcastpkt;
1583 	ctlr->rxmcastpkt+= reg->rxmcastpkt;
1584 	ctlr->rx64	+= reg->rx64;
1585 	ctlr->rx65_127	+= reg->rx65_127;
1586 	ctlr->rx128_255	+= reg->rx128_255;
1587 	ctlr->rx256_511	+= reg->rx256_511;
1588 	ctlr->rx512_1023+= reg->rx512_1023;
1589 	ctlr->rx1024_max+= reg->rx1024_max;
1590 	ctlr->txpkt	+= reg->txpkt;
1591 	ctlr->txcollpktdrop+= reg->txcollpktdrop;
1592 	ctlr->txmcastpkt+= reg->txmcastpkt;
1593 	ctlr->txbcastpkt+= reg->txbcastpkt;
1594 	ctlr->badmacctlpkts+= reg->badmacctlpkts;
1595 	ctlr->txflctl	+= reg->txflctl;
1596 	ctlr->rxflctl	+= reg->rxflctl;
1597 	ctlr->badrxflctl+= reg->badrxflctl;
1598 	ctlr->rxundersized+= reg->rxundersized;
1599 	ctlr->rxfrags	+= reg->rxfrags;
1600 	ctlr->rxtoobig	+= reg->rxtoobig;
1601 	ctlr->rxjabber	+= reg->rxjabber;
1602 	ctlr->rxerr	+= reg->rxerr;
1603 	ctlr->crcerr	+= reg->crcerr;
1604 	ctlr->collisions+= reg->collisions;
1605 	ctlr->latecoll	+= reg->latecoll;
1606 }
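/*
 * Back-of-the-envelope for the errata workaround above, not from the
 * datasheet: at gigabit line rate the port moves at most about 125MB/s,
 * so a 32-bit byte counter can wrap in as little as 2^32/125000000, about
 * 34 seconds.  Reading (and thereby clearing) the low longs at least every
 * 30 seconds, which rcvproc()'s 10-second tsleep ensures, keeps them from
 * overflowing into the unreadable high longs.
 */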
1607 
1608 long
1609 ifstat(Ether *ether, void *a, long n, ulong off)
1610 {
1611 	Ctlr *ctlr = ether->ctlr;
1612 	Gbereg *reg = ctlr->reg;
1613 	char *buf, *p, *e;
1614 
1615 	buf = p = malloc(READSTR);
1616 	if(p == nil)
1617 		panic("ether1116 ifstat: no memory");
1618 	e = p + READSTR;
1619 
1620 	ilock(ctlr);
1621 	getmibstats(ctlr);
1622 
1623 	ctlr->intrs += ctlr->newintrs;
1624 	p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
1625 	p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
1626 	ctlr->newintrs = 0;
1627 	p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
1628 	p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
1629 
1630 	ctlr->rxdiscard += reg->pxdfc;
1631 	ctlr->rxoverrun += reg->pxofc;
1632 	p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
1633 	p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
1634 	p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
1635 
1636 	p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
1637 	p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
1638 	/* p = seprint(p, e, "speed: %d mbps\n", ); */
1639 
1640 	p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
1641 	p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
1642 	p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
1643 	p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
1644 	p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
1645 	p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
1646 	p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
1647 	p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
1648 	p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
1649 	p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
1650 	p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
1651 	p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
1652 	p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
1653 	p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
1654 	p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
1655 	p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
1656 	p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
1657 	p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
1658 	p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
1659 
1660 	p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
1661 	p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
1662 	p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
1663 	p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
1664 	p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
1665 	p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
1666 	p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
1667 	p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
1668 	p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
1669 	p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
1670 	p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
1671 	p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
1672 	USED(p);
1673 	iunlock(ctlr);
1674 
1675 	n = readstr(off, a, n, buf);
1676 	free(buf);
1677 	return n;
1678 }
1679 
1680 
1681 static int
1682 reset(Ether *ether)
1683 {
1684 	Ctlr *ctlr;
1685 
1686 	ether->ctlr = ctlr = malloc(sizeof *ctlr);
1687 	if (ctlr == nil)
1688 		panic("ether1116 reset: no memory");
1689 	switch(ether->ctlrno) {
1690 	case 0:
1691 		ether->irq = IRQ0gbe0sum;
1692 		break;
1693 	case 1:
1694 		ether->irq = IRQ0gbe1sum;
1695 		break;
1696 	default:
1697 		panic("ether1116: bad ether ctlr #%d", ether->ctlrno);
1698 	}
1699 	ctlr->reg = (Gbereg*)soc.ether[ether->ctlrno];
1700 
1701 	/* need this for guruplug, at least */
1702 	*(ulong *)soc.iocfg |= 1 << 7 | 1 << 15;	/* io cfg 0: 1.8v gbe */
1703 	coherence();
1704 
1705 	ctlr->ether = ether;
1706 	ctlrs[ether->ctlrno] = ctlr;
1707 
1708 	shutdown(ether);
1709 	/* ensure that both interfaces are set to RGMII before calling mii */
1710 	((Gbereg*)soc.ether[0])->psc1 |= PSC1rgmii;
1711 	((Gbereg*)soc.ether[1])->psc1 |= PSC1rgmii;
1712 	coherence();
1713 
1714 	/* Set phy address of the port */
1715 	ctlr->port = ether->ctlrno;
1716 	ctlr->reg->phy = ether->ctlrno;
1717 	coherence();
1718 	ether->port = (uintptr)ctlr->reg;
1719 
1720 	if(kirkwoodmii(ether) < 0){
1721 		free(ctlr);
1722 		ether->ctlr = nil;
1723 		return -1;
1724 	}
1725 	miiphyinit(ctlr->mii);
1726 	archetheraddr(ether, ctlr->reg, Qno);	/* original location */
1727 	if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
1728 iprint("ether1116: reset: zero ether->ea\n");
1729 		free(ctlr);
1730 		ether->ctlr = nil;
1731 		return -1;			/* no rj45 for this ether */
1732 	}
1733 
1734 	ether->attach = attach;
1735 	ether->transmit = transmit;
1736 	ether->interrupt = interrupt;
1737 	ether->ifstat = ifstat;
1738 	ether->shutdown = shutdown;
1739 	ether->ctl = ctl;
1740 
1741 	ether->arg = ether;
1742 	ether->promiscuous = promiscuous;
1743 	ether->multicast = multicast;
1744 	return 0;
1745 }
1746 
1747 void
1748 ether1116link(void)
1749 {
1750 	addethercard("88e1116", reset);
1751 }
1752