xref: /openbsd-src/sys/arch/octeon/dev/if_cnmac.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_cnmac.c,v 1.14 2014/05/10 22:25:16 jasper Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Internet Initiative Japan, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include "bpfilter.h"
29 
30 /*
31  * XXXSEIL
32  * If no free send buffer is available, free all the sent buffer and bail out.
33  */
34 #define OCTEON_ETH_SEND_QUEUE_CHECK
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/pool.h>
39 #include <sys/proc.h>
40 #include <sys/mbuf.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/queue.h>
48 #include <sys/conf.h>
49 #include <sys/stdint.h> /* uintptr_t */
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #ifdef MBUF_TIMESTAMP
53 #include <sys/time.h>
54 #endif
55 
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <net/route.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 
71 #include <machine/bus.h>
72 #include <machine/intr.h>
73 #include <machine/endian.h>
74 #include <machine/octeonvar.h>
75 #include <machine/octeon_model.h>
76 
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 
80 #include <octeon/dev/cn30xxasxreg.h>
81 #include <octeon/dev/cn30xxciureg.h>
82 #include <octeon/dev/cn30xxnpireg.h>
83 #include <octeon/dev/cn30xxgmxreg.h>
84 #include <octeon/dev/cn30xxipdreg.h>
85 #include <octeon/dev/cn30xxpipreg.h>
86 #include <octeon/dev/cn30xxpowreg.h>
87 #include <octeon/dev/cn30xxfaureg.h>
88 #include <octeon/dev/cn30xxfpareg.h>
89 #include <octeon/dev/cn30xxbootbusreg.h>
90 #include <octeon/dev/cn30xxfpavar.h>
91 #include <octeon/dev/cn30xxgmxvar.h>
92 #include <octeon/dev/cn30xxfauvar.h>
93 #include <octeon/dev/cn30xxpowvar.h>
94 #include <octeon/dev/cn30xxipdvar.h>
95 #include <octeon/dev/cn30xxpipvar.h>
96 #include <octeon/dev/cn30xxpkovar.h>
97 #include <octeon/dev/cn30xxasxvar.h>
98 #include <octeon/dev/cn30xxsmivar.h>
99 #include <octeon/dev/iobusvar.h>
100 #include <octeon/dev/if_cnmacvar.h>
101 
102 #ifdef OCTEON_ETH_DEBUG
103 #define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
104 #define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
105 #else
106 #define	OCTEON_ETH_KASSERT(x)
107 #define	OCTEON_ETH_KDASSERT(x)
108 #endif
109 
110 /*
111  * Set the PKO to think command buffers are an odd length.  This makes it so we
112  * never have to divide a command across two buffers.
113  */
114 #define OCTEON_POOL_NWORDS_CMD	\
115 	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
116 #define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
117 
118 #if NBPFILTER > 0
119 #define	OCTEON_ETH_TAP(ifp, m, dir) \
120 	do { \
121 		/* Pass this up to any BPF listeners. */ \
122 		if ((ifp)->if_bpf) \
123 			bpf_mtap((ifp)->if_bpf, (m), (dir)); \
124 	} while (0/* CONSTCOND */)
125 #else
126 #define	OCTEON_ETH_TAP(ifp, m, dir)
127 #endif /* NBPFILTER > 0 */
128 
129 static void		octeon_eth_buf_init(struct octeon_eth_softc *);
130 
131 static int	octeon_eth_match(struct device *, void *, void *);
132 static void	octeon_eth_attach(struct device *, struct device *, void *);
133 static void	octeon_eth_pip_init(struct octeon_eth_softc *);
134 static void	octeon_eth_ipd_init(struct octeon_eth_softc *);
135 static void	octeon_eth_pko_init(struct octeon_eth_softc *);
136 static void	octeon_eth_asx_init(struct octeon_eth_softc *);
137 static void	octeon_eth_smi_init(struct octeon_eth_softc *);
138 
139 static void	octeon_eth_board_mac_addr(uint8_t *);
140 
141 static int	octeon_eth_mii_readreg(struct device *, int, int);
142 static void	octeon_eth_mii_writereg(struct device *, int, int, int);
143 static void	octeon_eth_mii_statchg(struct device *);
144 
145 static int	octeon_eth_mediainit(struct octeon_eth_softc *);
146 static void	octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
147 static int	octeon_eth_mediachange(struct ifnet *);
148 
149 static void	octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
150 static void	octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
151 static void	octeon_eth_send_queue_flush(struct octeon_eth_softc *);
152 static void	octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
153 static int	octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
154 static void	octeon_eth_send_queue_add(struct octeon_eth_softc *,
155 			    struct mbuf *, uint64_t *);
156 static void	octeon_eth_send_queue_del(struct octeon_eth_softc *,
157 			    struct mbuf **, uint64_t **);
158 static int	octeon_eth_buf_free_work(struct octeon_eth_softc *,
159 			    uint64_t *, uint64_t);
160 static void	octeon_eth_buf_ext_free_m(caddr_t, u_int, void *);
161 static void	octeon_eth_buf_ext_free_ext(caddr_t, u_int, void *);
162 
163 static int	octeon_eth_ioctl(struct ifnet *, u_long, caddr_t);
164 static void	octeon_eth_watchdog(struct ifnet *);
165 static int	octeon_eth_init(struct ifnet *);
166 static int	octeon_eth_stop(struct ifnet *, int);
167 static void	octeon_eth_start(struct ifnet *);
168 
169 static int	octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
170 			    uint64_t);
171 static uint64_t	octeon_eth_send_makecmd_w1(int, paddr_t);
172 static uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
173 			    int);
174 static int	octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
175 			    struct mbuf *, uint64_t *, int *);
176 static int	octeon_eth_send_makecmd(struct octeon_eth_softc *,
177 			    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
178 static int	octeon_eth_send_buf(struct octeon_eth_softc *,
179 			    struct mbuf *, uint64_t *);
180 static int	octeon_eth_send(struct octeon_eth_softc *,
181 			    struct mbuf *);
182 
183 static int	octeon_eth_reset(struct octeon_eth_softc *);
184 static int	octeon_eth_configure(struct octeon_eth_softc *);
185 static int	octeon_eth_configure_common(struct octeon_eth_softc *);
186 
187 static void	octeon_eth_tick_free(void *arg);
188 static void	octeon_eth_tick_misc(void *);
189 
190 static int	octeon_eth_recv_mbuf(struct octeon_eth_softc *,
191 			    uint64_t *, struct mbuf **);
192 static int	octeon_eth_recv_check_code(struct octeon_eth_softc *,
193 			    uint64_t);
194 #if 0 /* not used */
195 static int      octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
196 			    uint64_t);
197 #endif
198 static int	octeon_eth_recv_check_link(struct octeon_eth_softc *,
199 			    uint64_t);
200 static int	octeon_eth_recv_check(struct octeon_eth_softc *,
201 			    uint64_t);
202 static int	octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
203 static void		octeon_eth_recv_intr(void *, uint64_t *);
204 
205 /* device driver context */
206 static struct	octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
207 static void	*octeon_eth_pow_recv_ih;
208 
209 /* sysctl'able parameters */
210 int		octeon_eth_param_pko_cmd_w0_n2 = 1;
211 int		octeon_eth_param_pip_dyn_rs = 1;
212 int		octeon_eth_param_redir = 0;
213 int		octeon_eth_param_pktbuf = 0;
214 int		octeon_eth_param_rate = 0;
215 int		octeon_eth_param_intr = 0;
216 
/* Autoconf glue: match/attach entry points and driver metadata for cnmac(4). */
struct cfattach cnmac_ca = {sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL};

struct cfdriver cnmac_cd = {NULL, "cnmac", DV_IFNET};
221 
222 #ifdef OCTEON_ETH_DEBUG
223 
/*
 * Debug event counters.  One instance per softc is attached via
 * OCTEON_EVCNT_ATTACH_EVCNTS() at the end of octeon_eth_attach().
 */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx,			MISC, NULL, "rx"),
	_ENTRY(rxint,			INTR, NULL, "rx intr"),
	_ENTRY(rxrs,			MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc,		MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput,		MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc,		MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput,		MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode,		MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix,		MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb,		MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink,		MISC, NULL, "rx link error"),
	_ENTRY(rxerroff,		MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort,		MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble,	MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc,		MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress,		MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp,		MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok,			MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx,			MISC, NULL, "tx"),
	_ENTRY(txadd,			MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc,		MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget,		MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc,		MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget,		MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput,		MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel,			MISC, NULL, "tx del"),
	_ENTRY(txerr,			MISC, NULL, "tx error"),
	_ENTRY(txerrcmd,		MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf,		MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink,		MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd,		MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
260 #endif
261 
262 /* ---- buffer management */
263 
/*
 * Static FPA pool configuration: pool number, element size and element
 * count for the packet, work-queue-entry, PKO command and
 * scatter/gather pools.  Consumed once by octeon_eth_buf_init().
 */
static const struct octeon_eth_pool_param {
	int			poolno;
	size_t			size;
	size_t			nelems;
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
/* Pool handles, indexed by OCTEON_POOL_NO_*. */
struct cn30xxfpa_buf	*octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
281 
282 uint64_t octeon_eth_mac_addr = 0;
283 uint32_t octeon_eth_mac_addr_offset = 0;
284 
285 static void
286 octeon_eth_buf_init(struct octeon_eth_softc *sc)
287 {
288 	static int once;
289 	int i;
290 	const struct octeon_eth_pool_param *pp;
291 	struct cn30xxfpa_buf *fb;
292 
293 	if (once == 1)
294 		return;
295 	once = 1;
296 
297 	for (i = 0; i < (int)nitems(octeon_eth_pool_params); i++) {
298 		pp = &octeon_eth_pool_params[i];
299 		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
300 		octeon_eth_pools[i] = fb;
301 	}
302 }
303 
304 /* ---- autoconf */
305 
306 static int
307 octeon_eth_match(struct device *parent, void *match, void *aux)
308 {
309 	struct cfdata *cf = (struct cfdata *)match;
310 	struct cn30xxgmx_attach_args *ga = aux;
311 
312 	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
313 		return 0;
314 	}
315 	return 1;
316 }
317 
/*
 * Autoconf attach: bring up one GMX port as an ethernet interface.
 * Copies the attach arguments into the softc, initializes the on-chip
 * submodules (PIP/IPD/PKO/ASX/SMI), sets up media, and attaches the
 * ifnet.  The order of hardware initialization below is significant.
 */
static void
octeon_eth_attach(struct device *parent, struct device *self, void *aux)
{
	struct octeon_eth_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	/* Capture bus/port identity from the GMX attach args. */
	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_phy_addr = ga->ga_phy_addr;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	octeon_eth_board_mac_addr(enaddr);
	printf(", address %s\n", ether_sprintf(enaddr));

	/* Register this softc for per-port lookup by the receive interrupt. */
	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	cn30xxgmx_stats_init(sc->sc_gmx_port);

	timeout_set(&sc->sc_tick_misc_ch, octeon_eth_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, octeon_eth_tick_free, sc);

	/* FAU register that counts hardware-completed PKO transmissions. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	/* Bring up the on-chip packet I/O submodules for this port. */
	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Let the GMX port layer reach back into our submodules. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	octeon_eth_mediainit(sc);

	/* Fill in and attach the network interface. */
	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	/* XXX rate-limit caps for the receive error report paths */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* All ports share one POW receive interrupt handler. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = cn30xxpow_intr_establish(OCTEON_POW_GROUP_PIP,
		    IPL_NET, octeon_eth_recv_intr, NULL, NULL, sc->sc_dev.dv_xname);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    sc->sc_dev.dv_xname);
}
412 
413 /* ---- submodules */
414 
415 /* XXX */
416 static void
417 octeon_eth_pip_init(struct octeon_eth_softc *sc)
418 {
419 	struct cn30xxpip_attach_args pip_aa;
420 
421 	pip_aa.aa_port = sc->sc_port;
422 	pip_aa.aa_regt = sc->sc_regt;
423 	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
424 	pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
425 	pip_aa.aa_ip_offset = sc->sc_ip_offset;
426 	cn30xxpip_init(&pip_aa, &sc->sc_pip);
427 }
428 
429 /* XXX */
430 static void
431 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
432 {
433 	struct cn30xxipd_attach_args ipd_aa;
434 
435 	ipd_aa.aa_port = sc->sc_port;
436 	ipd_aa.aa_regt = sc->sc_regt;
437 	ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
438 	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
439 	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
440 }
441 
442 /* XXX */
443 static void
444 octeon_eth_pko_init(struct octeon_eth_softc *sc)
445 {
446 	struct cn30xxpko_attach_args pko_aa;
447 
448 	pko_aa.aa_port = sc->sc_port;
449 	pko_aa.aa_regt = sc->sc_regt;
450 	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
451 	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
452 	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
453 	cn30xxpko_init(&pko_aa, &sc->sc_pko);
454 }
455 
456 /* XXX */
457 static void
458 octeon_eth_asx_init(struct octeon_eth_softc *sc)
459 {
460 	struct cn30xxasx_attach_args asx_aa;
461 
462 	asx_aa.aa_port = sc->sc_port;
463 	asx_aa.aa_regt = sc->sc_regt;
464 	cn30xxasx_init(&asx_aa, &sc->sc_asx);
465 }
466 
467 static void
468 octeon_eth_smi_init(struct octeon_eth_softc *sc)
469 {
470 	struct cn30xxsmi_attach_args smi_aa;
471 
472 	smi_aa.aa_port = sc->sc_port;
473 	smi_aa.aa_regt = sc->sc_regt;
474 	cn30xxsmi_init(&smi_aa, &sc->sc_smi);
475 	cn30xxsmi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
476 }
477 
478 /* ---- XXX */
479 
/*
 * Derive this port's MAC address from the board's base MAC address.
 *
 * On the first call the 48-bit base address is read from the boot
 * descriptor into the low 6 bytes of octeon_eth_mac_addr, and a
 * model-dependent starting offset is chosen.  Each later call hands
 * out the next consecutive address.
 *
 * NOTE(review): `enaddr += octeon_eth_mac_addr_offset` below advances
 * the caller's 6-byte buffer pointer on the first call; with a
 * non-zero offset the final memcpy() would write past the buffer.
 * Harmless only while the offset is 0 — verify on CN56XX-family
 * boards where the offset is 1.
 */
static void
octeon_eth_board_mac_addr(uint8_t *enaddr)
{
	extern struct boot_info *octeon_boot_info;
	int id;

	/* Initialize MAC addresses from the global address base. */
	if (octeon_eth_mac_addr == 0) {
		memcpy((uint8_t *)&octeon_eth_mac_addr + 2,
		       octeon_boot_info->mac_addr_base, 6);

		/*
		 * Should be allowed to fail hard if couldn't read the
		 * mac_addr_base address...
		 */
		if (octeon_eth_mac_addr == 0)
			return;

		/*
		 * Calculate the offset from the mac_addr_base that will be used
		 * for the next sc->sc_port.
		 */
		id = octeon_get_chipid();

		switch (octeon_model_family(id)) {
		case OCTEON_MODEL_FAMILY_CN56XX:
			octeon_eth_mac_addr_offset = 1;
			break;
		/*
		case OCTEON_MODEL_FAMILY_CN52XX:
		case OCTEON_MODEL_FAMILY_CN63XX:
			octeon_eth_mac_addr_offset = 2;
			break;
		*/
		default:
			octeon_eth_mac_addr_offset = 0;
			break;
		}

		enaddr += octeon_eth_mac_addr_offset;
	}

	/* No more MAC addresses to assign. */
	if (octeon_eth_mac_addr_offset >= octeon_boot_info->mac_addr_count)
		return;

	if (enaddr)
		memcpy(enaddr, (uint8_t *)&octeon_eth_mac_addr + 2, 6);

	/* Advance to the next consecutive address for the next port. */
	octeon_eth_mac_addr++;
	octeon_eth_mac_addr_offset++;
}
532 
533 /* ---- media */
534 
535 static int
536 octeon_eth_mii_readreg(struct device *self, int phy_no, int reg)
537 {
538 	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
539 	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
540 }
541 
542 static void
543 octeon_eth_mii_writereg(struct device *self, int phy_no, int reg, int value)
544 {
545 	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
546 	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
547 }
548 
/*
 * MII status change hook.  The PKO and GMX ports must be quiesced and
 * the MAC reset before new link parameters take effect; keep the
 * disable / reset / enable ordering intact.
 */
static void
octeon_eth_mii_statchg(struct device *self)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Stop output and the MAC while reconfiguring. */
	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* Reprogram the RX filter only if the interface is running. */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}
566 
/*
 * Set up ifmedia and probe for a PHY via MII.  Falls back to a fixed
 * manual medium when no PHY answers on the bus.  Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached: provide a single manual medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		/* A PHY was found: default to autonegotiation. */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}
595 
596 static void
597 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
598 {
599 	struct octeon_eth_softc *sc = ifp->if_softc;
600 
601 	mii_pollstat(&sc->sc_mii);
602 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
603 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
604 	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
605 	    sc->sc_gmx_port->sc_port_flowflags;
606 }
607 
608 static int
609 octeon_eth_mediachange(struct ifnet *ifp)
610 {
611 	struct octeon_eth_softc *sc = ifp->if_softc;
612 
613 	if ((ifp->if_flags & IFF_UP) == 0)
614 		return 0;
615 
616 	return mii_mediachg(&sc->sc_mii);
617 }
618 
619 /* ---- send buffer garbage collection */
620 
/*
 * Start an asynchronous fetch of the FAU "done" counter so the value
 * is already in flight by the time _flush_fetch() reads it.
 */
static void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
628 
/*
 * Complete the FAU fetch started by _flush_prefetch() and latch the
 * count of hardware-completed transmissions (always non-positive).
 */
static void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef  OCTEON_ETH_DEBUG
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
641 
/*
 * Release resources for every packet the hardware has finished
 * sending: return each gather buffer to the FPA and free its mbuf.
 * sc_hard_done_cnt is non-positive; its magnitude is the number of
 * completed sends.  The FAU counter is credited back asynchronously;
 * _flush_sync() waits for that increment to land.
 */
static void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		/* Return the gather buffer to the scatter/gather pool. */
		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, CKSEG0_TO_PHYS(gbuf));
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);
	}

	/* Credit the freed count back into the FAU "done" register. */
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
666 
/*
 * Wait for the FAU increment issued by _flush() to complete, then
 * retire the flushed entries from the software request counter.
 */
static void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX */
	cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX */

	sc->sc_flush = 0;
}
683 
/*
 * Check whether the send queue can take another packet.  When the
 * outstanding count (queued minus hardware-completed) reaches the
 * gather queue limit, reclaim finished sends and report "full".
 * Returns 1 when the caller must back off, 0 otherwise.
 */
static int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* sc_hard_done_cnt is non-positive, so this is queued - done. */
	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
702 
703 /*
704  * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
705  * buffer.  Other mbuf members may be used by m_freem(), so don't touch them!
706  */
707 
/*
 * Overlay type for queued mbufs: the SIMPLEQ linkage lives on top of
 * m_nextpkt and the gather-buffer pointer on top of the pkthdr rcvif
 * field.  The pad arrays only position the real members at the right
 * offsets — see the comment above about which mbuf fields are safe.
 */
struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;
		struct {
			/* Queue linkage aliased onto m_nextpkt. */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* Gather-buffer pointer aliased onto pkthdr rcvif. */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, M_dat.MH.MH_pkthdr.rcvif)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
723 
/*
 * Append an mbuf (and its associated gather buffer) to the pending
 * send queue.  The mbuf itself stores both the linkage and the gather
 * pointer; see struct _send_queue_entry.
 */
static void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	/* Track external-storage mbufs whose free callbacks must drain. */
	if (m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
740 
/*
 * Remove the oldest entry from the pending send queue, returning the
 * original mbuf in *rm and its gather buffer in *rgbuf.  The queue
 * must not be empty.
 */
static void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	if ((*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
761 
/*
 * Return a received work queue entry — and, when WORD2 says the WQE
 * references a packet buffer, that buffer too — to their FPA pools.
 * Always returns 0.
 */
static int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = CKSEG0_TO_PHYS(work[3] & PIP_WQE_WORD3_ADDR);
		/* Round down to the 2KB packet-buffer boundary. */
		start_buffer = addr & ~(2048 - 1);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
783 
/*
 * External-storage free callback for mbufs whose data lives inside the
 * work queue entry itself (dynamic short packets): only the WQE goes
 * back to its pool.  arg is the WQE pointer.
 */
static void
octeon_eth_buf_ext_free_m(caddr_t buf, u_int size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the softc pointer, stored at receive time. */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	splx(s);
}
800 
/*
 * External-storage free callback for mbufs whose data lives in an FPA
 * packet buffer: return both the work queue entry (arg) and the packet
 * buffer (buf) to their pools.
 */
static void
octeon_eth_buf_ext_free_ext(caddr_t buf, u_int size,
    void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the softc pointer, stored at receive time. */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, CKSEG0_TO_PHYS(buf));
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	splx(s);
}
819 
820 /* ---- ifnet interfaces */
821 
/*
 * Interface ioctl handler.  Runs at splnet().  Media requests are
 * routed to ifmedia/MII, address and flag changes (re)initialize the
 * chip, and everything else falls through to ether_ioctl().
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			octeon_eth_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				octeon_eth_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				octeon_eth_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW implies pause in both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
				ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* ENETRESET: only the RX filter needs reprogramming. */
	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	/* Kick the transmitter in case packets queued while we were busy. */
	octeon_eth_start(ifp);

	splx(s);
	return (error);
}
889 
890 /* ---- send (output) */
891 
/*
 * Build PKO command word 0.  Both FAU registers are updated on
 * completion (s0/s1 set) so software can detect finished sends;
 * "g" selects gather mode when the packet spans multiple segments.
 */
static uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return cn30xxpko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		0, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
906 
/*
 * Build PKO command word 1: the (size, physical address) pair of
 * either the packet data (link mode) or the gather list (gather mode).
 */
static uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
		0, 0,				/* i, back */
		FPA_GATHER_BUFFER_POOL,		/* pool */
		size, addr);			/* size, addr */
}
915 
916 /* TODO: use bus_dma(9) */
917 
918 #define KVTOPHYS(addr)	if_cnmac_kvtophys((vaddr_t)(addr))
919 paddr_t if_cnmac_kvtophys(vaddr_t);
920 
921 paddr_t
922 if_cnmac_kvtophys(vaddr_t kva)
923 {
924 	if (IS_XKPHYS(kva))
925 		return XKPHYS_TO_PHYS(kva);
926 	else if (kva >= CKSEG0_BASE && kva < CKSEG0_BASE + CKSEG_SIZE)
927 		return CKSEG0_TO_PHYS(kva);
928 	else if (kva >= CKSEG1_BASE && kva < CKSEG1_BASE + CKSEG_SIZE)
929 		return CKSEG1_TO_PHYS(kva);
930 
931 	printf("kva %lx is not be able to convert physical address\n", kva);
932 	panic("if_cnmac_kvtophys");
933 }
934 
/*
 * Fill the gather buffer with one PKO word-1 descriptor per mbuf
 * segment.  A segment that crosses a 4KB page boundary is split in
 * two, since its physical pages may not be contiguous.  Returns 0 on
 * success with the segment count in *rsegs; returns 1 when the packet
 * would need more than 63 descriptors (the gather list limit).
 */
static int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uint32_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX emit the part up to the page boundary first */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    KVTOPHYS(m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* Emit the (remaining) part of the segment. */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    KVTOPHYS((caddr_t)m->m_data + rlen));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
988 
/*
 * Build the two PKO command words for an outgoing packet.  The gather
 * list in gbuf is always populated; word 1 points at the packet data
 * directly when there is only one segment ("link mode"), otherwise at
 * the gather list.  Returns non-zero (after logging) if the packet has
 * too many segments.
 */
static int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	pko_cmd_w1 = octeon_eth_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		CKSEG0_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
1025 
/*
 * Write the two-word PKO command into the per-port command buffer and
 * ring the transmit doorbell.  When the current command buffer is about
 * to run out, a fresh buffer is fetched from the FPA and chained by
 * storing its physical address in the last word of the old buffer.
 * Returns non-zero if no new command buffer could be allocated.
 */
static int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	/* sc_cmdptr holds a physical address; access it through CKSEG0 */
	cmdptr = (uint64_t *)PHYS_TO_CKSEG0(sc->sc_cmdptr.cmdptr);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	/* the final word of each buffer is reserved for the chain pointer */
	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		/* buffer exhausted: chain in a fresh one from the FPA */
		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	/* tell the PKO that two new command words are available */
	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
1067 
1068 static int
1069 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1070     uint64_t *gbuf)
1071 {
1072 	int result = 0, error;
1073 	uint64_t pko_cmd_w0, pko_cmd_w1;
1074 
1075 	error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1076 	if (error != 0) {
1077 		/* already logging */
1078 		OCTEON_EVCNT_INC(sc, txerrmkcmd);
1079 		result = error;
1080 		goto done;
1081 	}
1082 
1083 	error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
1084 	if (error != 0) {
1085 		/* already logging */
1086 		OCTEON_EVCNT_INC(sc, txerrcmd);
1087 		result = error;
1088 	}
1089 
1090 done:
1091 	return result;
1092 }
1093 
1094 static int
1095 octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
1096 {
1097 	paddr_t gaddr = 0;
1098 	uint64_t *gbuf = NULL;
1099 	int result = 0, error;
1100 
1101 	OCTEON_EVCNT_INC(sc, tx);
1102 
1103 	gaddr = cn30xxfpa_buf_get_paddr(octeon_eth_fb_sg);
1104 	if (gaddr == 0) {
1105 		log(LOG_WARNING,
1106 		    "%s: cannot allocate gather buffer from free pool allocator\n",
1107 		    sc->sc_dev.dv_xname);
1108 		OCTEON_EVCNT_INC(sc, txerrgbuf);
1109 		result = 1;
1110 		goto done;
1111 	}
1112 	OCTEON_EVCNT_INC(sc, txbufgbget);
1113 
1114 	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_CKSEG0(gaddr);
1115 
1116 	OCTEON_ETH_KASSERT(gbuf != NULL);
1117 
1118 	error = octeon_eth_send_buf(sc, m, gbuf);
1119 	if (error != 0) {
1120 		/* already logging */
1121 		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
1122 		OCTEON_EVCNT_INC(sc, txbufgbput);
1123 		result = error;
1124 		goto done;
1125 	}
1126 
1127 	octeon_eth_send_queue_add(sc, m, gbuf);
1128 
1129 done:
1130 	return result;
1131 }
1132 
/*
 * Transmit start routine: drain the interface send queue into the PKO.
 * If the link is down all queued packets are dequeued and dropped.
 * The prefetch/fetch pairs issue the IOBDMA request for the FAU "done"
 * counter early so its result is ready by the time the send queue is
 * examined (performance tuning); every prefetch must be matched by a
 * fetch before returning.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
#if 0
#ifdef DDB
			m_print(m, "cd", printf);
#endif
			printf("%s: drop\n", sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		/* peek first: the packet stays queued if we bail out below */
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			/* the prefetch above was consumed by the fetch */
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT);

		/* XXX garbage-collect sent buffers when enough accumulated */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
		  	  "%s: failed to transmit packet\n",
		    	  sc->sc_dev.dv_xname);
			OCTEON_EVCNT_INC(sc, txerr);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

/*
 * XXXSEIL
 * Don't schedule send-buffer-free callout every time - those buffers are freed
 * by "free tick".  This makes some packets like NFS slower, but it normally
 * doesn't happen on SEIL.
 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* consume the outstanding prefetch before leaving */
	octeon_eth_send_queue_flush_fetch(sc);
}
1233 
1234 static void
1235 octeon_eth_watchdog(struct ifnet *ifp)
1236 {
1237 	struct octeon_eth_softc *sc = ifp->if_softc;
1238 
1239 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1240 
1241 	octeon_eth_configure(sc);
1242 
1243 	SET(ifp->if_flags, IFF_RUNNING);
1244 	CLR(ifp->if_flags, IFF_OACTIVE);
1245 	ifp->if_timer = 0;
1246 
1247 	octeon_eth_start(ifp);
1248 }
1249 
/*
 * Bring the interface up.  On the very first call the whole datapath
 * is configured and the PKO/IPD units are enabled; on later calls only
 * the GMX port is re-enabled.  Always reprograms the media and the RX
 * filter and arms the periodic ticks.  Always returns 0.
 */
static int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	return 0;
}
1282 
1283 static int
1284 octeon_eth_stop(struct ifnet *ifp, int disable)
1285 {
1286 	struct octeon_eth_softc *sc = ifp->if_softc;
1287 
1288 	timeout_del(&sc->sc_tick_misc_ch);
1289 	timeout_del(&sc->sc_tick_free_ch);
1290 	timeout_del(&sc->sc_resume_ch);
1291 
1292 	mii_down(&sc->sc_mii);
1293 
1294 	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);
1295 
1296 	/* Mark the interface as down and cancel the watchdog timer. */
1297 	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1298 	ifp->if_timer = 0;
1299 
1300 	return 0;
1301 }
1302 
1303 /* ---- misc */
1304 
1305 #define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)
1306 
/*
 * Reset the GMX port settings: speed, flow control, timing and
 * board-specific workarounds, in that order.  Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);
	cn30xxgmx_reset_board(sc->sc_gmx_port);

	return 0;
}
1317 
/*
 * (Re)configure the datapath for this port.  The GMX port is disabled
 * around the update, then the port-level PKO/PIP units are programmed
 * and statistics registers put in read-and-clear mode.  Always
 * returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* chip-global units are configured only once (see _common) */
	octeon_eth_configure_common(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpip_port_config(sc->sc_pip);

	/* make GMX statistics registers clear on read */
	cn30xxgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	cn30xxgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1338 
1339 static int
1340 octeon_eth_configure_common(struct octeon_eth_softc *sc)
1341 {
1342 	static int once;
1343 
1344 	if (once == 1)
1345 		return 0;
1346 	once = 1;
1347 
1348 #if 0
1349 	octeon_eth_buf_init(sc);
1350 #endif
1351 
1352 	cn30xxipd_config(sc->sc_ipd);
1353 	cn30xxpko_config(sc->sc_pko);
1354 
1355 	cn30xxpow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1356 
1357 	return 0;
1358 }
1359 
/*
 * Wrap a received work queue entry in an mbuf without copying.
 * For small packets ("dynamic short", data held inline in the WQE) the
 * WQE itself becomes the external storage; otherwise the packet buffer
 * referenced by WORD3 is used.  The WQE pointer is handed to the
 * ext-free callback (with the softc stashed in work[0]) so the buffers
 * can be returned to their pools when the mbuf is freed.  Returns
 * non-zero if no mbuf header could be allocated.
 */
static int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short: packet data lives inside the WQE itself */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_CKSEG0(word3 & PIP_WQE_WORD3_ADDR);
		/* round down to the 2048-byte packet buffer boundary */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* WORD1[LEN] (bits 63:48) is the packet length in bytes */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1421 
1422 static int
1423 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1424 {
1425 	uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1426 
1427 	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1428 		return 0;
1429 
1430 	/* this error is harmless */
1431 	if (opecode == PIP_OVER_ERR)
1432 		return 0;
1433 
1434 	return 1;
1435 }
1436 
#if 0 /* not used */
/*
 * Would flag jumbo frames (WORD2[IP_BUFS] > 1 buffer) for dropping;
 * kept for reference, currently compiled out.
 */
static int
octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
{
	if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
		return 1;
	return 0;
}
#endif
1446 
1447 static int
1448 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1449 {
1450 	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port)))
1451 		return 1;
1452 	return 0;
1453 }
1454 
1455 static int
1456 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1457 {
1458 	if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1459 		if (ratecheck(&sc->sc_rate_recv_check_link_last,
1460 		    &sc->sc_rate_recv_check_link_cap))
1461 			log(LOG_DEBUG,
1462 			    "%s: link is not up, the packet was dropped\n",
1463 			    sc->sc_dev.dv_xname);
1464 		OCTEON_EVCNT_INC(sc, rxerrlink);
1465 		return 1;
1466 	}
1467 
1468 #if 0 /* XXX Performance tuning (Jumbo-frame is not supported yet!) */
1469 	if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1470 		/* XXX jumbo frame */
1471 		if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1472 		    &sc->sc_rate_recv_check_jumbo_cap))
1473 			log(LOG_DEBUG,
1474 			    "jumbo frame was received\n");
1475 		OCTEON_EVCNT_INC(sc, rxerrjmb);
1476 		return 1;
1477 	}
1478 #endif
1479 
1480 	if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1481 		if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) == PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1482 			/* no logging */
1483 			/* XXX inclement special error count */
1484 		} else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1485 				PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1486 			/* not an error. it's because of overload */
1487 		}
1488 		else {
1489 			if (ratecheck(&sc->sc_rate_recv_check_code_last,
1490 			    &sc->sc_rate_recv_check_code_cap))
1491 				log(LOG_WARNING,
1492 				    "%s: a reception error occured, "
1493 				    "the packet was dropped (error code = %lld)\n",
1494 				    sc->sc_dev.dv_xname, word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1495 		}
1496 		OCTEON_EVCNT_INC(sc, rxerrcode);
1497 		return 1;
1498 	}
1499 
1500 	return 0;
1501 }
1502 
/*
 * Receive one work queue entry: validate it, wrap it in an mbuf and
 * hand it to the network stack.  On any error the WQE and its packet
 * buffer are returned to the pools and if_ierrors is bumped.  The
 * interleaved send-queue flush calls opportunistically garbage-collect
 * transmitted buffers while we are here (XXX); a prefetch issued at
 * the top must be consumed by a fetch on every exit path.
 */
static int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* derive m_pkthdr.csum_flags from the IPD result in WORD2 */
	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX consume the prefetch from above and collect sent buffers */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX */

	OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_IN);

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX */

	ether_input_mbuf(ifp, m);

	return 0;

drop:
	/* XXX the prefetch issued above must still be consumed */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
1578 
1579 static void
1580 octeon_eth_recv_intr(void *data, uint64_t *work)
1581 {
1582 	struct octeon_eth_softc *sc;
1583 	int port;
1584 
1585 	OCTEON_ETH_KASSERT(work != NULL);
1586 
1587 	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1588 
1589 	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1590 
1591 	sc = octeon_eth_gsc[port];
1592 
1593 	OCTEON_ETH_KASSERT(sc != NULL);
1594 	OCTEON_ETH_KASSERT(port == sc->sc_port);
1595 
1596 	/* XXX process all work queue entries anyway */
1597 
1598 	(void)octeon_eth_recv(sc, work);
1599 }
1600 
1601 /* ---- tick */
1602 
1603 /*
1604  * octeon_eth_tick_free
1605  *
1606  * => garbage collect send gather buffer / mbuf
1607  * => called at softclock
1608  */
static void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	int timo;
	int s;

	s = splnet();
	/* XXX run a full prefetch/fetch/flush/sync cycle to reclaim
	 * transmitted buffers that accumulated since the last tick */
	if (sc->sc_soft_req_cnt > 0) {
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
		octeon_eth_send_queue_flush_sync(sc);
	}
	/* XXX */

	/* XXX ??? reschedule sooner (down to 10 ticks) the more mbuf
	 * ext-free callbacks are outstanding */
	timo = hz - (100 * sc->sc_ext_callback_cnt);
	if (timo < 10)
		 timo = 10;
	timeout_add_msec(&sc->sc_tick_free_ch, 1000 * timo / hz);
	/* XXX */
	splx(s);
}
1634 
1635 /*
1636  * octeon_eth_tick_misc
1637  *
1638  * => collect statistics
1639  * => check link status
1640  * => called at softclock
1641  */
static void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp;
	u_quad_t iqdrops, delta;
	int s;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;

	/* snapshot if_iqdrops so the debug printfs can report the delta
	 * added by each statistics pull below */
	iqdrops = ifp->if_iqdrops;
	cn30xxgmx_stats(sc->sc_gmx_port);
#ifdef OCTEON_ETH_DEBUG
	delta = ifp->if_iqdrops - iqdrops;
	printf("%s: %qu packets dropped at GMX FIFO\n",
			ifp->if_xname, delta);
#endif
	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
	delta = ifp->if_iqdrops - iqdrops; /* only read under OCTEON_ETH_DEBUG */
#ifdef OCTEON_ETH_DEBUG
	printf("%s: %qu packets dropped at PIP + GMX FIFO\n",
			ifp->if_xname, delta);
#endif

	mii_tick(&sc->sc_mii);

#ifdef OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC
	/*
	 * NOTE(review): appears to hand preamble processing back to
	 * hardware once enough even-nibble preambles were seen --
	 * board-specific workaround; semantics not verifiable from here.
	 */
	if (sc->sc_gmx_port->sc_proc_nibble_by_soft &&
	    sc->sc_gmx_port->sc_even_nibble_cnt > PROC_NIBBLE_SOFT_THRESHOLD) {
#ifdef OCTEON_ETH_DEBUG
		log(LOG_DEBUG, "%s: even nibble preamble count %d\n",
		    sc->sc_dev.dv_xname, sc->sc_gmx_port->sc_even_nibble_cnt);
#endif
		if (OCTEON_ETH_FIXUP_ODD_NIBBLE_MODEL_P(sc) &&
		    OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC_SPEED_P(sc->sc_gmx_port, ifp)) {
			log(LOG_NOTICE,
			    "%s: the preamble processing switched to hardware\n",
			    sc->sc_dev.dv_xname);
		}
		sc->sc_gmx_port->sc_proc_nibble_by_soft = 0;
		octeon_eth_mii_statchg((struct device *)sc);
		sc->sc_gmx_port->sc_even_nibble_cnt = 0;
	}
#endif
	splx(s);

	/* run again in one second */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
1692