xref: /openbsd-src/sys/arch/octeon/dev/if_cnmac.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
1 /*	$OpenBSD: if_cnmac.c,v 1.7 2011/07/03 21:42:11 yasuoka Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Internet Initiative Japan, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include "bpfilter.h"
29 
30 /*
31  * XXXSEIL
 * If no free send buffer is available, free all the sent buffers and bail out.
33  */
34 #define OCTEON_ETH_SEND_QUEUE_CHECK
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/pool.h>
39 #include <sys/proc.h>
40 #include <sys/mbuf.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/queue.h>
48 #include <sys/conf.h>
49 #include <sys/stdint.h> /* uintptr_t */
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #ifdef MBUF_TIMESTAMP
53 #include <sys/time.h>
54 #endif
55 
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <net/route.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in_var.h>
70 #include <netinet/ip.h>
71 
72 #include <machine/bus.h>
73 #include <machine/intr.h>
74 #include <machine/endian.h>
75 #include <machine/octeonvar.h>
76 #include <machine/octeon_model.h>
77 
78 #include <dev/mii/mii.h>
79 #include <dev/mii/miivar.h>
80 
81 #include <octeon/dev/cn30xxasxreg.h>
82 #include <octeon/dev/cn30xxciureg.h>
83 #include <octeon/dev/cn30xxnpireg.h>
84 #include <octeon/dev/cn30xxgmxreg.h>
85 #include <octeon/dev/cn30xxipdreg.h>
86 #include <octeon/dev/cn30xxpipreg.h>
87 #include <octeon/dev/cn30xxpowreg.h>
88 #include <octeon/dev/cn30xxfaureg.h>
89 #include <octeon/dev/cn30xxfpareg.h>
90 #include <octeon/dev/cn30xxbootbusreg.h>
91 #include <octeon/dev/cn30xxfpavar.h>
92 #include <octeon/dev/cn30xxgmxvar.h>
93 #include <octeon/dev/cn30xxfauvar.h>
94 #include <octeon/dev/cn30xxpowvar.h>
95 #include <octeon/dev/cn30xxipdvar.h>
96 #include <octeon/dev/cn30xxpipvar.h>
97 #include <octeon/dev/cn30xxpkovar.h>
98 #include <octeon/dev/cn30xxasxvar.h>
99 #include <octeon/dev/cn30xxsmivar.h>
100 #include <octeon/dev/iobusvar.h>
101 #include <octeon/dev/if_cnmacvar.h>
102 
103 #ifdef OCTEON_ETH_DEBUG
104 #define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
105 #define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
106 #else
107 #define	OCTEON_ETH_KASSERT(x)
108 #define	OCTEON_ETH_KDASSERT(x)
109 #endif
110 
111 /*
112  * Set the PKO to think command buffers are an odd length.  This makes it so we
 * never have to divide a command across two buffers.
114  */
115 #define OCTEON_POOL_NWORDS_CMD	\
116 	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
117 #define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
118 
119 #if NBPFILTER > 0
120 #define	OCTEON_ETH_TAP(ifp, m, dir) \
121 	do { \
122 		/* Pass this up to any BPF listeners. */ \
123 		if ((ifp)->if_bpf) \
124 			bpf_mtap((ifp)->if_bpf, (m), (dir)); \
125 	} while (0/* CONSTCOND */)
126 #else
127 #define	OCTEON_ETH_TAP(ifp, m, dir)
128 #endif /* NBPFILTER > 0 */
129 
130 static void		octeon_eth_buf_init(struct octeon_eth_softc *);
131 
132 static int	octeon_eth_match(struct device *, void *, void *);
133 static void	octeon_eth_attach(struct device *, struct device *, void *);
134 static void	octeon_eth_pip_init(struct octeon_eth_softc *);
135 static void	octeon_eth_ipd_init(struct octeon_eth_softc *);
136 static void	octeon_eth_pko_init(struct octeon_eth_softc *);
137 static void	octeon_eth_asx_init(struct octeon_eth_softc *);
138 static void	octeon_eth_smi_init(struct octeon_eth_softc *);
139 
140 static void	octeon_eth_board_mac_addr(uint8_t *, size_t, int);
141 
142 static int	octeon_eth_mii_readreg(struct device *, int, int);
143 static void	octeon_eth_mii_writereg(struct device *, int, int, int);
144 static void	octeon_eth_mii_statchg(struct device *);
145 
146 static int	octeon_eth_mediainit(struct octeon_eth_softc *);
147 static void	octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
148 static int	octeon_eth_mediachange(struct ifnet *);
149 
150 static void	octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
151 static void	octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
152 static void	octeon_eth_send_queue_flush(struct octeon_eth_softc *);
153 static void	octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *);
154 static int	octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
155 static void	octeon_eth_send_queue_add(struct octeon_eth_softc *,
156 			    struct mbuf *, uint64_t *);
157 static void	octeon_eth_send_queue_del(struct octeon_eth_softc *,
158 			    struct mbuf **, uint64_t **);
159 static int	octeon_eth_buf_free_work(struct octeon_eth_softc *,
160 			    uint64_t *, uint64_t);
161 static void	octeon_eth_buf_ext_free_m(caddr_t, u_int, void *);
162 static void	octeon_eth_buf_ext_free_ext(caddr_t, u_int, void *);
163 
164 static int	octeon_eth_ioctl(struct ifnet *, u_long, caddr_t);
165 static void	octeon_eth_watchdog(struct ifnet *);
166 static int	octeon_eth_init(struct ifnet *);
167 static int	octeon_eth_stop(struct ifnet *, int);
168 static void	octeon_eth_start(struct ifnet *);
169 
170 static int	octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t,
171 			    uint64_t);
172 static uint64_t	octeon_eth_send_makecmd_w1(int, paddr_t);
173 static uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t,
174 			    int);
175 static int	octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
176 			    struct mbuf *, uint64_t *, int *);
177 static int	octeon_eth_send_makecmd(struct octeon_eth_softc *,
178 			    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
179 static int	octeon_eth_send_buf(struct octeon_eth_softc *,
180 			    struct mbuf *, uint64_t *);
181 static int	octeon_eth_send(struct octeon_eth_softc *,
182 			    struct mbuf *);
183 
184 static int	octeon_eth_reset(struct octeon_eth_softc *);
185 static int	octeon_eth_configure(struct octeon_eth_softc *);
186 static int	octeon_eth_configure_common(struct octeon_eth_softc *);
187 
188 static void	octeon_eth_tick_free(void *arg);
189 static void	octeon_eth_tick_misc(void *);
190 
191 static int	octeon_eth_recv_mbuf(struct octeon_eth_softc *,
192 			    uint64_t *, struct mbuf **);
193 static int	octeon_eth_recv_check_code(struct octeon_eth_softc *,
194 			    uint64_t);
195 #if 0 /* not used */
196 static int      octeon_eth_recv_check_jumbo(struct octeon_eth_softc *,
197 			    uint64_t);
198 #endif
199 static int	octeon_eth_recv_check_link(struct octeon_eth_softc *,
200 			    uint64_t);
201 static int	octeon_eth_recv_check(struct octeon_eth_softc *,
202 			    uint64_t);
203 static int	octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
204 static void		octeon_eth_recv_intr(void *, uint64_t *);
205 
/* device driver context */
static struct	octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];	/* softc per GMX port */
static void	*octeon_eth_pow_recv_ih;	/* POW rx intr handle, shared by all ports */

/* sysctl'able parameters */
int		octeon_eth_param_pko_cmd_w0_n2 = 1;	/* PKO WORD0 "n2" bit */
int		octeon_eth_param_pip_dyn_rs = 1;
int		octeon_eth_param_redir = 0;
int		octeon_eth_param_pktbuf = 0;
int		octeon_eth_param_rate = 0;
int		octeon_eth_param_intr = 0;

struct cfattach cnmac_ca = {sizeof(struct octeon_eth_softc),
    octeon_eth_match, octeon_eth_attach, NULL, NULL};

struct cfdriver cnmac_cd = {NULL, "cnmac", DV_IFNET};
222 
223 #ifdef OCTEON_ETH_DEBUG
224 
/* Per-softc debug event counters, expanded via OCTEON_EVCNT_ENTRY. */
static const struct octeon_evcnt_entry octeon_evcnt_entries[] = {
#define	_ENTRY(name, type, parent, descr) \
	OCTEON_EVCNT_ENTRY(struct octeon_eth_softc, name, type, parent, descr)
	_ENTRY(rx,			MISC, NULL, "rx"),
	_ENTRY(rxint,			INTR, NULL, "rx intr"),
	_ENTRY(rxrs,			MISC, NULL, "rx dynamic short"),
	_ENTRY(rxbufpkalloc,		MISC, NULL, "rx buf pkt alloc"),
	_ENTRY(rxbufpkput,		MISC, NULL, "rx buf pkt put"),
	_ENTRY(rxbufwqalloc,		MISC, NULL, "rx buf wqe alloc"),
	_ENTRY(rxbufwqput,		MISC, NULL, "rx buf wqe put"),
	_ENTRY(rxerrcode,		MISC, NULL, "rx code error"),
	_ENTRY(rxerrfix,		MISC, NULL, "rx fixup error"),
	_ENTRY(rxerrjmb,		MISC, NULL, "rx jmb error"),
	_ENTRY(rxerrlink,		MISC, NULL, "rx link error"),
	_ENTRY(rxerroff,		MISC, NULL, "rx offload error"),
	_ENTRY(rxonperrshort,		MISC, NULL, "rx onp fixup short error"),
	_ENTRY(rxonperrpreamble,	MISC, NULL, "rx onp fixup preamble error"),
	_ENTRY(rxonperrcrc,		MISC, NULL, "rx onp fixup crc error"),
	_ENTRY(rxonperraddress,		MISC, NULL, "rx onp fixup address error"),
	_ENTRY(rxonponp,		MISC, NULL, "rx onp fixup onp packets"),
	_ENTRY(rxonpok,			MISC, NULL, "rx onp fixup success packets"),
	_ENTRY(tx,			MISC, NULL, "tx"),
	_ENTRY(txadd,			MISC, NULL, "tx add"),
	_ENTRY(txbufcballoc,		MISC, NULL, "tx buf cb alloc"),
	_ENTRY(txbufcbget,		MISC, NULL, "tx buf cb get"),
	_ENTRY(txbufgballoc,		MISC, NULL, "tx buf gb alloc"),
	_ENTRY(txbufgbget,		MISC, NULL, "tx buf gb get"),
	_ENTRY(txbufgbput,		MISC, NULL, "tx buf gb put"),
	_ENTRY(txdel,			MISC, NULL, "tx del"),
	_ENTRY(txerr,			MISC, NULL, "tx error"),
	_ENTRY(txerrcmd,		MISC, NULL, "tx cmd error"),
	_ENTRY(txerrgbuf,		MISC, NULL, "tx gbuf error"),
	_ENTRY(txerrlink,		MISC, NULL, "tx link error"),
	_ENTRY(txerrmkcmd,		MISC, NULL, "tx makecmd error"),
#undef	_ENTRY
};
261 #endif
262 
/* XXX board-specific: maps MII phy number to SMI phy address */
static const int	octeon_eth_phy_table[] = {
#if defined __seil5__
	0x04, 0x01, 0x02
#else
	0x02, 0x03, 0x22
#endif
};
271 
272 /* ---- buffer management */
273 
/* FPA pool descriptors: pool number, element size, element count. */
static const struct octeon_eth_pool_param {
	int			poolno;
	size_t			size;
	size_t			nelems;
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
/* Pool handles indexed by pool number; aliases below name each pool. */
struct cn30xxfpa_buf	*octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]
291 
292 static void
293 octeon_eth_buf_init(struct octeon_eth_softc *sc)
294 {
295 	static int once;
296 	int i;
297 	const struct octeon_eth_pool_param *pp;
298 	struct cn30xxfpa_buf *fb;
299 
300 	if (once == 1)
301 		return;
302 	once = 1;
303 
304 	for (i = 0; i < (int)nitems(octeon_eth_pool_params); i++) {
305 		pp = &octeon_eth_pool_params[i];
306 		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
307 		octeon_eth_pools[i] = fb;
308 	}
309 }
310 
311 /* ---- autoconf */
312 
313 static int
314 octeon_eth_match(struct device *parent, void *match, void *aux)
315 {
316 	struct cfdata *cf = (struct cfdata *)match;
317 	struct cn30xxgmx_attach_args *ga = aux;
318 
319 	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
320 		return 0;
321 	}
322 	return 1;
323 }
324 
/*
 * Autoconf attach: wire one GMX port up as an ethernet interface.
 * Copies register/DMA tags and port identity from the GMX attach
 * args, initializes the PIP/IPD/PKO/ASX/SMI submodules, then attaches
 * the ifnet/mii glue and (once, globally) the FPA pools and the
 * shared POW receive interrupt.
 */
static void
octeon_eth_attach(struct device *parent, struct device *self, void *aux)
{
	struct octeon_eth_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	octeon_eth_board_mac_addr(enaddr, sizeof(enaddr), sc->sc_port);
	printf(", address %s\n", ether_sprintf(enaddr));

	/*
	 * live lock control notifications.
	 * XXX: use sysctl ???
	 */

	/* Register in the per-port table used by the POW rx interrupt. */
	octeon_eth_gsc[sc->sc_port] = sc;

	SIMPLEQ_INIT(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	cn30xxgmx_stats_init(sc->sc_gmx_port);

	timeout_set(&sc->sc_tick_misc_ch, octeon_eth_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, octeon_eth_tick_free, sc);

	/* Per-port FAU register used to track transmit completion. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	/* Hand the GMX port back-pointers to our submodules. */
	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	/* XXX 1-second rate caps for link/jumbo/code error checks */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;

#if 1
	octeon_eth_buf_init(sc);
#endif

	/* One POW receive handler is shared by all cnmac ports. */
	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = cn30xxpow_intr_establish(OCTEON_POW_GROUP_PIP,
		    IPL_NET, octeon_eth_recv_intr, NULL, NULL, sc->sc_dev.dv_xname);

	OCTEON_EVCNT_ATTACH_EVCNTS(sc, octeon_evcnt_entries,
	    sc->sc_dev.dv_xname);
}
423 
424 /* ---- submodules */
425 
426 /* XXX */
427 static void
428 octeon_eth_pip_init(struct octeon_eth_softc *sc)
429 {
430 	struct cn30xxpip_attach_args pip_aa;
431 
432 	pip_aa.aa_port = sc->sc_port;
433 	pip_aa.aa_regt = sc->sc_regt;
434 	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
435 	pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
436 	pip_aa.aa_ip_offset = sc->sc_ip_offset;
437 	cn30xxpip_init(&pip_aa, &sc->sc_pip);
438 }
439 
440 /* XXX */
441 static void
442 octeon_eth_ipd_init(struct octeon_eth_softc *sc)
443 {
444 	struct cn30xxipd_attach_args ipd_aa;
445 
446 	ipd_aa.aa_port = sc->sc_port;
447 	ipd_aa.aa_regt = sc->sc_regt;
448 	ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
449 	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
450 	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
451 }
452 
453 /* XXX */
454 static void
455 octeon_eth_pko_init(struct octeon_eth_softc *sc)
456 {
457 	struct cn30xxpko_attach_args pko_aa;
458 
459 	pko_aa.aa_port = sc->sc_port;
460 	pko_aa.aa_regt = sc->sc_regt;
461 	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
462 	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
463 	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
464 	cn30xxpko_init(&pko_aa, &sc->sc_pko);
465 }
466 
467 /* XXX */
468 static void
469 octeon_eth_asx_init(struct octeon_eth_softc *sc)
470 {
471 	struct cn30xxasx_attach_args asx_aa;
472 
473 	asx_aa.aa_port = sc->sc_port;
474 	asx_aa.aa_regt = sc->sc_regt;
475 	cn30xxasx_init(&asx_aa, &sc->sc_asx);
476 }
477 
478 static void
479 octeon_eth_smi_init(struct octeon_eth_softc *sc)
480 {
481 	struct cn30xxsmi_attach_args smi_aa;
482 
483 	smi_aa.aa_port = sc->sc_port;
484 	smi_aa.aa_regt = sc->sc_regt;
485 	cn30xxsmi_init(&smi_aa, &sc->sc_smi);
486 	cn30xxsmi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
487 }
488 
489 /* ---- XXX */
490 
/*
 * Pack/unpack a 48-bit Ethernet address into/out of a uint64_t
 * (a[0] is the most significant byte).  Arguments are parenthesized
 * for macro hygiene.
 */
#define	ADDR2UINT64(u, a) \
	do { \
		u = \
		    (((uint64_t)(a)[0] << 40) | ((uint64_t)(a)[1] << 32) | \
		     ((uint64_t)(a)[2] << 24) | ((uint64_t)(a)[3] << 16) | \
		     ((uint64_t)(a)[4] <<  8) | ((uint64_t)(a)[5] <<  0)); \
	} while (0)
#define	UINT642ADDR(a, u) \
	do { \
		(a)[0] = (uint8_t)((u) >> 40); (a)[1] = (uint8_t)((u) >> 32); \
		(a)[2] = (uint8_t)((u) >> 24); (a)[3] = (uint8_t)((u) >> 16); \
		(a)[4] = (uint8_t)((u) >>  8); (a)[5] = (uint8_t)((u) >>  0); \
	} while (0)

/*
 * Derive the MAC address for `port': take the board base address
 * (placeholder bytes 00:01:02:03:04:05 until EEPROM reading is
 * implemented) and add the port number to it as a 48-bit integer.
 */
static void
octeon_eth_board_mac_addr(uint8_t *enaddr, size_t size, int port)
{
	uint64_t addr;
	size_t i;	/* size_t index avoids signed/unsigned comparison */

	/* XXX read a mac_dsc tuple from EEPROM */
	for (i = 0; i < size; i++)
		enaddr[i] = i;

	ADDR2UINT64(addr, enaddr);
	addr += port;
	UINT642ADDR(enaddr, addr);
}
519 
520 /* ---- media */
521 
522 static int
523 octeon_eth_mii_readreg(struct device *self, int phy_no, int reg)
524 {
525 	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
526 	int phy_addr = octeon_eth_phy_table[phy_no];
527 
528 	if (sc->sc_port >= (int)nitems(octeon_eth_phy_table) ||
529 	    phy_no != sc->sc_port) {
530 		log(LOG_ERR,
531 		    "mii read address is mismatch, phy number %d.\n", phy_no);
532 		return -1;
533 	}
534 	return cn30xxsmi_read(sc->sc_smi, phy_addr, reg);
535 }
536 
537 static void
538 octeon_eth_mii_writereg(struct device *self, int phy_no, int reg, int value)
539 {
540 	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
541 	int phy_addr = octeon_eth_phy_table[phy_no];
542 
543 	if (sc->sc_port >= (int)nitems(octeon_eth_phy_table) ||
544 	    phy_no != sc->sc_port) {
545 		log(LOG_ERR,
546 		    "mii write address is mismatch, phy number %d.\n", phy_no);
547 		return;
548 	}
549 	cn30xxsmi_write(sc->sc_smi, phy_addr, reg, value);
550 }
551 
/*
 * mii(4) status-change callback: link parameters changed, so quiesce
 * the packet-output unit and the MAC, reprogram via
 * octeon_eth_reset(), reload the address filter if running, then
 * re-enable both.
 */
static void
octeon_eth_mii_statchg(struct device *self)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Stop output before touching port configuration. */
	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}
569 
/*
 * Set up mii(4)/ifmedia for this port.  If no PHY answers the probe,
 * fall back to a manual IFM_NONE medium so media ioctls still work.
 * Always returns 0.
 */
static int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	/* Probe all PHY addresses; sc_port selects the instance. */
	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_port, MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL) {
		/* A PHY was found: default to autonegotiation. */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	} else {
		/* No PHY: register a manual "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE,
		    MII_MEDIA_NONE, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	}

	return 0;
}
599 
600 static void
601 octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
602 {
603 	struct octeon_eth_softc *sc = ifp->if_softc;
604 
605 	mii_pollstat(&sc->sc_mii);
606 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
607 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
608 	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
609 	    sc->sc_gmx_port->sc_port_flowflags;
610 }
611 
612 static int
613 octeon_eth_mediachange(struct ifnet *ifp)
614 {
615 	struct octeon_eth_softc *sc = ifp->if_softc;
616 
617 	mii_mediachg(&sc->sc_mii);
618 
619 	return 0;
620 }
621 
622 /* ---- send buffer garbage collection */
623 
/*
 * Start an asynchronous fetch-and-add(0) on the FAU "done" counter;
 * the result is collected later by
 * octeon_eth_send_queue_flush_fetch().
 */
static void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
631 
/*
 * Complete the asynchronous FAU read started by
 * octeon_eth_send_queue_flush_prefetch() and store the value in
 * sc_hard_done_cnt.  The counter is asserted <= 0 (presumably it
 * counts completed transmits downward -- it is topped back up in
 * octeon_eth_send_queue_flush()).
 */
static void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef  OCTEON_ETH_DEBUG
	/* Nothing to collect if no prefetch was issued. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
644 
/*
 * Reclaim completed transmissions: for each of the |sc_hard_done_cnt|
 * finished sends, pop the (mbuf, gather buffer) pair off the send
 * queue, return the gather buffer to the SG pool and free the mbuf.
 * The FAU counter is then incremented by the number reclaimed; the
 * matching synchronizing read happens in
 * octeon_eth_send_queue_flush_sync().
 */
static void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	/* sent_count is <= 0, so this loops -sent_count times. */
	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, CKSEG0_TO_PHYS(gbuf));
		OCTEON_EVCNT_INC(sc, txbufgbput);

		m_freem(m);
	}

	/* Add the reclaimed count back to the FAU counter. */
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
669 
/*
 * Synchronize with the FAU increment issued by
 * octeon_eth_send_queue_flush() and charge the reclaimed entries
 * against sc_soft_req_cnt.  No-op if no flush is pending.
 */
static void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX the fetched value is discarded; this read only syncs */
	cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX */

	sc->sc_flush = 0;
}
686 
/*
 * Check whether the send queue has room (see
 * OCTEON_ETH_SEND_QUEUE_CHECK at the top of the file).  When the
 * outstanding count reaches GATHER_QUEUE_SIZE - 1, flush completed
 * sends and report full (1) so the caller bails out; otherwise 0.
 */
static int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* queued sends minus completed ones (sc_hard_done_cnt <= 0) */
	nofree_cnt = sc->sc_soft_req_cnt + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		octeon_eth_send_queue_flush_sync(sc);
		return 1;
	}

#endif
	return 0;
}
705 
706 /*
707  * (Ab)use m_nextpkt and m_paddr to maintain mbuf chain and pointer to gather
708  * buffer.  Other mbuf members may be used by m_freem(), so don't touch them!
709  */
710 
/*
 * Overlay for queueing in-flight mbufs without extra allocation: the
 * queue linkage lives in the mbuf's m_nextpkt slot and the gather
 * buffer pointer in the m_pkthdr.rcvif slot, both located with
 * offsetof() so the real mbuf layout is never duplicated here.
 */
struct _send_queue_entry {
	union {
		struct mbuf _sqe_s_mbuf;	/* the mbuf itself */
		struct {
			/* pad up to m_nextpkt, then the list linkage */
			char _sqe_s_entry_pad[offsetof(struct mbuf, m_nextpkt)];
			SIMPLEQ_ENTRY(_send_queue_entry) _sqe_s_entry_entry;
		} _sqe_s_entry;
		struct {
			/* pad up to m_pkthdr.rcvif, then the gather buf */
			char _sqe_s_gbuf_pad[offsetof(struct mbuf, M_dat.MH.MH_pkthdr.rcvif)];
			uint64_t *_sqe_s_gbuf_gbuf;
		} _sqe_s_gbuf;
	} _sqe_u;
#define	_sqe_entry	_sqe_u._sqe_s_entry._sqe_s_entry_entry
#define	_sqe_gbuf	_sqe_u._sqe_s_gbuf._sqe_s_gbuf_gbuf
};
726 
/*
 * Append an in-flight packet-header mbuf and its gather buffer to the
 * send queue, overlaying the mbuf as a _send_queue_entry.  Mbufs with
 * an external-storage free callback are counted in
 * sc_ext_callback_cnt.
 */
static void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	struct _send_queue_entry *sqe = (struct _send_queue_entry *)m;

	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	sqe->_sqe_gbuf = gbuf;
	SIMPLEQ_INSERT_TAIL(&sc->sc_sendq, sqe, _sqe_entry);

	/* Track mbufs whose data storage is externally owned. */
	if (m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;

	OCTEON_EVCNT_INC(sc, txadd);
}
743 
/*
 * Remove the oldest entry from the send queue, returning the mbuf in
 * *rm and its gather buffer in *rgbuf.  The queue must not be empty.
 */
static void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct _send_queue_entry *sqe;

	sqe = SIMPLEQ_FIRST(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(sqe != NULL);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_sendq, _sqe_entry);

	*rm = (void *)sqe;
	*rgbuf = sqe->_sqe_gbuf;

	/* Balance the external-storage count from ..._queue_add(). */
	if ((*rm)->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}

	OCTEON_EVCNT_INC(sc, txdel);
}
764 
/*
 * Return the receive resources of one work-queue entry to the FPA:
 * the packet buffer (when WORD2 indicates buffers are attached) and
 * the WQE itself.  Always returns 0.
 */
static int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = CKSEG0_TO_PHYS(work[3] & PIP_WQE_WORD3_ADDR);
		/* assumes 2048-byte aligned packet buffers -- XXX confirm */
		start_buffer = addr & ~(2048 - 1);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
		OCTEON_EVCNT_INC(sc, rxbufpkput);
	}

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	return 0;
}
786 
/*
 * External-storage free callback for rx mbufs whose data lives inside
 * the work-queue entry: only the WQE goes back to the FPA pool.
 * Counted as "rx dynamic short" (rxrs).
 */
static void
octeon_eth_buf_ext_free_m(caddr_t buf, u_int size, void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the softc pointer for event counting */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	OCTEON_EVCNT_INC(sc, rxrs);

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	splx(s);
}
803 
/*
 * External-storage free callback for rx mbufs backed by a separate
 * packet buffer: return both the WQE and the packet buffer to their
 * FPA pools.
 */
static void
octeon_eth_buf_ext_free_ext(caddr_t buf, u_int size,
    void *arg)
{
	uint64_t *work = (void *)arg;
#ifdef OCTEON_ETH_DEBUG
	/* work[0] carries the softc pointer for event counting */
	struct octeon_eth_softc *sc = (void *)(uintptr_t)work[0];
#endif
	int s = splnet();

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, CKSEG0_TO_PHYS(work));
	OCTEON_EVCNT_INC(sc, rxbufwqput);

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, CKSEG0_TO_PHYS(buf));
	OCTEON_EVCNT_INC(sc, rxbufpkput);

	splx(s);
}
822 
823 /* ---- ifnet interfaces */
824 
/*
 * ifnet ioctl handler.  Runs at splnet; an ENETRESET result from any
 * case is translated into a GMX filter reload, and the transmit queue
 * is kicked before returning.
 */
static int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			octeon_eth_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* already running: just reload the filter below */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				octeon_eth_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				octeon_eth_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW implies pause in both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
				ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* Multicast/filter change: reprogram the GMX filter. */
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	octeon_eth_start(ifp);

	splx(s);
	return (error);
}
892 
893 /* ---- send (output) */
894 
/*
 * Build PKO command WORD0: both FAU registers armed (s1/s0 = 1 with
 * reg1/reg0 = fau1/fau0), gather bit `g' set when more than one
 * segment is used, and the segment/byte counts filled in.  See
 * octeon_eth_send_makecmd() for the WORD1 counterpart.
 */
static uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs)
{
	return cn30xxpko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		0, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
909 
/*
 * Build PKO command WORD1: a descriptor of `size' units at physical
 * address `addr'; the pool field names FPA_GATHER_BUFFER_POOL.  In
 * gather mode `size' is a segment count, otherwise a byte count (see
 * octeon_eth_send_makecmd()).
 */
static uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
		0, 0,				/* i, back */
		FPA_GATHER_BUFFER_POOL,		/* pool */
		size, addr);			/* size, addr */
}
918 
919 /* TODO: use bus_dma(9) */
920 
921 #define KVTOPHYS(addr)	if_cnmac_kvtophys((vaddr_t)(addr))
922 paddr_t if_cnmac_kvtophys(vaddr_t);
923 
924 paddr_t
925 if_cnmac_kvtophys(vaddr_t kva)
926 {
927 	if (IS_XKPHYS(kva))
928 		return XKPHYS_TO_PHYS(kva);
929 	else if (kva >= CKSEG0_BASE && kva < CKSEG0_BASE + CKSEG_SIZE)
930 		return CKSEG0_TO_PHYS(kva);
931 	else if (kva >= CKSEG1_BASE && kva < CKSEG1_BASE + CKSEG_SIZE)
932 		return CKSEG1_TO_PHYS(kva);
933 
934 	printf("kva %p is not be able to convert physical address\n", kva);
935 	panic("if_cnmac_kvtophys");
936 }
937 
/*
 * Fill `gbuf' with one PKO WORD1 descriptor per mbuf segment.  A
 * segment crossing a 4 KB page boundary is emitted as two descriptors
 * (NOTE(review): presumably because virtually-contiguous pages need
 * not be physically contiguous -- confirm).  Returns 1 when more than
 * 63 descriptors would be needed, else 0 with the count in *rsegs.
 */
static int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uint32_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		/* Skip empty mbufs. */
		if (__predict_false(m->m_len == 0))
			continue;

#if 0
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k: offset of the data within its page
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX split: first descriptor runs to page end */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    KVTOPHYS(m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		/* descriptor for the (remaining) bytes of this mbuf */
		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    KVTOPHYS((caddr_t)m->m_data + rlen));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
991 
/*
 * Build the two PKO command words for mbuf chain `m'.  Single-segment
 * packets use link mode (WORD1 points at the data); multi-segment
 * packets use gather mode (WORD1 points at the gather buffer).
 * Returns nonzero when the segment list could not be built.
 */
static int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: there are a lot of number of segments"
		    " of transmission data", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs);
	pko_cmd_w1 = octeon_eth_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		CKSEG0_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
1028 
/*
 * Write the two-word PKO command into the current command buffer and
 * ring the doorbell.  When the buffer is about to run out — its last
 * word is reserved for the link to the next buffer — a fresh buffer is
 * allocated from the FPA command pool and chained in.
 * Returns nonzero if that chain buffer could not be allocated.
 */
static int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	/* CKSEG0 (unmapped) view of the current write position. */
	cmdptr = (uint64_t *)PHYS_TO_CKSEG0(sc->sc_cmdptr.cmdptr);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		/* Buffer exhausted: chain a new one into the last word. */
		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: can not allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		OCTEON_EVCNT_INC(sc, txbufcbget);
		*cmdptr++ = buf;
		/* Continue writing at the start of the new buffer. */
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	/* Tell the PKO that two more command words are available. */
	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
1070 
1071 static int
1072 octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
1073     uint64_t *gbuf)
1074 {
1075 	int result = 0, error;
1076 	uint64_t pko_cmd_w0, pko_cmd_w1;
1077 
1078 	error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
1079 	if (error != 0) {
1080 		/* already logging */
1081 		OCTEON_EVCNT_INC(sc, txerrmkcmd);
1082 		result = error;
1083 		goto done;
1084 	}
1085 
1086 	error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
1087 	if (error != 0) {
1088 		/* already logging */
1089 		OCTEON_EVCNT_INC(sc, txerrcmd);
1090 		result = error;
1091 	}
1092 
1093 done:
1094 	return result;
1095 }
1096 
/*
 * Transmit one mbuf chain: allocate a gather buffer from the FPA,
 * build and post the PKO command, then enqueue the mbuf on the
 * driver's sent-buffer list for later garbage collection.
 * Returns nonzero on failure; the caller still owns `m' in that case
 * (octeon_eth_start() frees it).
 */
static int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	OCTEON_EVCNT_INC(sc, tx);

	/* One gather buffer per packet, from the free pool allocator. */
	gaddr = cn30xxfpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: can not allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		OCTEON_EVCNT_INC(sc, txerrgbuf);
		result = 1;
		goto done;
	}
	OCTEON_EVCNT_INC(sc, txbufgbget);

	/* Access the gather buffer through its CKSEG0 (unmapped) address. */
	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_CKSEG0(gaddr);

	OCTEON_ETH_KASSERT(gbuf != NULL);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		/* return the unused gather buffer to the pool */
		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		OCTEON_EVCNT_INC(sc, txbufgbput);
		result = error;
		goto done;
	}

	/* Defer freeing mbuf and gather buffer until the hardware is done. */
	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
1135 
/*
 * ifnet if_start handler: drain the interface send queue into the PKO.
 * While the link is down, queued packets are dequeued and dropped.
 * An iobdma request is prefetched before the work and fetched after it
 * (performance tuning) — every flush_prefetch must be balanced by a
 * flush_fetch before leaving.
 */
static void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	/* Nothing to do unless running and not already active. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
#if 0
#ifdef DDB
			m_print(m, "cd", printf);
#endif
			printf("%s: drop\n", sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			IF_DROP(&ifp->if_snd);
			OCTEON_EVCNT_INC(sc, txerrlink);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		/* The pending prefetch is consumed here. */
		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT);

		/* XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			/* octeon_eth_send() failed: the mbuf is still ours. */
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
		  	  "%s: failed in the transmission of the packet\n",
		    	  sc->sc_dev.dv_xname);
			OCTEON_EVCNT_INC(sc, txerr);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

/*
 * XXXSEIL
 * Don't schedule send-buffer-free callout every time - those buffers are freed
 * by "free tick".  This makes some packets like NFS slower, but it normally
 * doesn't happen on SEIL.
 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	/* Balance the prefetch issued on entry (or in the loop). */
	octeon_eth_send_queue_flush_fetch(sc);
}
1236 
/*
 * ifnet watchdog handler: a transmission timed out.  Reconfigure the
 * whole port, mark the interface running again and restart output.
 */
static void
octeon_eth_watchdog(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	octeon_eth_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;

	octeon_eth_start(ifp);
}
1252 
/*
 * ifnet init handler: bring the interface up.
 * Full hardware configuration is done only on the first call
 * (sc_init_flag); subsequent calls merely re-enable the GMX port.
 * Always returns 0.
 */
static int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	/* Program the RX filter for the current multicast/promisc state. */
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	/* Start the periodic statistics/link and buffer-free ticks. */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	return 0;
}
1285 
/*
 * ifnet stop handler: stop the periodic ticks, take the MII and the
 * GMX port down and mark the interface not running.
 * `disable' is currently unused.  Always returns 0.
 */
static int
octeon_eth_stop(struct ifnet *ifp, int disable)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);
	timeout_del(&sc->sc_resume_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Mark the interface as down and cancel the watchdog timer. */
	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	return 0;
}
1305 
1306 /* ---- misc */
1307 
1308 #define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)
1309 
/*
 * Reset the GMX port settings: speed, flow control, timing and
 * board-specific quirks.  Always returns 0.
 */
static int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);
	cn30xxgmx_reset_board(sc->sc_gmx_port);

	return 0;
}
1320 
/*
 * Configure the per-port hardware units (GMX, PKO, PIP) with the port
 * disabled, then re-enable it.  Always returns 0.
 */
static int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	/* Chip-wide (not per-port) setup; effectively done only once. */
	octeon_eth_configure_common(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpip_port_config(sc->sc_pip);

	/* rd_clr: presumably read-to-clear stats mode — see cn30xxgmx */
	cn30xxgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	cn30xxgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1341 
1342 static int
1343 octeon_eth_configure_common(struct octeon_eth_softc *sc)
1344 {
1345 	static int once;
1346 
1347 	if (once == 1)
1348 		return 0;
1349 	once = 1;
1350 
1351 #if 0
1352 	octeon_eth_buf_init(sc);
1353 #endif
1354 
1355 	cn30xxipd_config(sc->sc_ipd);
1356 	cn30xxpko_config(sc->sc_pko);
1357 
1358 	cn30xxpow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);
1359 
1360 	return 0;
1361 }
1362 
/*
 * Wrap a received PIP/IPD work queue entry in an mbuf without copying:
 * the packet data is attached as external storage and the work entry
 * itself is passed to the ext-free callback for recycling.
 * Returns nonzero if no mbuf header could be allocated.
 */
static int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/*
		 * Dynamic short: the packet data is stored inline in the
		 * work queue entry itself, starting at work[4].
		 */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		/* Packet data lives in a separate 2KB packet buffer. */
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_CKSEG0(word3 & PIP_WQE_WORD3_ADDR);
		/* round down to the 2KB-aligned start of the buffer */
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	/* embed sc pointer into work[0] for _ext_free evcnt */
	work[0] = (uintptr_t)sc;

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	/* packet length is in WORD1 bits 63:48 */
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
1424 
1425 static int
1426 octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
1427 {
1428 	uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1429 
1430 	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1431 		return 0;
1432 
1433 	/* this error is harmless */
1434 	if (opecode == PIP_OVER_ERR)
1435 		return 0;
1436 
1437 	return 1;
1438 }
1439 
#if 0 /* not used */
/*
 * Detect a frame spanning more than one IPD buffer (jumbo frame).
 * Returns nonzero for jumbo frames.  Disabled: jumbo frames are not
 * supported by this driver.
 */
static int
octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
{
	if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
		return 1;
	return 0;
}
#endif
1449 
1450 static int
1451 octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
1452 {
1453 	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port)))
1454 		return 1;
1455 	return 0;
1456 }
1457 
1458 static int
1459 octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
1460 {
1461 	if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
1462 		if (ratecheck(&sc->sc_rate_recv_check_link_last,
1463 		    &sc->sc_rate_recv_check_link_cap))
1464 			log(LOG_DEBUG,
1465 			    "%s: link is not up, the packet was dropped\n",
1466 			    sc->sc_dev.dv_xname);
1467 		OCTEON_EVCNT_INC(sc, rxerrlink);
1468 		return 1;
1469 	}
1470 
1471 #if 0 /* XXX Performance tunig (Jumbo-frame is not supported yet!) */
1472 	if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
1473 		/* XXX jumbo frame */
1474 		if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
1475 		    &sc->sc_rate_recv_check_jumbo_cap))
1476 			log(LOG_DEBUG,
1477 			    "jumbo frame was received\n");
1478 		OCTEON_EVCNT_INC(sc, rxerrjmb);
1479 		return 1;
1480 	}
1481 #endif
1482 
1483 	if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
1484 		if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) == PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
1485 			/* no logging */
1486 			/* XXX inclement special error count */
1487 		} else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
1488 				PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
1489 			/* not an erorr. it's because of overload */
1490 		}
1491 		else {
1492 			if (ratecheck(&sc->sc_rate_recv_check_code_last,
1493 			    &sc->sc_rate_recv_check_code_cap))
1494 				log(LOG_WARNING,
1495 				    "%s: the reception error had occured, "
1496 				    "the packet was dropped (error code = %lld)\n",
1497 				    sc->sc_dev.dv_xname, word2 & PIP_WQE_WORD2_NOIP_OPECODE);
1498 		}
1499 		OCTEON_EVCNT_INC(sc, rxerrcode);
1500 		return 1;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
/*
 * Receive one work queue entry: validate WORD2, wrap the packet in an
 * mbuf and hand it to the ethernet input path.  On error the work
 * entry and its buffers are returned to their pools and if_ierrors is
 * bumped.  Also opportunistically flushes the transmit send queue
 * while here (XXX performance tuning).
 */
static int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	OCTEON_EVCNT_INC(sc, rx);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	/* set checksum-offload flags from the hardware's WORD2 result */
	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		/* consume the prefetch issued above, then flush */
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX */

	OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_IN);

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);
	/* XXX */

	ether_input_mbuf(ifp, m);

	return 0;

drop:
	/* XXX balance the prefetch issued on entry before returning */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
1581 
1582 static void
1583 octeon_eth_recv_intr(void *data, uint64_t *work)
1584 {
1585 	struct octeon_eth_softc *sc;
1586 	int port;
1587 
1588 	OCTEON_ETH_KASSERT(work != NULL);
1589 
1590 	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
1591 
1592 	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);
1593 
1594 	sc = octeon_eth_gsc[port];
1595 
1596 	OCTEON_ETH_KASSERT(sc != NULL);
1597 	OCTEON_ETH_KASSERT(port == sc->sc_port);
1598 
1599 	/* XXX process all work queue entries anyway */
1600 
1601 	(void)octeon_eth_recv(sc, work);
1602 }
1603 
1604 /* ---- tick */
1605 
1606 /*
1607  * octeon_eth_tick_free
1608  *
1609  * => garbage collect send gather buffer / mbuf
1610  * => called at softclock
1611  */
static void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	int timo;
	int s;

	s = splnet();
	/* XXX */
	if (sc->sc_soft_req_cnt > 0) {
		/* free every buffer the hardware has finished sending */
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
		octeon_eth_send_queue_flush_sync(sc);
	}
	/* XXX */

	/*
	 * XXX ???
	 * Reschedule sooner while ext-free callbacks are outstanding,
	 * clamped to a minimum of 10 ticks.
	 */
	timo = hz - (100 * sc->sc_ext_callback_cnt);
	if (timo < 10)
		 timo = 10;
	timeout_add_msec(&sc->sc_tick_free_ch, 1000 * timo / hz);
	/* XXX */
	splx(s);
}
1637 
1638 /*
1639  * octeon_eth_tick_misc
1640  *
1641  * => collect statistics
1642  * => check link status
1643  * => called at softclock
1644  */
static void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp;
	u_quad_t iqdrops, delta;
	int s;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;

	/* snapshot if_iqdrops so the debug printfs can show the delta */
	iqdrops = ifp->if_iqdrops;
	cn30xxgmx_stats(sc->sc_gmx_port);
#ifdef OCTEON_ETH_DEBUG
	delta = ifp->if_iqdrops - iqdrops;
	printf("%s: %qu packets dropped at GMX FIFO\n",
			ifp->if_xname, delta);
#endif
	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
	delta = ifp->if_iqdrops - iqdrops;
#ifdef OCTEON_ETH_DEBUG
	printf("%s: %qu packets dropped at PIP + GMX FIFO\n",
			ifp->if_xname, delta);
#endif

	/* drive the MII state machine (link status polling) */
	mii_tick(&sc->sc_mii);

#ifdef OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC
	/*
	 * Switch preamble processing back to hardware once enough
	 * even-nibble preambles have been seen by software.
	 */
	if (sc->sc_gmx_port->sc_proc_nibble_by_soft &&
	    sc->sc_gmx_port->sc_even_nibble_cnt > PROC_NIBBLE_SOFT_THRESHOLD) {
#ifdef OCTEON_ETH_DEBUG
		log(LOG_DEBUG, "%s: even nibble preamble count %d\n",
		    sc->sc_dev.dv_xname, sc->sc_gmx_port->sc_even_nibble_cnt);
#endif
		if (OCTEON_ETH_FIXUP_ODD_NIBBLE_MODEL_P(sc) &&
		    OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC_SPEED_P(sc->sc_gmx_port, ifp)) {
			log(LOG_NOTICE,
			    "%s: the preamble processing is switched to hardware\n",
			    sc->sc_dev.dv_xname);
		}
		sc->sc_gmx_port->sc_proc_nibble_by_soft = 0;
		octeon_eth_mii_statchg((struct device *)sc);
		sc->sc_gmx_port->sc_even_nibble_cnt = 0;
	}
#endif
	splx(s);

	/* run again in one second */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
1695