xref: /openbsd-src/sys/dev/pci/if_nep.c (revision fb8aa7497fded39583f40e800732f9c046411717)
1 /*	$OpenBSD: if_nep.c,v 1.25 2016/05/23 15:22:44 tedu Exp $	*/
2 /*
3  * Copyright (c) 2014, 2015 Mark Kettenis
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bpfilter.h"
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/device.h>
23 #include <sys/ioctl.h>
24 #include <sys/malloc.h>
25 #include <sys/mbuf.h>
26 #include <sys/pool.h>
27 #include <sys/socket.h>
28 
29 #include <net/if.h>
30 #include <net/if_media.h>
31 
32 #include <netinet/in.h>
33 #include <netinet/if_ether.h>
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 
39 #include <dev/mii/mii.h>
40 #include <dev/mii/miivar.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #ifdef __sparc64__
47 #include <dev/ofw/openfirm.h>
48 #endif
49 
50 /*
51  * The virtualization features make this a really complex device.  For
52  * now we try to keep things simple and use one logical device per
53  * port, using port numbers as logical device numbers.
54  */
55 
56 #define PIO		0x000000
57 #define FZC_PIO		0x080000
58 #define FZC_MAC		0x180000
59 #define FZC_IPP		0x280000
60 #define FFLP		0x300000
61 #define FZC_FFLP	0x380000
62 #define ZCP		0x500000
63 #define FZC_ZCP		0x580000
64 #define DMC		0x600000
65 #define FZC_DMC		0x680000
66 #define TXC		0x700000
67 #define FZC_TXC		0x780000
68 #define PIO_LDSV	0x800000
69 #define PIO_IMASK0	0xa00000
70 #define PIO_IMASK1	0xb00000
71 
72 #define RST_CTL			(FZC_PIO + 0x00038)
73 #define SYS_ERR_MASK		(FZC_PIO + 0x00090)
74 #define SYS_ERR_STAT		(FZC_PIO + 0x00098)
75 
76 #define LDN_RXDMA(chan)		(0 + (chan))
77 #define LDN_TXDMA(chan)		(32 + (chan))
78 #define LDN_MIF			63
79 #define LDN_MAC(port)		(64 + (port))
80 #define LDN_SYSERR		68
81 
82 #define LDSV0(ldg)		(PIO_LDSV + 0x00000 + (ldg) * 0x02000)
83 #define LDSV1(ldg)		(PIO_LDSV + 0x00008 + (ldg) * 0x02000)
84 #define LDSV2(ldg)		(PIO_LDSV + 0x00010 + (ldg) * 0x02000)
85 #define LDGIMGN(ldg)		(PIO_LDSV + 0x00018 + (ldg) * 0x02000)
86 #define  LDGIMGN_ARM		(1ULL << 31)
87 #define  LDGIMGN_TIMER		(63ULL << 0)
88 
/*
 * Per-logical-device interrupt mask registers.  LDNs 0-63 live in the
 * PIO_IMASK0 space, LDNs 64 and up in PIO_IMASK1.  The macro argument
 * is parenthesized so expression arguments expand correctly.
 */
#define LD_IM0(idx)		(PIO_IMASK0 + 0x00000 + (idx) * 0x02000)
#define  LD_IM0_LDF_MASK	(3ULL << 0)
#define LD_IM1(idx)		(PIO_IMASK1 + 0x00000 + ((idx) - 64) * 0x02000)
#define  LD_IM1_LDF_MASK	(3ULL << 0)
93 
94 #define SID(ldg)		(FZC_PIO + 0x10200 + (ldg) * 0x00008)
95 #define LDG_NUM(ldn)		(FZC_PIO + 0x20000 + (ldn) * 0x00008)
96 
/*
 * IPP register blocks are laid out in 0/2/1/3 port order; swap the two
 * low bits of the port number to index them.  Every use of the macro
 * argument is parenthesized so expression arguments expand correctly.
 */
#define ipp_port(port)		((((port) & 0x1) << 1) | (((port) & 0x2) >> 1))
98 #define IPP_CFIG(port)		(FZC_IPP + 0x00000 + ipp_port(port) * 0x04000)
99 #define  IPP_CFIG_SOFT_RST		(1ULL << 31)
100 #define  IPP_CFIG_DFIFO_PIO_W		(1ULL << 5)
101 #define  IPP_CFIG_IPP_ENABLE		(1ULL << 0)
102 #define IPP_INT_STAT(port)	(FZC_IPP + 0x00040 + ipp_port(port) * 0x04000)
103 #define IPP_MSK(port)		(FZC_IPP + 0x00048 + ipp_port(port) * 0x04000)
104 #define IPP_DFIFO_RD1(port)	(FZC_IPP + 0x000c0 + ipp_port(port) * 0x04000)
105 #define IPP_DFIFO_RD2(port)	(FZC_IPP + 0x000c8 + ipp_port(port) * 0x04000)
106 #define IPP_DFIFO_RD3(port)	(FZC_IPP + 0x000d0 + ipp_port(port) * 0x04000)
107 #define IPP_DFIFO_RD4(port)	(FZC_IPP + 0x000d8 + ipp_port(port) * 0x04000)
108 #define IPP_DFIFO_RD5(port)	(FZC_IPP + 0x000e0 + ipp_port(port) * 0x04000)
109 #define IPP_DFIFO_WR1(port)	(FZC_IPP + 0x000e8 + ipp_port(port) * 0x04000)
110 #define IPP_DFIFO_WR2(port)	(FZC_IPP + 0x000f0 + ipp_port(port) * 0x04000)
111 #define IPP_DFIFO_WR3(port)	(FZC_IPP + 0x000f8 + ipp_port(port) * 0x04000)
112 #define IPP_DFIFO_WR4(port)	(FZC_IPP + 0x00100 + ipp_port(port) * 0x04000)
113 #define IPP_DFIFO_WR5(port)	(FZC_IPP + 0x00108 + ipp_port(port) * 0x04000)
114 #define IPP_DFIFO_RD_PTR(port)	(FZC_IPP + 0x00110 + ipp_port(port) * 0x04000)
115 #define IPP_DFIFO_WR_PTR(port)	(FZC_IPP + 0x00118 + ipp_port(port) * 0x04000)
116 
117 #define IPP_NIU_DFIFO_ENTRIES		1024
118 #define	IPP_P0_P1_DFIFO_ENTRIES		2048
119 #define IPP_P2_P3_DFIFO_ENTRIES		1024
120 
121 #define ZCP_CFIG		(FZC_ZCP + 0x00000)
122 #define ZCP_INT_STAT		(FZC_ZCP + 0x00008)
123 #define ZCP_INT_MASK		(FZC_ZCP + 0x00010)
124 
125 #define TXC_DMA_MAX(chan)	(FZC_TXC + 0x00000 + (chan) * 0x01000)
126 #define TXC_CONTROL		(FZC_TXC + 0x20000)
127 #define  TXC_CONTROL_TXC_ENABLED	(1ULL << 4)
128 #define TXC_PORT_DMA(port)	(FZC_TXC + 0x20028 + (port) * 0x00100)
129 #define TXC_PKT_STUFFED(port)	(FZC_TXC + 0x20030 + (port) * 0x00100)
130 #define TXC_PKT_XMIT(port)	(FZC_TXC + 0x20038 + (port) * 0x00100)
131 #define TXC_INT_STAT_DBG	(FZC_TXC + 0x20420)
132 #define TXC_INT_STAT		(FZC_TXC + 0x20428)
133 #define TXC_INT_MASK		(FZC_TXC + 0x20430)
134 #define  TXC_INT_MASK_PORT_INT_MASK(port) (0x3fULL << ((port) * 8))
135 
136 #define XTXMAC_SW_RST(port)	(FZC_MAC + 0x00000 + (port) * 0x06000)
137 #define  XTXMAC_SW_RST_REG_RST		(1ULL << 1)
138 #define  XTXMAC_SW_RST_SOFT_RST		(1ULL << 0)
139 #define XRXMAC_SW_RST(port)	(FZC_MAC + 0x00008 + (port) * 0x06000)
140 #define  XRXMAC_SW_RST_REG_RST		(1ULL << 1)
141 #define  XRXMAC_SW_RST_SOFT_RST		(1ULL << 0)
142 #define XTXMAC_STATUS(port)	(FZC_MAC + 0x00020 + (port) * 0x06000)
143 #define XRXMAC_STATUS(port)	(FZC_MAC + 0x00028 + (port) * 0x06000)
144 #define XTXMAC_STAT_MSK(port)	(FZC_MAC + 0x00040 + (port) * 0x06000)
145 #define XRXMAC_STAT_MSK(port)	(FZC_MAC + 0x00048 + (port) * 0x06000)
146 #define XMAC_CONFIG(port)	(FZC_MAC + 0x00060 + (port) * 0x06000)
147 #define  XMAC_CONFIG_SEL_CLK_25MHZ	(1ULL << 31)
148 #define  XMAC_CONFIG_1G_PCS_BYPASS	(1ULL << 30)
149 #define  XMAC_CONFIG_MODE_MASK		(3ULL << 27)
150 #define  XMAC_CONFIG_MODE_XGMII		(0ULL << 27)
151 #define  XMAC_CONFIG_MODE_GMII		(1ULL << 27)
152 #define  XMAC_CONFIG_MODE_MII		(2ULL << 27)
153 #define  XMAC_CONFIG_LFS_DISABLE	(1ULL << 26)
154 #define  XMAC_CONFIG_LOOPBACK		(1ULL << 25)
155 #define  XMAC_CONFIG_TX_OUTPUT_EN	(1ULL << 24)
156 #define  XMAC_CONFIG_SEL_POR_CLK_SRC	(1ULL << 23)
157 #define  XMAC_CONFIG_HASH_FILTER_EN	(1ULL << 15)
158 #define  XMAC_CONFIG_PROMISCUOUS_GROUP	(1ULL << 10)
159 #define  XMAC_CONFIG_PROMISCUOUS	(1ULL << 9)
160 #define  XMAC_CONFIG_RX_MAC_ENABLE	(1ULL << 8)
161 #define  XMAC_CONFIG_ALWAYS_NO_CRC	(1ULL << 3)
162 #define  XMAC_CONFIG_VAR_MIN_IPG_EN	(1ULL << 2)
163 #define  XMAC_CONFIG_STRETCH_MODE	(1ULL << 1)
164 #define  XMAC_CONFIG_TX_ENABLE		(1ULL << 0)
165 
166 #define XMAC_IPG(port)		(FZC_MAC + 0x00080 + (port) * 0x06000)
167 #define  XMAC_IPG_IPG_VALUE1_MASK	(0xffULL << 8)
168 #define  XMAC_IPG_IPG_VALUE1_12		(10ULL << 8)
169 #define  XMAC_IPG_IPG_VALUE_MASK	(0x07ULL << 0)
170 #define  XMAC_IPG_IPG_VALUE_12_15	(3ULL << 0)
171 
172 #define XMAC_MIN(port)		(FZC_MAC + 0x00088 + (port) * 0x06000)
173 #define  XMAC_MIN_RX_MIN_PKT_SIZE_MASK	(0x3ffULL << 20)
174 #define  XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT	20
175 #define  XMAC_MIN_TX_MIN_PKT_SIZE_MASK	(0x3ffULL << 0)
176 #define  XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT	0
177 #define XMAC_MAX(port)		(FZC_MAC + 0x00090 + (port) * 0x06000)
178 
179 #define XMAC_ADDR0(port)	(FZC_MAC + 0x000a0 + (port) * 0x06000)
180 #define XMAC_ADDR1(port)	(FZC_MAC + 0x000a8 + (port) * 0x06000)
181 #define XMAC_ADDR2(port)	(FZC_MAC + 0x000b0 + (port) * 0x06000)
182 
183 #define XMAC_ADDR_CMPEN(port)	(FZC_MAC + 0x00208 + (port) * 0x06000)
184 
185 #define XMAC_ADD_FILT0(port)	(FZC_MAC + 0x00818 + (port) * 0x06000)
186 #define XMAC_ADD_FILT1(port)	(FZC_MAC + 0x00820 + (port) * 0x06000)
187 #define XMAC_ADD_FILT2(port)	(FZC_MAC + 0x00828 + (port) * 0x06000)
188 #define XMAC_ADD_FILT12_MASK(port) (FZC_MAC + 0x00830 + (port) * 0x06000)
189 #define XMAC_ADD_FILT00_MASK(port) (FZC_MAC + 0x00838 + (port) * 0x06000)
190 
191 #define XMAC_HASH_TBL0(port)	(FZC_MAC + 0x00840 + (port) * 0x06000)
192 #define XMAC_HASH_TBL(port, i)	(XMAC_HASH_TBL0(port) + (i) * 0x00008)
193 
194 #define XMAC_HOST_INFO0(port)	(FZC_MAC + 0x00900 + (port) * 0x06000)
195 #define XMAC_HOST_INFO(port, i)	(XMAC_HOST_INFO0(port) + (i) * 0x00008)
196 
197 #define RXMAC_BT_CNT(port)	(FZC_MAC + 0x00100 + (port) * 0x06000)
198 
199 #define TXMAC_FRM_CNT(port)	(FZC_MAC + 0x00170 + (port) * 0x06000)
200 #define TXMAC_BYTE_CNT(port)	(FZC_MAC + 0x00178 + (port) * 0x06000)
201 
202 #define LINK_FAULT_CNT(port)	(FZC_MAC + 0x00180 + (port) * 0x06000)
203 #define XMAC_SM_REG(port)	(FZC_MAC + 0x001a8 + (port) * 0x06000)
204 
205 #define TXMAC_SW_RST(port)	(FZC_MAC + 0x0c000 + ((port) - 2) * 0x04000)
206 #define  TXMAC_SW_RST_SW_RST		(1ULL << 0)
207 #define RXMAC_SW_RST(port)	(FZC_MAC + 0x0c008 + ((port) - 2) * 0x04000)
208 #define  RXMAC_SW_RST_SW_RST		(1ULL << 0)
209 #define TXMAC_CONFIG(port)	(FZC_MAC + 0x0c060 + ((port) - 2) * 0x04000)
210 #define  TXMAC_CONFIG_TX_ENABLE		(1ULL << 0)
211 #define RXMAC_CONFIG(port)	(FZC_MAC + 0x0c068 + ((port) - 2) * 0x04000)
212 #define  RXMAC_CONFIG_ERROR_CHK_DIS	(1ULL << 7)
213 #define  RXMAC_CONFIG_ADDR_FILTER_EN	(1ULL << 6)
214 #define  RXMAC_CONFIG_HASH_FILTER_EN	(1ULL << 5)
215 #define  RXMAC_CONFIG_PROMISCUOUS_GROUP	(1ULL << 4)
216 #define  RXMAC_CONFIG_PROMISCUOUS	(1ULL << 3)
217 #define  RXMAC_CONFIG_STRIP_FCS		(1ULL << 2)
218 #define  RXMAC_CONFIG_STRIP_PAD		(1ULL << 1)
219 #define  RXMAC_CONFIG_RX_ENABLE		(1ULL << 0)
220 #define MAC_XIF_CONFIG(port)	(FZC_MAC + 0x0c078 + ((port) - 2) * 0x04000)
221 #define  MAC_XIF_CONFIG_SEL_CLK_25MHZ	(1ULL << 7)
222 #define  MAC_XIF_CONFIG_GMII_MODE	(1ULL << 3)
223 #define  MAC_XIF_CONFIG_LOOPBACK	(1ULL << 1)
224 #define  MAC_XIF_CONFIG_TX_OUTPUT_EN	(1ULL << 0)
225 #define BMAC_MIN(port)		(FZC_MAC + 0x0c0a0 + ((port) - 2) * 0x04000)
226 #define BMAC_MAX(port)		(FZC_MAC + 0x0c0a8 + ((port) - 2) * 0x04000)
227 #define  BMAC_MAX_BURST_SHIFT		16
228 #define MAC_PA_SIZE(port)	(FZC_MAC + 0x0c0b0 + ((port) - 2) * 0x04000)
229 #define MAC_CTRL_TYPE(port)	(FZC_MAC + 0x0c0b8 + ((port) - 2) * 0x04000)
230 #define BMAC_ADDR0(port)	(FZC_MAC + 0x0c100 + ((port) - 2) * 0x04000)
231 #define BMAC_ADDR1(port)	(FZC_MAC + 0x0c108 + ((port) - 2) * 0x04000)
232 #define BMAC_ADDR2(port)	(FZC_MAC + 0x0c110 + ((port) - 2) * 0x04000)
233 
234 #define MAC_ADDR_FILT0(port)	(FZC_MAC + 0x0c298 + ((port) - 2) * 0x04000)
235 #define MAC_ADDR_FILT1(port)	(FZC_MAC + 0x0c2a0 + ((port) - 2) * 0x04000)
236 #define MAC_ADDR_FILT2(port)	(FZC_MAC + 0x0c2a8 + ((port) - 2) * 0x04000)
237 #define MAC_ADDR_FILT12_MASK(port) (FZC_MAC + 0x0c2b0 + ((port) - 2) * 0x04000)
238 #define MAC_ADDR_FILT00_MASK(port) (FZC_MAC + 0x0c2b8 + ((port) - 2) * 0x04000)
239 
240 #define MAC_HASH_TBL0(port)	(FZC_MAC + 0x0c2c0 + ((port) - 2) * 0x04000)
241 #define MAC_HASH_TBL(port, i)	(MAC_HASH_TBL0(port) + (i) * 0x00008)
242 
243 #define RXMAC_FRM_CNT(port)	(FZC_MAC + 0x0c370 + ((port) - 2) * 0x04000)
244 #define BMAC_ALTAD_CMPEN(port)	(FZC_MAC + 0x0c3f8 + ((port) - 2) * 0x04000)
245 
246 #define BMAC_HOST_INFO0(port)	(FZC_MAC + 0x0c400 + ((port) - 2) * 0x04000)
247 #define BMAC_HOST_INFO(port, i)	(BMAC_HOST_INFO0(port) + (i) * 0x00008)
248 
249 #define PCS_PORT_OFFSET(port)	((port < 2) ? ((port) * 0x06000) : \
250 					(0x02000 + (port) * 0x4000))
251 #define PCS_MII_CTL(port)	(FZC_MAC + 0x04000 + PCS_PORT_OFFSET(port))
252 #define  PCS_MII_CTL_RESET		(1ULL << 15)
253 #define PCS_DPATH_MODE(port)	(FZC_MAC + 0x040a0 + PCS_PORT_OFFSET(port))
254 #define  PCS_DPATH_MODE_MII		(1ULL << 1)
255 
256 #define MIF_FRAME_OUTPUT	(FZC_MAC + 0x16018)
257 #define  MIF_FRAME_DATA			0xffff
258 #define  MIF_FRAME_TA0			(1ULL << 16)
259 #define  MIF_FRAME_TA1			(1ULL << 17)
260 #define  MIF_FRAME_REG_SHIFT		18
261 #define  MIF_FRAME_PHY_SHIFT		23
262 #define  MIF_FRAME_READ			0x60020000
263 #define  MIF_FRAME_WRITE		0x50020000
264 #define MIF_CONFIG		(FZC_MAC + 0x16020)
265 #define  MIF_CONFIG_INDIRECT_MODE	(1ULL << 15)
266 
267 #define DEF_PT0_RDC		(FZC_DMC + 0x00008)
268 #define DEF_PT_RDC(port)	(DEF_PT0_RDC + (port) * 0x00008)
269 #define RDC_TBL(tbl, i)		(FZC_ZCP + 0x10000 + (tbl * 16 + i) * 0x00008)
270 
271 #define RX_LOG_PAGE_VLD(chan)	(FZC_DMC + 0x20000 + (chan) * 0x00040)
272 #define  RX_LOG_PAGE_VLD_PAGE0		(1ULL << 0)
273 #define  RX_LOG_PAGE_VLD_PAGE1		(1ULL << 1)
274 #define  RX_LOG_PAGE_VLD_FUNC_SHIFT	2
275 #define RX_LOG_MASK1(chan)	(FZC_DMC + 0x20008 + (chan) * 0x00040)
276 #define RX_LOG_VALUE1(chan)	(FZC_DMC + 0x20010 + (chan) * 0x00040)
277 #define RX_LOG_MASK2(chan)	(FZC_DMC + 0x20018 + (chan) * 0x00040)
278 #define RX_LOG_VALUE2(chan)	(FZC_DMC + 0x20020 + (chan) * 0x00040)
279 #define RX_LOG_PAGE_RELO1(chan)	(FZC_DMC + 0x20028 + (chan) * 0x00040)
280 #define RX_LOG_PAGE_RELO2(chan)	(FZC_DMC + 0x20030 + (chan) * 0x00040)
281 #define RX_LOG_PAGE_HDL(chan)	(FZC_DMC + 0x20038 + (chan) * 0x00040)
282 
283 #define RXDMA_CFIG1(chan)	(DMC + 0x00000 + (chan) * 0x00200)
284 #define  RXDMA_CFIG1_EN			(1ULL << 31)
285 #define  RXDMA_CFIG1_RST		(1ULL << 30)
286 #define  RXDMA_CFIG1_QST		(1ULL << 29)
287 #define RXDMA_CFIG2(chan)	(DMC + 0x00008 + (chan) * 0x00200)
288 #define  RXDMA_CFIG2_OFFSET_MASK	(3ULL << 2)
289 #define  RXDMA_CFIG2_OFFSET_0		(0ULL << 2)
290 #define  RXDMA_CFIG2_OFFSET_64		(1ULL << 2)
291 #define  RXDMA_CFIG2_OFFSET_128		(2ULL << 2)
292 #define  RXDMA_CFIG2_FULL_HDR		(1ULL << 0)
293 
294 #define RBR_CFIG_A(chan)	(DMC + 0x00010 + (chan) * 0x00200)
295 #define  RBR_CFIG_A_LEN_SHIFT		48
296 #define RBR_CFIG_B(chan)	(DMC + 0x00018 + (chan) * 0x00200)
297 #define  RBR_CFIG_B_BLKSIZE_MASK	(3ULL << 24)
298 #define  RBR_CFIG_B_BLKSIZE_4K		(0ULL << 24)
299 #define  RBR_CFIG_B_BLKSIZE_8K		(1ULL << 24)
300 #define  RBR_CFIG_B_BLKSIZE_16K		(2ULL << 24)
301 #define  RBR_CFIG_B_BLKSIZE_32K		(3ULL << 24)
302 #define  RBR_CFIG_B_VLD2		(1ULL << 23)
303 #define  RBR_CFIG_B_BUFSZ2_MASK		(3ULL << 16)
304 #define  RBR_CFIG_B_BUFSZ2_2K		(0ULL << 16)
305 #define  RBR_CFIG_B_BUFSZ2_4K		(1ULL << 16)
306 #define  RBR_CFIG_B_BUFSZ2_8K		(2ULL << 16)
307 #define  RBR_CFIG_B_BUFSZ2_16K		(3ULL << 16)
308 #define  RBR_CFIG_B_VLD1		(1ULL << 15)
309 #define  RBR_CFIG_B_BUFSZ1_MASK		(3ULL << 8)
310 #define  RBR_CFIG_B_BUFSZ1_1K		(0ULL << 8)
311 #define  RBR_CFIG_B_BUFSZ1_2K		(1ULL << 8)
312 #define  RBR_CFIG_B_BUFSZ1_4K		(2ULL << 8)
313 #define  RBR_CFIG_B_BUFSZ1_8K		(3ULL << 8)
314 #define  RBR_CFIG_B_VLD0		(1ULL << 7)
315 #define  RBR_CFIG_B_BUFSZ0_MASK		(3ULL << 0)
316 #define  RBR_CFIG_B_BUFSZ0_256		(0ULL << 0)
317 #define  RBR_CFIG_B_BUFSZ0_512		(1ULL << 0)
318 #define  RBR_CFIG_B_BUFSZ0_1K		(2ULL << 0)
319 #define  RBR_CFIG_B_BUFSZ0_2K		(3ULL << 0)
320 #define RBR_KICK(chan)		(DMC + 0x00020 + (chan) * 0x00200)
321 #define RBR_STAT(chan)		(DMC + 0x00028 + (chan) * 0x00200)
322 #define RBR_HDH(chan)		(DMC + 0x00030 + (chan) * 0x00200)
323 #define RBR_HDL(chan)		(DMC + 0x00038 + (chan) * 0x00200)
324 #define RCRCFIG_A(chan)		(DMC + 0x00040 + (chan) * 0x00200)
325 #define  RCRCFIG_A_LEN_SHIFT		48
326 #define RCRCFIG_B(chan)		(DMC + 0x00048 + (chan) * 0x00200)
327 #define  RCRCFIG_B_PTHRES_SHIFT		16
328 #define  RCRCFIG_B_ENTOUT		(1ULL << 15)
329 #define RCRSTAT_A(chan)		(DMC + 0x00050 + (chan) * 0x00200)
330 #define RCRSTAT_B(chan)		(DMC + 0x00058 + (chan) * 0x00200)
331 #define RCRSTAT_C(chan)		(DMC + 0x00060 + (chan) * 0x00200)
332 
333 #define RX_DMA_ENT_MSK(chan)	(DMC + 0x00068 + (chan) * 0x00200)
334 #define  RX_DMA_ENT_MSK_RBR_EMPTY	(1ULL << 3)
335 #define RX_DMA_CTL_STAT(chan)	(DMC + 0x00070 + (chan) * 0x00200)
336 #define  RX_DMA_CTL_STAT_MEX		(1ULL << 47)
337 #define  RX_DMA_CTL_STAT_RCRTHRES	(1ULL << 46)
338 #define  RX_DMA_CTL_STAT_RCRTO		(1ULL << 45)
339 #define  RX_DMA_CTL_STAT_RBR_EMPTY	(1ULL << 35)
340 #define  RX_DMA_CTL_STAT_PTRREAD_SHIFT	16
341 #define RX_DMA_CTL_STAT_DBG(chan) (DMC + 0x00098 + (chan) * 0x00200)
342 
343 #define TX_LOG_PAGE_VLD(chan)	(FZC_DMC + 0x40000 + (chan) * 0x00200)
344 #define  TX_LOG_PAGE_VLD_PAGE0		(1ULL << 0)
345 #define  TX_LOG_PAGE_VLD_PAGE1		(1ULL << 1)
346 #define  TX_LOG_PAGE_VLD_FUNC_SHIFT	2
347 #define TX_LOG_MASK1(chan)	(FZC_DMC + 0x40008 + (chan) * 0x00200)
348 #define TX_LOG_VALUE1(chan)	(FZC_DMC + 0x40010 + (chan) * 0x00200)
349 #define TX_LOG_MASK2(chan)	(FZC_DMC + 0x40018 + (chan) * 0x00200)
350 #define TX_LOG_VALUE2(chan)	(FZC_DMC + 0x40020 + (chan) * 0x00200)
351 #define TX_LOG_PAGE_RELO1(chan)	(FZC_DMC + 0x40028 + (chan) * 0x00200)
352 #define TX_LOG_PAGE_RELO2(chan)	(FZC_DMC + 0x40030 + (chan) * 0x00200)
353 #define TX_LOG_PAGE_HDL(chan)	(FZC_DMC + 0x40038 + (chan) * 0x00200)
354 
355 #define TX_RNG_CFIG(chan)	(DMC + 0x40000 + (chan) * 0x00200)
356 #define  TX_RNG_CFIG_LEN_SHIFT		48
357 #define TX_RING_HDL(chan)	(DMC + 0x40010 + (chan) * 0x00200)
358 #define TX_RING_KICK(chan)	(DMC + 0x40018 + (chan) * 0x00200)
359 #define  TX_RING_KICK_WRAP		(1ULL << 19)
360 #define TX_ENT_MSK(chan)	(DMC + 0x40020 + (chan) * 0x00200)
361 #define TX_CS(chan)		(DMC + 0x40028 + (chan) * 0x00200)
362 #define  TX_CS_PKT_CNT_MASK		(0xfffULL << 48)
363 #define  TX_CS_PKT_CNT_SHIFT		48
364 #define  TX_CS_RST			(1ULL << 31)
365 #define  TX_CS_STOP_N_GO		(1ULL << 28)
366 #define  TX_CS_SNG_STATE		(1ULL << 27)
367 #define TDMC_INTR_DBG(chan)	(DMC + 0x40060 + (chan) * 0x00200)
368 #define TXDMA_MBH(chan)		(DMC + 0x40030 + (chan) * 0x00200)
369 #define TXDMA_MBL(chan)		(DMC + 0x40038 + (chan) * 0x00200)
370 #define TX_RNG_ERR_LOGH(chan)	(DMC + 0x40048 + (chan) * 0x00200)
371 #define TX_RNG_ERR_LOGL(chan)	(DMC + 0x40050 + (chan) * 0x00200)
372 
373 #define RXD_MULTI		(1ULL << 63)
374 #define RXD_L2_LEN_MASK		(0x3fffULL << 40)
375 #define RXD_L2_LEN_SHIFT	40
376 #define RXD_PKT_BUF_ADDR_MASK	0x3fffffffffULL
377 #define RXD_PKT_BUF_ADDR_SHIFT	6
378 
/*
 * One receive buffer block: a DMA map plus the PAGE_SIZE chunk of
 * memory (allocated from nep_block_pool) that it maps.
 */
struct nep_block {
	bus_dmamap_t	nb_map;		/* DMA map for nb_block */
	void		*nb_block;	/* backing buffer memory */
};

#define NEP_NRBDESC	256	/* RX buffer ring (RBR) entries */
#define NEP_NRCDESC	512	/* RX completion ring (RCR) entries */
386 
387 #define TXD_SOP			(1ULL << 63)
388 #define TXD_MARK		(1ULL << 62)
389 #define TXD_NUM_PTR_SHIFT	58
390 #define TXD_TR_LEN_SHIFT	44
391 
/*
 * Software header stored in front of each transmit packet; the flag
 * word presumably follows the TXD_* layout above and the second word
 * pads the header to 16 bytes -- consumed in nep_encap, outside this
 * view; confirm against that code.
 */
struct nep_txbuf_hdr {
	uint64_t	nh_flags;
	uint64_t	nh_reserved;
};
396 
/* Per-descriptor transmit bookkeeping: the DMA map and the mbuf it maps. */
struct nep_buf {
	bus_dmamap_t	nb_map;		/* DMA map for nb_m */
	struct mbuf	*nb_m;		/* mbuf associated with this slot */
};

#define NEP_NTXDESC	256	/* TX descriptor ring entries */
#define NEP_NTXSEGS	15	/* max DMA segments accepted per packet */
404 
405 struct nep_dmamem {
406 	bus_dmamap_t		ndm_map;
407 	bus_dma_segment_t	ndm_seg;
408 	size_t			ndm_size;
409 	caddr_t			ndm_kva;
410 };
411 #define NEP_DMA_MAP(_ndm)	((_ndm)->ndm_map)
412 #define NEP_DMA_LEN(_ndm)	((_ndm)->ndm_size)
413 #define NEP_DMA_DVA(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_addr)
414 #define NEP_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva);
415 
struct pool *nep_block_pool;	/* pool of RX buffer blocks, shared by all ports */

/* Per-port driver state; one logical device (softc) per port. */
struct nep_softc {
	struct device		sc_dev;		/* generic device glue */
	struct arpcom		sc_ac;		/* ethernet common data */
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;		/* MII/PHY state */
#define sc_media	sc_mii.mii_media

	bus_dma_tag_t		sc_dmat;	/* bus DMA tag */
	bus_space_tag_t		sc_memt;	/* register space tag */
	bus_space_handle_t 	sc_memh;	/* register space handle */
	bus_size_t		sc_mems;	/* register space size */
	void			*sc_ih;		/* interrupt handle */

	int			sc_port;	/* port number == PCI function */

	/* Transmit ring. */
	struct nep_dmamem	*sc_txring;	/* TX descriptor ring memory */
	struct nep_buf		*sc_txbuf;	/* per-descriptor bookkeeping */
	uint64_t		*sc_txdesc;	/* TX descriptors (KVA) */
	int			sc_tx_prod;	/* producer index */
	int			sc_tx_cnt;	/* descriptors in use */
	int			sc_tx_cons;	/* consumer index */

	uint64_t		sc_wrap;	/* TX ring wrap state */
	uint16_t		sc_pkt_cnt;	/* last hardware packet count */

	/* Receive buffer (RBR) and completion (RCR) rings. */
	struct nep_dmamem	*sc_rbring;	/* RX buffer ring memory */
	struct nep_block	*sc_rb;		/* RX buffer blocks */
	uint32_t		*sc_rbdesc;	/* RX buffer descriptors (KVA) */
	struct if_rxring	sc_rx_ring;	/* rx ring accounting */
	int			sc_rx_prod;	/* RBR producer index */
	struct nep_dmamem	*sc_rcring;	/* RX completion ring memory */
	uint64_t		*sc_rcdesc;	/* RX completion descriptors */
	int			sc_rx_cons;	/* RCR consumer index */

	struct nep_dmamem	*sc_rxmbox;	/* RX mailbox memory */

	struct timeout		sc_tick;	/* periodic tick timeout */
};
456 
457 int	nep_match(struct device *, void *, void *);
458 void	nep_attach(struct device *, struct device *, void *);
459 
/* Autoconf attachment glue. */
struct cfattach nep_ca = {
	sizeof(struct nep_softc), nep_match, nep_attach
};

struct cfdriver nep_cd = {
	NULL, "nep", DV_DULL
};
467 
468 static u_int	nep_mextfree_idx;
469 
470 int	nep_pci_enaddr(struct nep_softc *, struct pci_attach_args *);
471 
472 uint64_t nep_read(struct nep_softc *, uint32_t);
473 void	nep_write(struct nep_softc *, uint32_t, uint64_t);
474 int	nep_mii_readreg(struct device *, int, int);
475 void	nep_mii_writereg(struct device *, int, int, int);
476 void	nep_mii_statchg(struct device *);
477 void	nep_xmac_mii_statchg(struct nep_softc *);
478 void	nep_bmac_mii_statchg(struct nep_softc *);
479 int	nep_media_change(struct ifnet *);
480 void	nep_media_status(struct ifnet *, struct ifmediareq *);
481 int	nep_intr(void *);
482 
483 void	nep_rx_proc(struct nep_softc *);
484 void	nep_extfree(caddr_t, u_int, void *);
485 void	nep_tx_proc(struct nep_softc *);
486 
487 void	nep_init_ipp(struct nep_softc *);
488 void	nep_ipp_clear_dfifo(struct nep_softc *, uint64_t);
489 void	nep_init_rx_mac(struct nep_softc *);
490 void	nep_init_rx_xmac(struct nep_softc *);
491 void	nep_init_rx_bmac(struct nep_softc *);
492 void	nep_init_rx_channel(struct nep_softc *, int);
493 void	nep_init_tx_mac(struct nep_softc *);
494 void	nep_init_tx_xmac(struct nep_softc *);
495 void	nep_init_tx_bmac(struct nep_softc *);
496 void	nep_init_tx_channel(struct nep_softc *, int);
497 void	nep_enable_rx_mac(struct nep_softc *);
498 void	nep_disable_rx_mac(struct nep_softc *);
499 void	nep_stop_dma(struct nep_softc *);
500 
501 void	nep_fill_rx_ring(struct nep_softc *);
502 
503 void	nep_up(struct nep_softc *);
504 void	nep_down(struct nep_softc *);
505 void	nep_iff(struct nep_softc *);
506 int	nep_encap(struct nep_softc *, struct mbuf **, int *);
507 
508 void	nep_start(struct ifnet *);
509 void	nep_watchdog(struct ifnet *);
510 void	nep_tick(void *);
511 int	nep_ioctl(struct ifnet *, u_long, caddr_t);
512 
513 struct nep_dmamem *nep_dmamem_alloc(struct nep_softc *, size_t);
514 void	nep_dmamem_free(struct nep_softc *, struct nep_dmamem *);
515 
516 /*
517  * SUNW,pcie-neptune: 4x1G onboard on T5140/T5240
518  * SUNW,pcie-qgc: 4x1G, "Sun Quad GbE UTP x8 PCI Express Card"
519  * SUNW,pcie-qgc-pem: 4x1G, "Sun Quad GbE UTP x8 PCIe ExpressModule"
520  * SUNW,pcie-2xgf: 2x10G, "Sun Dual 10GbE XFP PCI Express Card"
521  * SUNW,pcie-2xgf-pem: 2x10G, "Sun Dual 10GbE XFP PCIe ExpressModule"
522  */
523 int
524 nep_match(struct device *parent, void *match, void *aux)
525 {
526 	struct pci_attach_args *pa = aux;
527 
528 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
529 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_NEPTUNE)
530 		return (1);
531 	return (0);
532 }
533 
/*
 * Attach one port.  The PCI function number doubles as the port
 * number and as the logical device group used for its interrupts.
 */
void
nep_attach(struct device *parent, struct device *self, void *aux)
{
	struct nep_softc *sc = (struct nep_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	pcireg_t memtype;
	uint64_t val;

	/* Register the external mbuf storage free routine once, lazily. */
	if (nep_mextfree_idx == 0)
		nep_mextfree_idx = mextfree_register(nep_extfree);

	sc->sc_dmat = pa->pa_dmat;

	/* Map the 64-bit register BAR. */
	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": can't map registers\n");
		return;
	}

	/* Prefer MSI; fall back to a legacy interrupt. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		return;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih =  pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    nep_intr, sc, self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		return;
	}

	printf(": %s", intrstr);

	sc->sc_port = pa->pa_function;

	/* Route this port's logical devices to its own device group. */
	nep_write(sc, SID(sc->sc_port), pa->pa_function << 5);
	nep_write(sc, LDG_NUM(LDN_RXDMA(sc->sc_port)), sc->sc_port);
	nep_write(sc, LDG_NUM(LDN_TXDMA(sc->sc_port)), sc->sc_port);
	nep_write(sc, LDG_NUM(LDN_MAC(sc->sc_port)), sc->sc_port);

	/* Port 0 gets the MIF and error interrupts. */
	if (sc->sc_port == 0) {
		nep_write(sc, LDG_NUM(LDN_MIF), sc->sc_port);
		nep_write(sc, LDG_NUM(LDN_SYSERR), sc->sc_port);
		nep_write(sc, ZCP_INT_MASK, 0);
	}

#ifdef __sparc64__
	/*
	 * On sparc64, prefer the firmware "local-mac-address"
	 * property; otherwise fall back to the VPD in the ROM.
	 */
	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
	    sc->sc_lladdr, ETHER_ADDR_LEN) <= 0)
#endif
		nep_pci_enaddr(sc, pa);

	printf(", address %s\n", ether_sprintf(sc->sc_lladdr));

	/* The pool of RX buffer blocks is shared by all ports. */
	if (nep_block_pool == NULL) {
		nep_block_pool = malloc(sizeof(*nep_block_pool),
		    M_DEVBUF, M_WAITOK);
		if (nep_block_pool == NULL) {
			printf("%s: unable to allocate block pool\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		pool_init(nep_block_pool, PAGE_SIZE, 0, 0, 0,
		    "nepblk", NULL);
		pool_setipl(nep_block_pool, IPL_NET);
	}

	/* Talk to the PHYs in direct (frame) mode. */
	val = nep_read(sc, MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nep_write(sc, MIF_CONFIG, val);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nep_ioctl;
	ifp->if_start = nep_start;
	ifp->if_watchdog = nep_watchdog;

	mii->mii_ifp = ifp;
	mii->mii_readreg = nep_mii_readreg;
	mii->mii_writereg = nep_mii_writereg;
	mii->mii_statchg = nep_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, nep_media_change, nep_media_status);

	/*
	 * The PHYs are wired up in reverse order on the 4x1G (RGMII)
	 * configuration.
	 */
	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    sc->sc_port ^ 0x3, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, nep_tick, sc);

	/* Enable the MIF and error interrupts. */
	if (sc->sc_port == 0) {
		nep_write(sc, LD_IM0(LDN_MIF), 0);
		nep_write(sc, LD_IM1(LDN_SYSERR), 0);
	}
}
655 
/* Offsets into the PCI expansion ROM header and PCI data structure. */
#define PROMHDR_PTR_DATA	0x18	/* pointer to PCI data structure */
#define PROMDATA_PTR_VPD	0x08	/* pointer to vital product data */
#define PROMDATA_LEN		0x10	/* image length, in 512-byte units */
#define PROMDATA_TYPE		0x14	/* code type (per the PCI spec) */

/* Expansion ROM image signature. */
static const uint8_t nep_promhdr[] = { 0x55, 0xaa };
/* PCI data structure signature followed by our vendor/product (LE). */
static const uint8_t nep_promdat[] = {
	'P', 'C', 'I', 'R',
	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
	PCI_PRODUCT_SUN_NEPTUNE & 0xff, PCI_PRODUCT_SUN_NEPTUNE >> 8
};
667 
/*
 * Extract the MAC address from the "local-mac-address" property stored
 * in the enhanced VPD section of the PCI expansion ROM.  Returns 0 on
 * success and -1 on failure.
 */
int
nep_pci_enaddr(struct nep_softc *sc, struct pci_attach_args *pa)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int off = 0;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, 0, &romsize, 0))
		return (-1);

	/* The ROM contents are only visible while the enable bit is set. */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address |= PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	/* Walk the chain of ROM images looking for the one we want. */
	while (off < romsize) {
		bus_space_read_region_1(romt, romh, off, buf, sizeof(buf));
		if (memcmp(buf, nep_promhdr, sizeof(nep_promhdr)))
			goto fail;

		dataoff =
		    buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
		if (dataoff < 0x1c)
			goto fail;
		dataoff += off;

		/* The PCI data structure must name our vendor/product. */
		bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
		if (memcmp(buf, nep_promdat, sizeof(nep_promdat)))
			goto fail;

		/* Code type 1 is the image carrying the VPD we need. */
		if (buf[PROMDATA_TYPE] == 1)
		    break;

		/* Skip to the next image; length is in 512-byte units. */
		len = buf[PROMDATA_LEN] | (buf[PROMDATA_LEN + 1] << 8);
		off += len * 512;
	}

	/* buf still holds this image's PCI data structure. */
	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;
	vpdoff += off;

next:
	/* Walk the VPD large-resource list. */
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		/* Scan the keywords inside the VPD resource. */
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			     buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			/* Each function gets a consecutive address. */
			memcpy(sc->sc_ac.ac_enaddr, desc, ETHER_ADDR_LEN);
			sc->sc_ac.ac_enaddr[5] += pa->pa_function;
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

 fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	/* Disable the ROM again now that we are done with it. */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address &= ~PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);

	return (rv);
}
791 
/* Read a 64-bit device register. */
uint64_t
nep_read(struct nep_softc *sc, uint32_t reg)
{
	return (bus_space_read_8(sc->sc_memt, sc->sc_memh, reg));
}
797 
/* Write a 64-bit device register. */
void
nep_write(struct nep_softc *sc, uint32_t reg, uint64_t value)
{
	bus_space_write_8(sc->sc_memt, sc->sc_memh, reg, value);
}
803 
804 int
805 nep_mii_readreg(struct device *self, int phy, int reg)
806 {
807 	struct nep_softc *sc = (struct nep_softc *)self;
808 	uint64_t frame;
809 	int n;
810 
811 	frame = MIF_FRAME_READ;
812 	frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT);
813 	nep_write(sc, MIF_FRAME_OUTPUT, frame);
814 	for (n = 0; n < 1000; n++) {
815 		delay(10);
816 		frame = nep_read(sc, MIF_FRAME_OUTPUT);
817 		if (frame & MIF_FRAME_TA0)
818 			return (frame & MIF_FRAME_DATA);
819 	}
820 
821 	printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__);
822 	return (0);
823 }
824 
825 void
826 nep_mii_writereg(struct device *self, int phy, int reg, int val)
827 {
828 	struct nep_softc *sc = (struct nep_softc *)self;
829 	uint64_t frame;
830 	int n;
831 
832 	frame = MIF_FRAME_WRITE;
833 	frame |= (reg << MIF_FRAME_REG_SHIFT) | (phy << MIF_FRAME_PHY_SHIFT);
834 	frame |= (val & MIF_FRAME_DATA);
835 	nep_write(sc, MIF_FRAME_OUTPUT, frame);
836 	for (n = 0; n < 1000; n++) {
837 		delay(10);
838 		frame = nep_read(sc, MIF_FRAME_OUTPUT);
839 		if (frame & MIF_FRAME_TA0)
840 			return;
841 	}
842 
843 	printf("%s: %s timeout\n", sc->sc_dev.dv_xname, __func__);
844 	return;
845 }
846 
847 void
848 nep_mii_statchg(struct device *dev)
849 {
850 	struct nep_softc *sc = (struct nep_softc *)dev;
851 
852 	if (sc->sc_port < 2)
853 		nep_xmac_mii_statchg(sc);
854 	else
855 		nep_bmac_mii_statchg(sc);
856 }
857 
858 void
859 nep_xmac_mii_statchg(struct nep_softc *sc)
860 {
861 	struct mii_data *mii = &sc->sc_mii;
862 	uint64_t val;
863 
864 	val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
865 
866 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
867 		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
868 	else
869 		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
870 
871 	val |= XMAC_CONFIG_1G_PCS_BYPASS;
872 
873 	val &= ~XMAC_CONFIG_MODE_MASK;
874 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
875 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
876 		val |= XMAC_CONFIG_MODE_GMII;
877 	else
878 		val |= XMAC_CONFIG_MODE_MII;
879 
880 	val |= XMAC_CONFIG_LFS_DISABLE;
881 
882 	if (mii->mii_media_active & IFM_LOOP)
883 		val |= XMAC_CONFIG_LOOPBACK;
884 	else
885 		val &= ~XMAC_CONFIG_LOOPBACK;
886 
887 	val |= XMAC_CONFIG_TX_OUTPUT_EN;
888 
889 	nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
890 }
891 
892 void
893 nep_bmac_mii_statchg(struct nep_softc *sc)
894 {
895 	struct mii_data *mii = &sc->sc_mii;
896 	uint64_t val;
897 
898 	val = nep_read(sc, MAC_XIF_CONFIG(sc->sc_port));
899 
900 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
901 		val |= MAC_XIF_CONFIG_SEL_CLK_25MHZ;
902 	else
903 		val &= MAC_XIF_CONFIG_SEL_CLK_25MHZ;
904 
905 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
906 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
907 		val |= MAC_XIF_CONFIG_GMII_MODE;
908 	else
909 		val &= ~MAC_XIF_CONFIG_GMII_MODE;
910 
911 	if (mii->mii_media_active & IFM_LOOP)
912 		val |= MAC_XIF_CONFIG_LOOPBACK;
913 	else
914 		val &= ~MAC_XIF_CONFIG_LOOPBACK;
915 
916 	val |= MAC_XIF_CONFIG_TX_OUTPUT_EN;
917 
918 	nep_write(sc, MAC_XIF_CONFIG(sc->sc_port), val);
919 }
920 
921 int
922 nep_media_change(struct ifnet *ifp)
923 {
924 	struct nep_softc *sc = ifp->if_softc;
925 
926 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
927 		mii_mediachg(&sc->sc_mii);
928 
929 	return (0);
930 }
931 
932 void
933 nep_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
934 {
935 	struct nep_softc *sc = ifp->if_softc;
936 
937 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
938 		mii_pollstat(&sc->sc_mii);
939 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
940 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
941 	}
942 }
943 
/*
 * Interrupt handler.  Reads the logical device state vectors for this
 * port's group, services Tx/Rx DMA completions, and re-arms the group
 * timer.  Returns 0 if the interrupt was not ours, 1 otherwise.
 */
int
nep_intr(void *arg)
{
	struct nep_softc *sc = arg;
	uint64_t sv0, sv1, sv2;
	int rearm = 0;

	sv0 = nep_read(sc, LDSV0(sc->sc_port));
	sv1 = nep_read(sc, LDSV1(sc->sc_port));
	sv2 = nep_read(sc, LDSV2(sc->sc_port));

	/* No state bits pending: not our interrupt. */
	if ((sv0 | sv1 | sv2) == 0)
		return (0);

	if (sv0 & (1ULL << LDN_TXDMA(sc->sc_port))) {
		nep_tx_proc(sc);
		rearm = 1;
	}

	if (sv0 & (1ULL << LDN_RXDMA(sc->sc_port))) {
		nep_rx_proc(sc);
		rearm = 1;
	}

	/*
	 * Only re-arm the group when we actually serviced something;
	 * otherwise log the unexpected state vectors for debugging.
	 */
	if (rearm)
		nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);
	else
		printf("%s: %s %llx %llx %llx\n", sc->sc_dev.dv_xname,
		    __func__, sv0, sv1, sv2);

	return (1);
}
976 
977 void
978 nep_rx_proc(struct nep_softc *sc)
979 {
980 	struct ifnet *ifp = &sc->sc_ac.ac_if;
981 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
982 	uint64_t val;
983 	uint16_t count;
984 	uint16_t pktread = 0, ptrread = 0;
985 	uint64_t rxd;
986 	uint64_t addr;
987 	bus_addr_t page;
988 	bus_size_t off;
989 	char *block;
990 	struct mbuf *m;
991 	int idx, len, i;
992 
993 	val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
994 	nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port),
995 	    RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO);
996 
997 	bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0,
998 	    NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_POSTREAD);
999 
1000 	count = nep_read(sc, RCRSTAT_A(sc->sc_port));
1001 	while (count > 0) {
1002 		idx = sc->sc_rx_cons;
1003 		KASSERT(idx < NEP_NRCDESC);
1004 
1005 		rxd = letoh64(sc->sc_rcdesc[idx]);
1006 
1007 		addr = (rxd & RXD_PKT_BUF_ADDR_MASK) << RXD_PKT_BUF_ADDR_SHIFT;
1008 		len = (rxd & RXD_L2_LEN_MASK) >> RXD_L2_LEN_SHIFT;
1009 		page = addr & ~PAGE_MASK;
1010 		off = addr & PAGE_MASK;
1011 		block = NULL;
1012 		for (i = 0; i < NEP_NRBDESC; i++) {
1013 			if (sc->sc_rb[i].nb_block &&
1014 			    sc->sc_rb[i].nb_map->dm_segs[0].ds_addr == page) {
1015 				block = sc->sc_rb[i].nb_block;
1016 				break;
1017 			}
1018 		}
1019 		if (block == NULL) {
1020 			m = NULL;
1021 		} else {
1022 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rb[i].nb_map);
1023 			sc->sc_rb[i].nb_block = NULL;
1024 
1025 			MGETHDR(m, M_DONTWAIT, MT_DATA);
1026 		}
1027 
1028 		if (m == NULL) {
1029 			ifp->if_ierrors++;
1030 		} else {
1031 			MEXTADD(m, block + off, PAGE_SIZE, M_EXTWR,
1032 			    nep_mextfree_idx, block);
1033 			m->m_pkthdr.len = m->m_len = len;
1034 			m->m_data += ETHER_ALIGN;
1035 
1036 			ml_enqueue(&ml, m);
1037 		}
1038 
1039 		if_rxr_put(&sc->sc_rx_ring, 1);
1040 		if ((rxd & RXD_MULTI) == 0) {
1041 			count--;
1042 			pktread++;
1043 		}
1044 		ptrread++;
1045 		sc->sc_rx_cons++;
1046 		if (sc->sc_rx_cons >= NEP_NRCDESC)
1047 			sc->sc_rx_cons = 0;
1048 	}
1049 
1050 	bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_rcring), 0,
1051 	    NEP_DMA_LEN(sc->sc_rcring), BUS_DMASYNC_PREREAD);
1052 
1053 	if_input(ifp, &ml);
1054 
1055 	nep_fill_rx_ring(sc);
1056 
1057 	val = pktread | (ptrread << RX_DMA_CTL_STAT_PTRREAD_SHIFT);
1058 	val |= RX_DMA_CTL_STAT_MEX;
1059 	nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val);
1060 }
1061 
1062 void
1063 nep_extfree(caddr_t buf, u_int size, void *arg)
1064 {
1065 	pool_put(nep_block_pool, arg);
1066 }
1067 
/*
 * Service transmit completions.  The hardware maintains a free-running
 * packet counter; the difference from our last snapshot (modulo the
 * counter width) tells us how many packets completed since last time.
 */
void
nep_tx_proc(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_buf *txb;
	uint64_t val;
	uint16_t pkt_cnt, count;
	int idx;

	val = nep_read(sc, TX_CS(sc->sc_port));
	pkt_cnt = (val & TX_CS_PKT_CNT_MASK) >> TX_CS_PKT_CNT_SHIFT;
	/* Delta since last snapshot, masked to handle counter wrap. */
	count = (pkt_cnt - sc->sc_pkt_cnt);
	count &= (TX_CS_PKT_CNT_MASK >> TX_CS_PKT_CNT_SHIFT);
	sc->sc_pkt_cnt = pkt_cnt;

	while (count > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < NEP_NTXDESC);

		/*
		 * Only the first descriptor of a packet carries the mbuf;
		 * continuation descriptors have nb_m == NULL and are just
		 * skipped while advancing the consumer index.
		 */
		txb = &sc->sc_txbuf[idx];
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);

			m_freem(txb->nb_m);
			txb->nb_m = NULL;
			ifp->if_opackets++;
			count--;
		}

		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;
		sc->sc_tx_cons++;
		if (sc->sc_tx_cons >= NEP_NTXDESC)
			sc->sc_tx_cons = 0;
	}

	/* All descriptors reclaimed; cancel the watchdog. */
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1110 
/*
 * Initialize the Input Packet Processor: clear its DFIFO, soft-reset
 * it, then enable it and unmask its interrupts.
 */
void
nep_init_ipp(struct nep_softc *sc)
{
	uint64_t val;
	int num_entries;
	int n, i;

	/* DFIFO depth differs between the first and second port pair. */
	if (sc->sc_port < 2)
		num_entries = IPP_P0_P1_DFIFO_ENTRIES;
	else
		num_entries = IPP_P2_P3_DFIFO_ENTRIES;

	for (i = 0; i < num_entries; i++)
		nep_ipp_clear_dfifo(sc, i);

	/* Read twice; presumably read-to-clear — TODO confirm. */
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));
	(void)nep_read(sc, IPP_INT_STAT(sc->sc_port));

	/* Soft-reset the IPP and wait for the bit to self-clear. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_SOFT_RST;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);
	n = 1000;
	while (--n) {
		val = nep_read(sc, IPP_CFIG(sc->sc_port));
		if ((val & IPP_CFIG_SOFT_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting IPP\n");

	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	/* Unmask all IPP interrupts. */
	nep_write(sc, IPP_MSK(sc->sc_port), 0);
}
1147 
/*
 * Zero one entry of the IPP DFIFO via PIO: enable PIO write access,
 * write zeros through the five write registers, then read the entry
 * back with PIO access disabled.
 */
void
nep_ipp_clear_dfifo(struct nep_softc *sc, uint64_t addr)
{
	uint64_t val;

	/* Enable PIO write access to the DFIFO. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val |= IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_write(sc, IPP_DFIFO_WR_PTR(sc->sc_port), addr);
	nep_write(sc, IPP_DFIFO_WR1(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR2(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR3(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR4(sc->sc_port), 0);
	nep_write(sc, IPP_DFIFO_WR5(sc->sc_port), 0);

	/* Back to normal (non-PIO) operation. */
	val &= ~IPP_CFIG_DFIFO_PIO_W;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	/* Read the entry back; values are discarded. */
	nep_write(sc, IPP_DFIFO_RD_PTR(sc->sc_port), addr);
	(void)nep_read(sc, IPP_DFIFO_RD1(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD2(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD3(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD4(sc->sc_port));
	(void)nep_read(sc, IPP_DFIFO_RD5(sc->sc_port));
}
1174 
1175 void
1176 nep_init_rx_mac(struct nep_softc *sc)
1177 {
1178 	if (sc->sc_port < 2)
1179 		nep_init_rx_xmac(sc);
1180 	else
1181 		nep_init_rx_bmac(sc);
1182 }
1183 
/*
 * Reset and initialize the XMAC receive side: program the station
 * address, clear the address filters and hash table, and point all
 * host-info entries at this port.
 */
void
nep_init_rx_xmac(struct nep_softc *sc)
{
	uint64_t addr0, addr1, addr2;
	uint64_t val;
	int n, i;

	/* Software reset; wait for both reset bits to self-clear. */
	nep_write(sc, XRXMAC_SW_RST(sc->sc_port),
	    XRXMAC_SW_RST_REG_RST | XRXMAC_SW_RST_SOFT_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, XRXMAC_SW_RST(sc->sc_port));
		if ((val & (XRXMAC_SW_RST_REG_RST |
		    XRXMAC_SW_RST_SOFT_RST)) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx MAC\n");

	/* Station address is split into three 16-bit registers. */
	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
	nep_write(sc, XMAC_ADDR0(sc->sc_port), addr0);
	nep_write(sc, XMAC_ADDR1(sc->sc_port), addr1);
	nep_write(sc, XMAC_ADDR2(sc->sc_port), addr2);

	nep_write(sc, XMAC_ADDR_CMPEN(sc->sc_port), 0);

	/* Clear the alternate address filter and its masks. */
	nep_write(sc, XMAC_ADD_FILT0(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT1(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT2(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT12_MASK(sc->sc_port), 0);
	nep_write(sc, XMAC_ADD_FILT00_MASK(sc->sc_port), 0);

	/* Empty multicast hash table. */
	for (i = 0; i < 16; i++)
		nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), 0);

	for (i = 0; i < 20; i++)
		nep_write(sc, XMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
}
1224 
/*
 * Reset and initialize the BMAC receive side: disable all filtering
 * modes, program the station address, clear the address filters and
 * hash table, and point the host-info entries at this port.
 */
void
nep_init_rx_bmac(struct nep_softc *sc)
{
	uint64_t addr0, addr1, addr2;
	uint64_t val;
	int n, i;

	/* Software reset; wait for the bit to self-clear. */
	nep_write(sc, RXMAC_SW_RST(sc->sc_port), RXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, RXMAC_SW_RST(sc->sc_port));
		if ((val & RXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx MAC\n");

	/* Start from a known configuration with receive disabled. */
	val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
	val &= ~RXMAC_CONFIG_ERROR_CHK_DIS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS;
	val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
	val &= ~RXMAC_CONFIG_ADDR_FILTER_EN;
	val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
	val &= ~RXMAC_CONFIG_STRIP_FCS;
	val &= ~RXMAC_CONFIG_STRIP_PAD;
	val &= ~RXMAC_CONFIG_RX_ENABLE;
	nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);

	/* Station address is split into three 16-bit registers. */
	addr0 = (sc->sc_lladdr[4] << 8) | sc->sc_lladdr[5];
	addr1 = (sc->sc_lladdr[2] << 8) | sc->sc_lladdr[3];
	addr2 = (sc->sc_lladdr[0] << 8) | sc->sc_lladdr[1];
	nep_write(sc, BMAC_ADDR0(sc->sc_port), addr0);
	nep_write(sc, BMAC_ADDR1(sc->sc_port), addr1);
	nep_write(sc, BMAC_ADDR2(sc->sc_port), addr2);

	nep_write(sc, BMAC_ALTAD_CMPEN(sc->sc_port), 1);

	/* Clear the alternate address filter and its masks. */
	nep_write(sc, MAC_ADDR_FILT0(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT1(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT2(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT12_MASK(sc->sc_port), 0);
	nep_write(sc, MAC_ADDR_FILT00_MASK(sc->sc_port), 0);

	/* Empty multicast hash table. */
	for (i = 0; i < 16; i++)
		nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), 0);

	for (i = 0; i < 9; i++)
		nep_write(sc, BMAC_HOST_INFO(sc->sc_port, i), sc->sc_port);
}
1274 
/*
 * Reset and program one receive DMA channel: logical pages, mailbox,
 * block ring (RBR) and completion ring (RCR) addresses and sizes.
 */
void
nep_init_rx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int i, n;

	/*
	 * NOTE(review): `val` computed here is never used; the write
	 * below stores RXDMA_CFIG1_RST directly.  Looks like leftover
	 * read-modify-write code — confirm intent before removing.
	 */
	val = nep_read(sc, RXDMA_CFIG1(chan));
	val &= ~RXDMA_CFIG1_EN;
	val |= RXDMA_CFIG1_RST;
	nep_write(sc, RXDMA_CFIG1(chan), RXDMA_CFIG1_RST);

	/* Wait for the reset bit to self-clear. */
	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(chan));
		if ((val & RXDMA_CFIG1_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Rx DMA\n");

	/* Disable logical page translation; mark both pages valid. */
	nep_write(sc, RX_LOG_MASK1(chan), 0);
	nep_write(sc, RX_LOG_VALUE1(chan), 0);
	nep_write(sc, RX_LOG_MASK2(chan), 0);
	nep_write(sc, RX_LOG_VALUE2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, RX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, RX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, RX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << RX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);

	nep_write(sc, RX_DMA_ENT_MSK(chan), RX_DMA_ENT_MSK_RBR_EMPTY);
	nep_write(sc, RX_DMA_CTL_STAT(chan), RX_DMA_CTL_STAT_MEX);

	/* Rx mailbox address, split across CFIG1 (high) / CFIG2 (low). */
	val = NEP_DMA_DVA(sc->sc_rxmbox) >> 32;
	nep_write(sc, RXDMA_CFIG1(chan), val);

	val = NEP_DMA_DVA(sc->sc_rxmbox) & 0xffffffc0;
	nep_write(sc, RXDMA_CFIG2(chan), val);

	/* Receive block ring base and length. */
	val = NEP_DMA_DVA(sc->sc_rbring);
	val |= (uint64_t)NEP_NRBDESC << RBR_CFIG_A_LEN_SHIFT;
	nep_write(sc, RBR_CFIG_A(chan), val);

	/* 8K blocks with a single 8K buffer per block. */
	val = RBR_CFIG_B_BLKSIZE_8K;
	val |= RBR_CFIG_B_BUFSZ1_8K | RBR_CFIG_B_VLD1;
	nep_write(sc, RBR_CFIG_B(chan), val);

	nep_write(sc, RBR_KICK(chan), 0);

	/* Receive completion ring base and length. */
	val = NEP_DMA_DVA(sc->sc_rcring);
	val |= (uint64_t)NEP_NRCDESC << RCRCFIG_A_LEN_SHIFT;
	nep_write(sc, RCRCFIG_A(chan), val);

	/* Interrupt after a timeout of 8 ticks or 16 packets. */
	val = 8 | RCRCFIG_B_ENTOUT;
	val |= (16 << RCRCFIG_B_PTHRES_SHIFT);
	nep_write(sc, RCRCFIG_B(chan), val);

	/* Steer all traffic classes for this port to this channel. */
	nep_write(sc, DEF_PT_RDC(sc->sc_port), chan);
	for (i = 0; i < 16; i++)
		nep_write(sc, RDC_TBL(sc->sc_port, i), chan);
}
1337 
1338 void
1339 nep_init_tx_mac(struct nep_softc *sc)
1340 {
1341 	if (sc->sc_port < 2)
1342 		nep_init_tx_xmac(sc);
1343 	else
1344 		nep_init_tx_bmac(sc);
1345 }
1346 
/*
 * Reset and initialize the XMAC transmit side: inter-packet gap,
 * minimum/maximum frame sizes, and statistics counters; transmit
 * stays disabled until nep_up() enables it.
 */
void
nep_init_tx_xmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Software reset; wait for both reset bits to self-clear. */
	nep_write(sc, XTXMAC_SW_RST(sc->sc_port),
	    XTXMAC_SW_RST_REG_RST | XTXMAC_SW_RST_SOFT_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, XTXMAC_SW_RST(sc->sc_port));
		if ((val & (XTXMAC_SW_RST_REG_RST |
		    XTXMAC_SW_RST_SOFT_RST)) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	/* Plain framing: CRC appended, fixed IPG, no stretch mode. */
	val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
	val &= ~XMAC_CONFIG_ALWAYS_NO_CRC;
	val &= ~XMAC_CONFIG_VAR_MIN_IPG_EN;
	val &= ~XMAC_CONFIG_STRETCH_MODE;
	val &= ~XMAC_CONFIG_TX_ENABLE;
	nep_write(sc, XMAC_CONFIG(sc->sc_port), val);

	val = nep_read(sc, XMAC_IPG(sc->sc_port));
	val &= ~XMAC_IPG_IPG_VALUE1_MASK;	/* MII/GMII mode */
	val |= XMAC_IPG_IPG_VALUE1_12;
	val &= ~XMAC_IPG_IPG_VALUE_MASK;	/* XGMII mode */
	val |= XMAC_IPG_IPG_VALUE_12_15;
	nep_write(sc, XMAC_IPG(sc->sc_port), val);

	/* Ethernet minimum frame (64 bytes) both directions. */
	val = nep_read(sc, XMAC_MIN(sc->sc_port));
	val &= ~XMAC_MIN_RX_MIN_PKT_SIZE_MASK;
	val &= ~XMAC_MIN_TX_MIN_PKT_SIZE_MASK;
	val |= (64 << XMAC_MIN_RX_MIN_PKT_SIZE_SHIFT);
	val |= (64 << XMAC_MIN_TX_MIN_PKT_SIZE_SHIFT);
	nep_write(sc, XMAC_MIN(sc->sc_port), val);
	nep_write(sc, XMAC_MAX(sc->sc_port), ETHER_MAX_LEN);

	/* Zero the frame/byte statistics counters. */
	nep_write(sc, TXMAC_FRM_CNT(sc->sc_port), 0);
	nep_write(sc, TXMAC_BYTE_CNT(sc->sc_port), 0);
}
1390 
/*
 * Reset and initialize the BMAC transmit side: frame size limits,
 * control-frame ethertype and preamble size.
 */
void
nep_init_tx_bmac(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Software reset; wait for the bit to self-clear. */
	nep_write(sc, TXMAC_SW_RST(sc->sc_port), TXMAC_SW_RST_SW_RST);
	n = 1000;
	while (--n) {
		val = nep_read(sc, TXMAC_SW_RST(sc->sc_port));
		if ((val & TXMAC_SW_RST_SW_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx MAC\n");

	/* Min frame 64 bytes; max frame and burst size ETHER_MAX_LEN. */
	nep_write(sc, BMAC_MIN(sc->sc_port), 0x40);
	nep_write(sc, BMAC_MAX(sc->sc_port), ETHER_MAX_LEN |
	    (ETHER_MAX_LEN << BMAC_MAX_BURST_SHIFT));
	/* 0x8808 is the MAC control (pause) ethertype. */
	nep_write(sc, MAC_CTRL_TYPE(sc->sc_port), 0x8808);
	nep_write(sc, MAC_PA_SIZE(sc->sc_port), 0x7);
}
1413 
/*
 * Reset and program one transmit DMA channel: enable the transmit
 * controller for this port, bind the channel, reset it, set up the
 * logical pages, and program the descriptor ring address/length.
 */
void
nep_init_tx_channel(struct nep_softc *sc, int chan)
{
	uint64_t val;
	int n;

	/* Enable the Tx controller and this port's bit. */
	val = nep_read(sc, TXC_CONTROL);
	val |= TXC_CONTROL_TXC_ENABLED;
	val |= (1ULL << sc->sc_port);
	nep_write(sc, TXC_CONTROL, val);

	/* Bind the DMA channel to this port. */
	nep_write(sc, TXC_PORT_DMA(sc->sc_port), 1ULL << chan);

	/* Unmask this port's Tx controller interrupts. */
	val = nep_read(sc, TXC_INT_MASK);
	val &= ~TXC_INT_MASK_PORT_INT_MASK(sc->sc_port);
	nep_write(sc, TXC_INT_MASK, val);

	/* Channel reset; wait for the bit to self-clear. */
	val = nep_read(sc, TX_CS(chan));
	val |= TX_CS_RST;
	nep_write(sc, TX_CS(chan), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(chan));
		if ((val & TX_CS_RST) == 0)
			break;
	}
	if (n == 0)
		printf("timeout resetting Tx DMA\n");

	/* Disable logical page translation; mark both pages valid. */
	nep_write(sc, TX_LOG_MASK1(chan), 0);
	nep_write(sc, TX_LOG_VALUE1(chan), 0);
	nep_write(sc, TX_LOG_MASK2(chan), 0);
	nep_write(sc, TX_LOG_VALUE2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO1(chan), 0);
	nep_write(sc, TX_LOG_PAGE_RELO2(chan), 0);
	nep_write(sc, TX_LOG_PAGE_HDL(chan), 0);
	nep_write(sc, TX_LOG_PAGE_VLD(chan),
	    (sc->sc_port << TX_LOG_PAGE_VLD_FUNC_SHIFT) |
	    TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);

	nep_write(sc, TX_RING_KICK(chan), 0);

	/* Max frame plus room for the software Tx header/padding. */
	nep_write(sc, TXC_DMA_MAX(chan), ETHER_MAX_LEN + 64);
	nep_write(sc, TX_ENT_MSK(chan), 0);

	/* Ring base address; length is in units of 64 bytes. */
	val = NEP_DMA_DVA(sc->sc_txring);
	val |= (NEP_DMA_LEN(sc->sc_txring) / 64) << TX_RNG_CFIG_LEN_SHIFT;
	nep_write(sc, TX_RNG_CFIG(chan), val);

	nep_write(sc, TX_CS(chan), 0);
}
1466 
/*
 * Enable the receive MAC, programming the filtering mode from the
 * interface flags: promiscuous, all-multicast, or hash filtering.
 * The XMAC (ports 0/1) and BMAC (ports 2/3) branches mirror each
 * other with their respective register layouts.
 */
void
nep_enable_rx_mac(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t val;

	if (sc->sc_port < 2) {
		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
		val &= ~XMAC_CONFIG_PROMISCUOUS;
		val &= ~XMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~XMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= XMAC_CONFIG_PROMISCUOUS;
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= XMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= XMAC_CONFIG_HASH_FILTER_EN;
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
	} else {
		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
		val &= ~RXMAC_CONFIG_PROMISCUOUS;
		val &= ~RXMAC_CONFIG_PROMISCUOUS_GROUP;
		val &= ~RXMAC_CONFIG_HASH_FILTER_EN;
		if (ifp->if_flags & IFF_PROMISC)
			val |= RXMAC_CONFIG_PROMISCUOUS;
		if (ifp->if_flags & IFF_ALLMULTI)
			val |= RXMAC_CONFIG_PROMISCUOUS_GROUP;
		else
			val |= RXMAC_CONFIG_HASH_FILTER_EN;
		val |= RXMAC_CONFIG_RX_ENABLE;
		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
	}
}
1501 
1502 void
1503 nep_disable_rx_mac(struct nep_softc *sc)
1504 {
1505 	uint64_t val;
1506 
1507 	if (sc->sc_port < 2) {
1508 		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
1509 		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
1510 		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
1511 	} else {
1512 		val = nep_read(sc, RXMAC_CONFIG(sc->sc_port));
1513 		val &= ~RXMAC_CONFIG_RX_ENABLE;
1514 		nep_write(sc, RXMAC_CONFIG(sc->sc_port), val);
1515 	}
1516 }
1517 
/*
 * Quiesce the Tx and Rx DMA engines for this port, polling until
 * each reports it has reached the stopped state.
 */
void
nep_stop_dma(struct nep_softc *sc)
{
	uint64_t val;
	int n;

	/* Ask the Tx DMA to stop at a packet boundary. */
	val = nep_read(sc, TX_CS(sc->sc_port));
	val |= TX_CS_STOP_N_GO;
	nep_write(sc, TX_CS(sc->sc_port), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, TX_CS(sc->sc_port));
		if (val & TX_CS_SNG_STATE)
			break;
	}
	if (n == 0)
		printf("timeout stopping Tx DMA\n");

	/* Disable the Rx DMA and wait for it to go quiescent. */
	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
	val &= ~RXDMA_CFIG1_EN;
	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);

	n = 1000;
	while (--n) {
		val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
		if (val & RXDMA_CFIG1_QST)
			break;
	}
	if (n == 0)
		printf("timeout stopping Rx DMA\n");
}
1550 
1551 void
1552 nep_up(struct nep_softc *sc)
1553 {
1554 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1555 	struct nep_block *rb;
1556 	struct nep_buf *txb;
1557 	uint64_t val;
1558 	int i, n;
1559 
1560 	/* Allocate Rx block descriptor ring. */
1561 	sc->sc_rbring = nep_dmamem_alloc(sc, NEP_NRBDESC * sizeof(uint32_t));
1562 	if (sc->sc_rbring == NULL)
1563 		return;
1564 	sc->sc_rbdesc = NEP_DMA_KVA(sc->sc_rbring);
1565 
1566 	sc->sc_rb = malloc(sizeof(struct nep_block) * NEP_NRBDESC,
1567 	    M_DEVBUF, M_WAITOK);
1568 	for (i = 0; i < NEP_NRBDESC; i++) {
1569 		rb = &sc->sc_rb[i];
1570 		bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1571 		    BUS_DMA_WAITOK, &rb->nb_map);
1572 		rb->nb_block = NULL;
1573 	}
1574 
1575 	sc->sc_rx_prod = 0;
1576 	if_rxr_init(&sc->sc_rx_ring, 16, NEP_NRBDESC);
1577 
1578 	/* Allocate Rx completion descriptor ring. */
1579 	sc->sc_rcring = nep_dmamem_alloc(sc, NEP_NRCDESC * sizeof(uint64_t));
1580 	if (sc->sc_rcring == NULL)
1581 		goto free_rbring;
1582 	sc->sc_rcdesc = NEP_DMA_KVA(sc->sc_rcring);
1583 
1584 	sc->sc_rx_cons = 0;
1585 
1586 	/* Allocate Rx mailbox. */
1587 	sc->sc_rxmbox = nep_dmamem_alloc(sc, 64);
1588 	if (sc->sc_rxmbox == NULL)
1589 		goto free_rcring;
1590 
1591 	/* Allocate Tx descriptor ring. */
1592 	sc->sc_txring = nep_dmamem_alloc(sc, NEP_NTXDESC * sizeof(uint64_t));
1593 	if (sc->sc_txring == NULL)
1594 		goto free_rxmbox;
1595 	sc->sc_txdesc = NEP_DMA_KVA(sc->sc_txring);
1596 
1597 	sc->sc_txbuf = malloc(sizeof(struct nep_buf) * NEP_NTXDESC,
1598 	    M_DEVBUF, M_WAITOK);
1599 	for (i = 0; i < NEP_NTXDESC; i++) {
1600 		txb = &sc->sc_txbuf[i];
1601 		bus_dmamap_create(sc->sc_dmat, MCLBYTES, NEP_NTXSEGS,
1602 		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->nb_map);
1603 		txb->nb_m = NULL;
1604 	}
1605 
1606 	sc->sc_tx_prod = sc->sc_tx_cons = 0;
1607 	sc->sc_tx_cnt = 0;
1608 	sc->sc_wrap = 0;
1609 	sc->sc_pkt_cnt = 0;
1610 
1611 	if (sc->sc_port < 2) {
1612 		/* Disable the POR loopback clock source. */
1613 		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
1614 		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
1615 		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
1616 	}
1617 
1618 	nep_write(sc, PCS_DPATH_MODE(sc->sc_port), PCS_DPATH_MODE_MII);
1619 	val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
1620 	val |= PCS_MII_CTL_RESET;
1621 	nep_write(sc, PCS_MII_CTL(sc->sc_port), val);
1622 	n = 1000;
1623 	while (--n) {
1624 		val = nep_read(sc, PCS_MII_CTL(sc->sc_port));
1625 		if ((val & PCS_MII_CTL_RESET) == 0)
1626 			break;
1627 	}
1628 	if (n == 0)
1629 		printf("timeout resetting PCS\n");
1630 
1631 	nep_init_rx_mac(sc);
1632 	nep_init_rx_channel(sc, sc->sc_port);
1633 	nep_init_ipp(sc);
1634 
1635 	nep_init_tx_mac(sc);
1636 	nep_init_tx_channel(sc, sc->sc_port);
1637 
1638 	nep_fill_rx_ring(sc);
1639 
1640 	nep_enable_rx_mac(sc);
1641 	if (sc->sc_port < 2) {
1642 		val = nep_read(sc, XMAC_CONFIG(sc->sc_port));
1643 		val |= XMAC_CONFIG_TX_ENABLE;
1644 		nep_write(sc, XMAC_CONFIG(sc->sc_port), val);
1645 	} else {
1646 		val = nep_read(sc, TXMAC_CONFIG(sc->sc_port));
1647 		val |= TXMAC_CONFIG_TX_ENABLE;
1648 		nep_write(sc, TXMAC_CONFIG(sc->sc_port), val);
1649 	}
1650 
1651 	val = nep_read(sc, RXDMA_CFIG1(sc->sc_port));
1652 	val |= RXDMA_CFIG1_EN;
1653 	nep_write(sc, RXDMA_CFIG1(sc->sc_port), val);
1654 
1655 	ifp->if_flags |= IFF_RUNNING;
1656 	ifq_clr_oactive(&ifp->if_snd);
1657 	ifp->if_timer = 0;
1658 
1659 	/* Enable interrupts. */
1660 	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 0);
1661 	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 0);
1662 	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 0);
1663 	nep_write(sc, LDGIMGN(sc->sc_port), LDGIMGN_ARM | 2);
1664 
1665 	timeout_add_sec(&sc->sc_tick, 1);
1666 
1667 	return;
1668 
1669 free_rxmbox:
1670 	nep_dmamem_free(sc, sc->sc_rxmbox);
1671 free_rcring:
1672 	nep_dmamem_free(sc, sc->sc_rcring);
1673 free_rbring:
1674 	nep_dmamem_free(sc, sc->sc_rbring);
1675 }
1676 
/*
 * Bring the interface down: mask interrupts, disable the MACs, IPP
 * and DMA engines, then free all Tx/Rx buffers, maps and rings
 * allocated by nep_up().
 */
void
nep_down(struct nep_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct nep_buf *txb;
	struct nep_block *rb;
	uint64_t val;
	int i;

	timeout_del(&sc->sc_tick);

	/* Disable interrupts. */
	nep_write(sc, LD_IM1(LDN_MAC(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_RXDMA(sc->sc_port)), 1);
	nep_write(sc, LD_IM0(LDN_TXDMA(sc->sc_port)), 1);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	nep_disable_rx_mac(sc);

	/* Disable the IPP. */
	val = nep_read(sc, IPP_CFIG(sc->sc_port));
	val &= ~IPP_CFIG_IPP_ENABLE;
	nep_write(sc, IPP_CFIG(sc->sc_port), val);

	nep_stop_dma(sc);

	/* Free in-flight Tx mbufs and destroy the per-buffer maps. */
	for (i = 0; i < NEP_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->nb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->nb_map, 0,
			    txb->nb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->nb_map);
			m_freem(txb->nb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, sizeof(struct nep_buf) * NEP_NTXDESC);

	nep_dmamem_free(sc, sc->sc_rxmbox);
	nep_dmamem_free(sc, sc->sc_rcring);

	/* Return outstanding Rx blocks to the pool; destroy the maps. */
	for (i = 0; i < NEP_NRBDESC; i++) {
		rb = &sc->sc_rb[i];
		if (rb->nb_block) {
			bus_dmamap_sync(sc->sc_dmat, rb->nb_map, 0,
			    rb->nb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rb->nb_map);
			pool_put(nep_block_pool, rb->nb_block);
		}
		bus_dmamap_destroy(sc->sc_dmat, rb->nb_map);
	}

	nep_dmamem_free(sc, sc->sc_rbring);
	free(sc->sc_rb, M_DEVBUF, sizeof(struct nep_block) * NEP_NRBDESC);
}
1736 
1737 void
1738 nep_iff(struct nep_softc *sc)
1739 {
1740 	struct arpcom *ac = &sc->sc_ac;
1741 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1742 	struct ether_multi *enm;
1743 	struct ether_multistep step;
1744 	uint32_t crc, hash[16];
1745 	int i;
1746 
1747 	nep_disable_rx_mac(sc);
1748 
1749 	ifp->if_flags &= ~IFF_ALLMULTI;
1750 	memset(hash, 0, sizeof(hash));
1751 
1752 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1753 		ifp->if_flags |= IFF_ALLMULTI;
1754 	} else {
1755 		ETHER_FIRST_MULTI(step, ac, enm);
1756 		while (enm != NULL) {
1757                         crc = ether_crc32_le(enm->enm_addrlo,
1758                             ETHER_ADDR_LEN);
1759 
1760                         crc >>= 24;
1761                         hash[crc >> 4] |= 1 << (15 - (crc & 15));
1762 
1763 			ETHER_NEXT_MULTI(step, enm);
1764 		}
1765 	}
1766 
1767 	for (i = 0; i < nitems(hash); i++) {
1768 		if (sc->sc_port < 2)
1769 			nep_write(sc, XMAC_HASH_TBL(sc->sc_port, i), hash[i]);
1770 		else
1771 			nep_write(sc, MAC_HASH_TBL(sc->sc_port, i), hash[i]);
1772 	}
1773 
1774 	nep_enable_rx_mac(sc);
1775 }
1776 
/*
 * Encapsulate an mbuf chain into transmit descriptors starting at
 * *idx.  Prepends the software Tx header (and alignment padding),
 * loads the chain for DMA, fills one descriptor per segment, and
 * kicks the hardware.  On success *idx is advanced past the packet
 * and *m0 points at the (header-stripped) mbuf now owned by the
 * ring.  Returns 0 on success or ENOBUFS (mbuf consumed) on failure.
 */
int
nep_encap(struct nep_softc *sc, struct mbuf **m0, int *idx)
{
	struct mbuf *m = *m0;
	struct nep_txbuf_hdr *nh;
	uint64_t txd;
	bus_dmamap_t map;
	int cur, frag, i;
	int len, pad;
	int err;

	/*
	 * MAC does not support padding of transmit packets that are
	 * fewer than 60 bytes.
	 */
	if (m->m_pkthdr.len < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
		struct mbuf *n;
		int padlen;

		padlen = (ETHER_MIN_LEN - ETHER_CRC_LEN) - m->m_pkthdr.len;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		memset(mtod(n, caddr_t), 0, padlen);
		n->m_len = padlen;
		m_cat(m, n);
		m->m_pkthdr.len += padlen;
	}

	/*
	 * Pad so the payload after the header is 16-byte aligned;
	 * the pad is encoded in the header as a count of half-words.
	 */
	if (M_LEADINGSPACE(m) < 16)
		pad = 0;
	else
		pad = mtod(m, u_long) % 16;
	len = m->m_pkthdr.len + pad;
	M_PREPEND(m, sizeof(*nh) + pad, M_DONTWAIT);
	if (m == NULL)
		return (ENOBUFS);
	nh = mtod(m, struct nep_txbuf_hdr *);
	nh->nh_flags = htole64((len << 16) | (pad / 2));
	nh->nh_reserved = 0;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].nb_map;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (err) {
		/* XXX defrag */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * First descriptor carries SOP/MARK and the segment count;
	 * subsequent descriptors carry only length and address.
	 */
	txd = TXD_SOP | TXD_MARK;
	txd |= ((uint64_t)map->dm_nsegs << TXD_NUM_PTR_SHIFT);
	for (i = 0; i < map->dm_nsegs; i++) {
		txd |= ((uint64_t)map->dm_segs[i].ds_len << TXD_TR_LEN_SHIFT);
		txd |= map->dm_segs[i].ds_addr;
		sc->sc_txdesc[frag] = htole64(txd);
		txd = 0;

		bus_dmamap_sync(sc->sc_dmat, NEP_DMA_MAP(sc->sc_txring),
		    frag * sizeof(txd), sizeof(txd), BUS_DMASYNC_PREWRITE);

		/* cur tracks the last descriptor actually written. */
		cur = frag++;
		if (frag >= NEP_NTXDESC)
			frag = 0;
		KASSERT(frag != sc->sc_tx_cons);
	}

	/*
	 * Park the mbuf on the packet's last descriptor slot and swap
	 * the maps so the slot at *idx keeps a spare map for later.
	 */
	KASSERT(sc->sc_txbuf[cur].nb_m == NULL);
	sc->sc_txbuf[*idx].nb_map = sc->sc_txbuf[cur].nb_map;
	sc->sc_txbuf[cur].nb_map = map;
	sc->sc_txbuf[cur].nb_m = m;

	/*
	 * Kick register: wrap bit toggles each time the tail wraps;
	 * the tail index is written shifted left by 3 — presumably
	 * the TX_RING_KICK tail field layout; confirm against docs.
	 */
	if (frag < *idx)
		sc->sc_wrap ^= TX_RING_KICK_WRAP;
	nep_write(sc, TX_RING_KICK(sc->sc_port), sc->sc_wrap | (frag << 3));

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	/* Strip the software header again before bpf sees the packet. */
	m_adj(m, sizeof(*nh) + pad);
	*m0 = m;

	return (0);
}
1868 
/*
 * Transmit start routine: drain the send queue into the Tx ring
 * until it is empty or the ring lacks room for a full packet.
 */
void
nep_start(struct ifnet *ifp)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Not enough descriptors for a worst-case packet:
		 * put the mbuf back and mark the queue active.
		 */
		if (sc->sc_tx_cnt >= (NEP_NTXDESC - NEP_NTXSEGS)) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Now we are committed to transmit the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

		if (nep_encap(sc, &m, &idx))
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1914 
/*
 * Transmit watchdog, fired when ifp->if_timer expires with Tx work
 * outstanding.  Currently a stub that only logs; no reset is done.
 */
void
nep_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1920 
1921 void
1922 nep_tick(void *arg)
1923 {
1924 	struct nep_softc *sc = arg;
1925 	int s;
1926 
1927 	s = splnet();
1928 	mii_tick(&sc->sc_mii);
1929 	splx(s);
1930 
1931 	timeout_add_sec(&sc->sc_tick, 1);
1932 }
1933 
/*
 * Interface ioctl handler; serialized with the network stack via
 * splnet().  Filter changes are applied lazily through ENETRESET.
 */
int
nep_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			/* Already running: just reprogram the filter. */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				nep_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				nep_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	/* ENETRESET means only the receive filter needs updating. */
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			nep_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1979 
1980 void
1981 nep_fill_rx_ring(struct nep_softc *sc)
1982 {
1983 	struct nep_block *rb;
1984 	void *block;
1985 	uint64_t val;
1986 	u_int slots;
1987 	int desc, err;
1988 	int count = 0;
1989 
1990 	desc = sc->sc_rx_prod;
1991 	slots = if_rxr_get(&sc->sc_rx_ring, NEP_NRBDESC);
1992 	while (slots > 0) {
1993 		rb = &sc->sc_rb[desc];
1994 
1995 		block = pool_get(nep_block_pool, PR_NOWAIT);
1996 		if (block == NULL)
1997 			break;
1998 		err = bus_dmamap_load(sc->sc_dmat, rb->nb_map, block,
1999 		     PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2000 		if (err) {
2001 			pool_put(nep_block_pool, block);
2002 			break;
2003 		}
2004 		rb->nb_block = block;
2005 		sc->sc_rbdesc[desc++] =
2006 		    htole32(rb->nb_map->dm_segs[0].ds_addr >> 12);
2007 		count++;
2008 		slots--;
2009 		if (desc >= NEP_NRBDESC)
2010 			desc = 0;
2011 	}
2012 	if_rxr_put(&sc->sc_rx_ring, slots);
2013 	if (count > 0) {
2014 		nep_write(sc, RBR_KICK(sc->sc_port), count);
2015 		val = nep_read(sc, RX_DMA_CTL_STAT(sc->sc_port));
2016 		val |= RX_DMA_CTL_STAT_RBR_EMPTY;
2017 		nep_write(sc, RX_DMA_CTL_STAT(sc->sc_port), val);
2018 		sc->sc_rx_prod = desc;
2019 	}
2020 }
2021 
2022 struct nep_dmamem *
2023 nep_dmamem_alloc(struct nep_softc *sc, size_t size)
2024 {
2025 	struct nep_dmamem *m;
2026 	int nsegs;
2027 
2028 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
2029 	if (m == NULL)
2030 		return (NULL);
2031 
2032 	m->ndm_size = size;
2033 
2034 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2035 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->ndm_map) != 0)
2036 		goto qdmfree;
2037 
2038 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->ndm_seg, 1,
2039 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2040 		goto destroy;
2041 
2042 	if (bus_dmamem_map(sc->sc_dmat, &m->ndm_seg, nsegs, size, &m->ndm_kva,
2043 	    BUS_DMA_NOWAIT) != 0)
2044 		goto free;
2045 
2046 	if (bus_dmamap_load(sc->sc_dmat, m->ndm_map, m->ndm_kva, size, NULL,
2047 	    BUS_DMA_NOWAIT) != 0)
2048 		goto unmap;
2049 
2050 	return (m);
2051 
2052 unmap:
2053 	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
2054 free:
2055 	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
2056 destroy:
2057 	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
2058 qdmfree:
2059 	free(m, M_DEVBUF, sizeof(*m));
2060 
2061 	return (NULL);
2062 }
2063 
2064 void
2065 nep_dmamem_free(struct nep_softc *sc, struct nep_dmamem *m)
2066 {
2067 	bus_dmamap_unload(sc->sc_dmat, m->ndm_map);
2068 	bus_dmamem_unmap(sc->sc_dmat, m->ndm_kva, m->ndm_size);
2069 	bus_dmamem_free(sc->sc_dmat, &m->ndm_seg, 1);
2070 	bus_dmamap_destroy(sc->sc_dmat, m->ndm_map);
2071 	free(m, M_DEVBUF, sizeof(*m));
2072 }
2073