1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4  */
5 
6 #include "axgbe_ethdev.h"
7 #include "axgbe_common.h"
8 #include "axgbe_phy.h"
9 #include "axgbe_rxtx.h"
10 
11 static uint32_t bitrev32(uint32_t x)
12 {
13 	x = (x >> 16) | (x << 16);
14 	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
15 	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
16 	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
17 	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
18 	return x;
19 }
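
/* Illustration: bitrev32() mirrors a 32-bit word end to end, so bit 0 swaps
 * with bit 31, bit 1 with bit 30, and so on; for example
 * bitrev32(0x00000001) == 0x80000000 and bitrev32(0x0000000f) == 0xf0000000.
 * The CRC-based hash helpers below rely on this to match the bit ordering
 * the hardware expects.
 */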
20 
21 /* Return the position (1-32) of the most significant set bit, or 0 if none */
22 static int get_lastbit_set(int x)
23 {
24 	int r = 32;
25 
26 	if (!x)
27 		return 0;
28 	if (!(x & 0xffff0000)) {
29 		x <<= 16;
30 		r -= 16;
31 	}
32 	if (!(x & 0xff000000)) {
33 		x <<= 8;
34 		r -= 8;
35 	}
36 	if (!(x & 0xf0000000)) {
37 		x <<= 4;
38 		r -= 4;
39 	}
40 	if (!(x & 0xc0000000)) {
41 		x <<= 2;
42 		r -= 2;
43 	}
44 	if (!(x & 0x80000000)) {
45 		x <<= 1;
46 		r -= 1;
47 	}
48 	return r;
49 }
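
/* Worked examples: get_lastbit_set(0x1) == 1, get_lastbit_set(0x80000000)
 * == 32, and get_lastbit_set(VLAN_VID_MASK) == 12 for the 12-bit VLAN ID
 * mask (0xfff), which is how axgbe_vid_crc32_le() below derives the number
 * of bits to feed into the CRC.
 */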
50 
51 static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
52 {
53 	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
54 		RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN;
55 }
56 
57 /* Return non-zero once the MDIO busy bit has cleared */
58 static int mdio_complete(struct axgbe_port *pdata)
59 {
60 	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
61 		return 1;
62 
63 	return 0;
64 }
65 
66 static unsigned int axgbe_create_mdio_sca_c22(int port, int reg)
67 {
68 	unsigned int mdio_sca;
69 
70 	mdio_sca = 0;
71 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
72 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
73 
74 	return mdio_sca;
75 }
76 
77 static unsigned int axgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
78 {
79 	unsigned int mdio_sca;
80 
81 	mdio_sca = 0;
82 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
83 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
84 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
85 
86 	return mdio_sca;
87 }
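
/* Usage sketch (not compiled): MDIOSCAR packs the register address (RA) and
 * PHY port address (PA); clause 45 additionally carries the MMD device
 * address (DA). The port, device and register values here are illustrative.
 */
#if 0
	unsigned int sca_c22 = axgbe_create_mdio_sca_c22(0, 0x00);
	unsigned int sca_c45 = axgbe_create_mdio_sca_c45(0, 1, 0x0000);
#endif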
88 
89 static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata,
90 						unsigned int mdio_sca, u16 val)
91 {
92 	unsigned int mdio_sccd;
93 	uint64_t timeout;
94 
95 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
96 
97 	mdio_sccd = 0;
98 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
99 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
100 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
101 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
102 
103 	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
104 	while (time_before(rte_get_timer_cycles(), timeout)) {
105 		rte_delay_us(100);
106 		if (mdio_complete(pdata))
107 			return 0;
108 	}
109 
110 	PMD_DRV_LOG_LINE(ERR, "MDIO write operation timed out");
111 	return -ETIMEDOUT;
112 }
113 
114 
115 static int axgbe_write_ext_mii_regs_c22(struct axgbe_port *pdata,
116 							int addr, int reg, u16 val)
117 {
118 	unsigned int mdio_sca;
119 
120 	mdio_sca = axgbe_create_mdio_sca_c22(addr, reg);
121 
122 	return axgbe_write_ext_mii_regs(pdata, mdio_sca, val);
123 }
124 
125 static int axgbe_write_ext_mii_regs_c45(struct axgbe_port *pdata,
126 					int addr, int devad, int reg, u16 val)
127 {
128 	unsigned int mdio_sca;
129 
130 	mdio_sca = axgbe_create_mdio_sca_c45(addr, devad, reg);
131 
132 	return axgbe_write_ext_mii_regs(pdata, mdio_sca, val);
133 }
134 
135 
136 static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata,
137 							unsigned int mdio_sca)
138 {
139 	unsigned int mdio_sccd;
140 	uint64_t timeout;
141 
142 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
143 
144 	mdio_sccd = 0;
145 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
146 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
147 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
148 
149 	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
150 
151 	while (time_before(rte_get_timer_cycles(), timeout)) {
152 		rte_delay_us(100);
153 		if (mdio_complete(pdata))
154 			goto success;
155 	}
156 
157 	PMD_DRV_LOG_LINE(ERR, "MDIO read operation timed out");
158 	return -ETIMEDOUT;
159 
160 success:
161 	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
162 }
163 
164 static int axgbe_read_ext_mii_regs_c22(struct axgbe_port *pdata, int addr, int reg)
165 {
166 	unsigned int mdio_sca;
167 
168 	mdio_sca = axgbe_create_mdio_sca_c22(addr, reg);
169 
170 	return axgbe_read_ext_mii_regs(pdata, mdio_sca);
171 }
172 
173 static int axgbe_read_ext_mii_regs_c45(struct axgbe_port *pdata, int addr,
174 								int devad, int reg)
175 {
176 	unsigned int mdio_sca;
177 
178 	mdio_sca = axgbe_create_mdio_sca_c45(addr, devad, reg);
179 
180 	return axgbe_read_ext_mii_regs(pdata, mdio_sca);
181 }
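
/* Usage sketch (not compiled, phy_addr is illustrative): a clause-22
 * read-modify-write through the helpers above. Reads return the 16-bit
 * register value or -ETIMEDOUT, so the sign must be checked before the
 * data is written back.
 */
#if 0
	int val = axgbe_read_ext_mii_regs_c22(pdata, phy_addr, 0x00);
	if (val >= 0)
		(void)axgbe_write_ext_mii_regs_c22(pdata, phy_addr, 0x00,
						   (u16)val);
#endif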
182 
183 static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
184 				  enum axgbe_mdio_mode mode)
185 {
186 	unsigned int reg_val = 0;
187 
188 	switch (mode) {
189 	case AXGBE_MDIO_MODE_CL22:
190 		if (port > AXGMAC_MAX_C22_PORT)
191 			return -EINVAL;
192 		reg_val |= (1 << port);
193 		break;
194 	case AXGBE_MDIO_MODE_CL45:
195 		break;
196 	default:
197 		return -EINVAL;
198 	}
199 	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
200 
201 	return 0;
202 }
203 
204 static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
205 				  int prtad __rte_unused, int mmd_reg)
206 {
207 	unsigned int mmd_address, index, offset;
208 	int mmd_data;
209 
210 	if (mmd_reg & AXGBE_ADDR_C45)
211 		mmd_address = mmd_reg & ~AXGBE_ADDR_C45;
212 	else
213 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
214 
215 	/* The PCS registers are accessed using mmio. The underlying
216 	 * management interface uses indirect addressing to access the MMD
217 	 * register sets. This requires accessing of the PCS register in two
218 	 * register sets. This requires the PCS registers to be accessed in two
219 	 *
220 	 * The mmio interface is based on 16-bit offsets and values. All
221 	 * register offsets must therefore be adjusted by left shifting the
222 	 * offset 1 bit and reading 16 bits of data.
223 	 */
224 	mmd_address <<= 1;
225 	index = mmd_address & ~pdata->xpcs_window_mask;
226 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
227 
228 	pthread_mutex_lock(&pdata->xpcs_mutex);
229 
230 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
231 	mmd_data = XPCS16_IOREAD(pdata, offset);
232 
233 	pthread_mutex_unlock(&pdata->xpcs_mutex);
234 
235 	return mmd_data;
236 }
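
/* Worked example of the two-phase access above, assuming for illustration
 * an 8 KB window at 0x2000 (xpcs_window_mask = 0x1fff): reading MMD 3,
 * register 0x0010 gives mmd_address = (3 << 16) | 0x0010 = 0x30010, or
 * 0x60020 after the shift. The address phase writes index = 0x60020 &
 * ~0x1fff = 0x60000 to the window select register; the data phase does a
 * 16-bit read at offset = 0x2000 + (0x60020 & 0x1fff) = 0x2020.
 */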
237 
238 static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
239 				    int prtad __rte_unused,
240 				    int mmd_reg, int mmd_data)
241 {
242 	unsigned int mmd_address, index, offset;
243 
244 	if (mmd_reg & AXGBE_ADDR_C45)
245 		mmd_address = mmd_reg & ~AXGBE_ADDR_C45;
246 	else
247 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
248 
249 	/* The PCS registers are accessed using mmio. The underlying
250 	 * management interface uses indirect addressing to access the MMD
251 	 * register sets. This requires the PCS registers to be accessed in two
252 	 * phases, an address phase and a data phase.
253 	 *
254 	 * The mmio interface is based on 16-bit offsets and values. All
255 	 * register offsets must therefore be adjusted by left shifting the
256 	 * offset 1 bit and writing 16 bits of data.
257 	 */
258 	mmd_address <<= 1;
259 	index = mmd_address & ~pdata->xpcs_window_mask;
260 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
261 
262 	pthread_mutex_lock(&pdata->xpcs_mutex);
263 
264 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
265 	XPCS16_IOWRITE(pdata, offset, mmd_data);
266 
267 	pthread_mutex_unlock(&pdata->xpcs_mutex);
268 }
269 
270 static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
271 			       int mmd_reg)
272 {
273 	switch (pdata->vdata->xpcs_access) {
274 	case AXGBE_XPCS_ACCESS_V1:
275 		PMD_DRV_LOG_LINE(ERR, "XPCS access version 1 is not supported");
276 		return -1;
277 	case AXGBE_XPCS_ACCESS_V2:
278 	default:
279 		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
280 	}
281 }
282 
283 static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
284 				 int mmd_reg, int mmd_data)
285 {
286 	switch (pdata->vdata->xpcs_access) {
287 	case AXGBE_XPCS_ACCESS_V1:
288 		PMD_DRV_LOG_LINE(ERR, "XPCS access version 1 is not supported");
289 		return;
290 	case AXGBE_XPCS_ACCESS_V2:
291 	default:
292 		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
293 	}
294 }
295 
296 static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
297 {
298 	unsigned int ss;
299 
300 	switch (speed) {
301 	case SPEED_10:
302 		ss = 0x07;
303 		break;
304 	case SPEED_1000:
305 		ss = 0x03;
306 		break;
307 	case SPEED_2500:
308 		ss = 0x02;
309 		break;
310 	case SPEED_10000:
311 		ss = 0x00;
312 		break;
313 	default:
314 		return -EINVAL;
315 	}
316 
317 	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
318 		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
319 
320 	return 0;
321 }
322 
323 static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata)
324 {
325 	unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
326 
327 	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
328 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
329 		return max_q_count;
330 	else
331 		return (RTE_MIN(pdata->tx_q_count, max_q_count));
332 }
333 
334 static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
335 {
336 	unsigned int reg, reg_val;
337 	unsigned int i, q_count;
338 
339 	/* Clear MTL flow control */
340 	for (i = 0; i < pdata->rx_q_count; i++)
341 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
342 
343 	/* Clear MAC flow control */
344 	q_count = axgbe_get_fc_queue_count(pdata);
345 	reg = MAC_Q0TFCR;
346 	for (i = 0; i < q_count; i++) {
347 		reg_val = AXGMAC_IOREAD(pdata, reg);
348 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
349 		AXGMAC_IOWRITE(pdata, reg, reg_val);
350 
351 		reg += MAC_QTFCR_INC;
352 	}
353 
354 	return 0;
355 }
356 
357 static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
358 {
359 	unsigned int reg, reg_val;
360 	unsigned int i, q_count;
361 
362 	/* Set MTL flow control */
363 	for (i = 0; i < pdata->rx_q_count; i++) {
364 		unsigned int ehfc = 0;
365 
366 		/* Enable hardware flow control if thresholds were established */
367 		if (pdata->rx_rfd[i])
368 			ehfc = 1;
369 
370 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
371 
372 		PMD_DRV_LOG_LINE(DEBUG, "flow control %s for RXq%u",
373 			    ehfc ? "enabled" : "disabled", i);
374 	}
375 
376 	/* Set MAC flow control */
377 	q_count = axgbe_get_fc_queue_count(pdata);
378 	reg = MAC_Q0TFCR;
379 	for (i = 0; i < q_count; i++) {
380 		reg_val = AXGMAC_IOREAD(pdata, reg);
381 
382 		/* Enable transmit flow control */
383 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
384 		/* Set pause time */
385 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
386 
387 		AXGMAC_IOWRITE(pdata, reg, reg_val);
388 
389 		reg += MAC_QTFCR_INC;
390 	}
391 
392 	return 0;
393 }
394 
395 static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
396 {
397 	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
398 
399 	return 0;
400 }
401 
402 static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
403 {
404 	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
405 
406 	return 0;
407 }
408 
409 static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
410 {
411 	if (pdata->tx_pause)
412 		axgbe_enable_tx_flow_control(pdata);
413 	else
414 		axgbe_disable_tx_flow_control(pdata);
415 
416 	return 0;
417 }
418 
419 static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
420 {
421 	if (pdata->rx_pause)
422 		axgbe_enable_rx_flow_control(pdata);
423 	else
424 		axgbe_disable_rx_flow_control(pdata);
425 
426 	return 0;
427 }
428 
429 static void axgbe_config_flow_control(struct axgbe_port *pdata)
430 {
431 	axgbe_config_tx_flow_control(pdata);
432 	axgbe_config_rx_flow_control(pdata);
433 
434 	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
435 }
436 
437 static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
438 					       unsigned int queue,
439 					       unsigned int q_fifo_size)
440 {
441 	unsigned int frame_fifo_size;
442 	unsigned int rfa, rfd;
443 
444 	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
445 
446 	/* This path deals with just maximum frame sizes which are
447 	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
448 	 * so we can never exceed the maximum allowable RFA/RFD
449 	 * values.
450 	 */
451 	if (q_fifo_size <= 2048) {
452 		/* Set rx_rfa/rx_rfd to zero to signal no flow control */
453 		pdata->rx_rfa[queue] = 0;
454 		pdata->rx_rfd[queue] = 0;
455 		return;
456 	}
457 
458 	if (q_fifo_size <= 4096) {
459 		/* Between 2048 and 4096 */
460 		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
461 		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
462 		return;
463 	}
464 
465 	if (q_fifo_size <= frame_fifo_size) {
466 		/* Between 4096 and max-frame */
467 		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
468 		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
469 		return;
470 	}
471 
472 	if (q_fifo_size <= (frame_fifo_size * 3)) {
473 		/* Between max-frame and 3 max-frames,
474 		 * trigger if we get just over a frame of data and
475 		 * resume when we have just under half a frame left.
476 		 */
477 		rfa = q_fifo_size - frame_fifo_size;
478 		rfd = rfa + (frame_fifo_size / 2);
479 	} else {
480 		/* Above 3 max-frames - trigger when just over
481 		 * 2 frames of space available
482 		 */
483 		rfa = frame_fifo_size * 2;
484 		rfa += AXGMAC_FLOW_CONTROL_UNIT;
485 		rfd = rfa + frame_fifo_size;
486 	}
487 
488 	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
489 	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
490 }
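
/* Worked example, assuming AXGMAC_FLOW_CONTROL_UNIT is 512 bytes and
 * AXGMAC_FLOW_CONTROL_ALIGN() rounds up to that unit: a 1522-byte max frame
 * (1500-byte MTU) aligns to frame_fifo_size = 1536. A 16384-byte queue fifo
 * exceeds 3 * 1536, so the final branch applies: rfa = 2 * 1536 + 512 =
 * 3584 bytes and rfd = 3584 + 1536 = 5120 bytes, which
 * AXGMAC_FLOW_CONTROL_VALUE() then converts into the RFA/RFD register
 * encoding.
 */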
491 
492 static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
493 {
494 	unsigned int q_fifo_size;
495 	unsigned int i;
496 
497 	for (i = 0; i < pdata->rx_q_count; i++) {
498 		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
499 
500 		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
501 	}
502 }
503 
504 static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
505 {
506 	unsigned int i;
507 
508 	for (i = 0; i < pdata->rx_q_count; i++) {
509 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
510 					pdata->rx_rfa[i]);
511 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
512 					pdata->rx_rfd[i]);
513 	}
514 }
515 
516 static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
517 {
518 	/* Put the VLAN tag in the Rx descriptor */
519 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
520 
521 	/* Don't check the VLAN type */
522 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
523 
524 	/* Check only C-TAG (0x8100) packets */
525 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
526 
527 	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
528 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
529 
530 	/* Enable VLAN tag stripping */
531 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
532 	return 0;
533 }
534 
535 static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
536 {
537 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
538 	return 0;
539 }
540 
541 static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
542 {
543 	/* Enable VLAN filtering */
544 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
545 
546 	/* Enable VLAN Hash Table filtering */
547 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
548 
549 	/* Disable VLAN tag inverse matching */
550 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
551 
552 	/* Only filter on the lower 12-bits of the VLAN tag */
553 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
554 
555 	/* In order for the VLAN Hash Table filtering to be effective,
556 	 * the VLAN tag identifier in the VLAN Tag Register must not
557 	 * be zero.  Set the VLAN tag identifier to "1" to enable the
558 	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
559 	 * 1 will always pass filtering.
560 	 */
561 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
562 	return 0;
563 }
564 
565 static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
566 {
567 	/* Disable VLAN filtering */
568 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
569 	return 0;
570 }
571 
572 static u32 axgbe_vid_crc32_le(__le16 vid_le)
573 {
574 	u32 poly = 0xedb88320;  /* CRCPOLY_LE */
575 	u32 crc = ~0;
576 	u32 temp = 0;
577 	unsigned char *data = (unsigned char *)&vid_le;
578 	unsigned char data_byte = 0;
579 	int i, bits;
580 
581 	bits = get_lastbit_set(VLAN_VID_MASK);
582 	for (i = 0; i < bits; i++) {
583 		if ((i % 8) == 0)
584 			data_byte = data[i / 8];
585 
586 		temp = ((crc & 1) ^ data_byte) & 1;
587 		crc >>= 1;
588 		data_byte >>= 1;
589 
590 		if (temp)
591 			crc ^= poly;
592 	}
593 	return crc;
594 }
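
/* Sketch (not compiled): deriving the 4-bit VLAN hash bucket for VLAN ID
 * 100, mirroring the pipeline used by axgbe_update_vlan_hash_table() below.
 */
#if 0
	__le16 vid_le = rte_cpu_to_le_16(100);
	u32 bucket = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
	u16 hash = (u16)(1 << bucket);	/* bit to set in MAC_VLANHTR:VLHT */
#endif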
595 
596 static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
597 {
598 	u32 crc = 0;
599 	u16 vid;
600 	__le16 vid_le = 0;
601 	u16 vlan_hash_table = 0;
602 	unsigned int reg = 0;
603 	unsigned long vid_idx, vid_valid;
604 
605 	/* Generate the VLAN Hash Table value */
606 	for (vid = 0; vid < VLAN_N_VID; vid++) {
607 		vid_idx = VLAN_TABLE_IDX(vid);
608 		vid_valid = pdata->active_vlans[vid_idx];
609 		vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
610 		if (vid_valid & 1)
611 			PMD_DRV_LOG_LINE(DEBUG,
612 				    "vid:%d pdata->active_vlans[%lu]=0x%lx",
613 				    vid, vid_idx, pdata->active_vlans[vid_idx]);
614 		else
615 			continue;
616 
617 		vid_le = rte_cpu_to_le_16(vid);
618 		crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
619 		vlan_hash_table |= (1 << crc);
620 		PMD_DRV_LOG_LINE(DEBUG, "crc = %d vlan_hash_table = 0x%x",
621 			    crc, vlan_hash_table);
622 	}
623 	/* Set the VLAN Hash Table filtering register */
624 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
625 	reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
626 	PMD_DRV_LOG_LINE(DEBUG, "vlan_hash_table reg val = 0x%x", reg);
627 	return 0;
628 }
629 
630 static int __axgbe_exit(struct axgbe_port *pdata)
631 {
632 	unsigned int count = 2000;
633 
634 	/* Issue a software reset */
635 	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
636 	rte_delay_us(10);
637 
638 	/* Poll Until Poll Condition */
639 	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
640 		rte_delay_us(500);
641 
642 	if (!count)
643 		return -EBUSY;
644 
645 	return 0;
646 }
647 
648 static int axgbe_exit(struct axgbe_port *pdata)
649 {
650 	int ret;
651 
652 	/* To guard against possible incorrectly generated interrupts,
653 	 * issue the software reset twice.
654 	 */
655 	ret = __axgbe_exit(pdata);
656 	if (ret)
657 		return ret;
658 
659 	return __axgbe_exit(pdata);
660 }
661 
662 static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
663 {
664 	unsigned int i, count;
665 
666 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
667 		return 0;
668 
669 	for (i = 0; i < pdata->tx_q_count; i++)
670 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
671 
672 	/* Poll Until Poll Condition */
673 	for (i = 0; i < pdata->tx_q_count; i++) {
674 		count = 2000;
675 		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
676 							 MTL_Q_TQOMR, FTQ))
677 			rte_delay_us(500);
678 
679 		if (!count)
680 			return -EBUSY;
681 	}
682 
683 	return 0;
684 }
685 
686 static void axgbe_config_dma_bus(struct axgbe_port *pdata)
687 {
688 	/* Set enhanced addressing mode */
689 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
690 
691 	/* Outstanding read/write requests */
692 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
693 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
694 
695 	/* Set the System Bus mode */
696 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
697 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
698 	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
699 }
700 
701 static void axgbe_config_dma_cache(struct axgbe_port *pdata)
702 {
703 	unsigned int arcache, awcache, arwcache;
704 
705 	arcache = 0;
706 	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf);
707 	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf);
708 	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf);
709 	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
710 
711 	awcache = 0;
712 	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf);
713 	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf);
714 	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf);
715 	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf);
716 	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
717 
718 	arwcache = 0;
719 	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf);
720 	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf);
721 	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
722 }
723 
724 static void axgbe_config_edma_control(struct axgbe_port *pdata)
725 {
726 	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
727 	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
728 }
729 
730 static int axgbe_config_osp_mode(struct axgbe_port *pdata)
731 {
732 	/* Force DMA to operate on second packet before closing descriptors
733 	 *  of first packet
734 	 */
735 	struct axgbe_tx_queue *txq;
736 	unsigned int i;
737 
738 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
739 		txq = pdata->eth_dev->data->tx_queues[i];
740 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
741 					pdata->tx_osp_mode);
742 	}
743 
744 	return 0;
745 }
746 
747 static int axgbe_config_pblx8(struct axgbe_port *pdata)
748 {
749 	struct axgbe_tx_queue *txq;
750 	unsigned int i;
751 
752 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
753 		txq = pdata->eth_dev->data->tx_queues[i];
754 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
755 					pdata->pblx8);
756 	}
757 	return 0;
758 }
759 
760 static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
761 {
762 	struct axgbe_tx_queue *txq;
763 	unsigned int i;
764 
765 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
766 		txq = pdata->eth_dev->data->tx_queues[i];
767 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
768 				pdata->tx_pbl);
769 	}
770 
771 	return 0;
772 }
773 
774 static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
775 {
776 	struct axgbe_rx_queue *rxq;
777 	unsigned int i;
778 
779 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
780 		rxq = pdata->eth_dev->data->rx_queues[i];
781 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
782 				pdata->rx_pbl);
783 	}
784 
785 	return 0;
786 }
787 
788 static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
789 {
790 	struct axgbe_rx_queue *rxq;
791 	unsigned int i;
792 
793 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
794 		rxq = pdata->eth_dev->data->rx_queues[i];
795 
796 		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
797 			RTE_PKTMBUF_HEADROOM;
798 		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
799 			~(AXGBE_RX_BUF_ALIGN - 1);
800 
801 		if (rxq->buf_size > pdata->rx_buf_size)
802 			pdata->rx_buf_size = rxq->buf_size;
803 
804 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
805 					rxq->buf_size);
806 	}
807 }
808 
809 static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
810 			       unsigned int index, unsigned int val)
811 {
812 	unsigned int wait;
813 
814 	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
815 		return -EBUSY;
816 
817 	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
818 
819 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
820 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
821 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
822 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
823 
824 	wait = 1000;
825 	while (wait--) {
826 		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
827 			return 0;
828 
829 		rte_delay_us(1500);
830 	}
831 
832 	return -EBUSY;
833 }
834 
835 int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
836 {
837 	struct rte_eth_rss_conf *rss_conf;
838 	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
839 	unsigned int *key;
840 	int ret;
841 
842 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
843 
844 	if (!rss_conf->rss_key)
845 		key = (unsigned int *)&pdata->rss_key;
846 	else
847 		key = (unsigned int *)rss_conf->rss_key;
848 
849 	while (key_regs--) {
850 		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
851 					  key_regs, *key++);
852 		if (ret)
853 			return ret;
854 	}
855 
856 	return 0;
857 }
858 
859 int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
860 {
861 	unsigned int i;
862 	int ret;
863 
864 	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
865 		ret = axgbe_write_rss_reg(pdata,
866 					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
867 					  pdata->rss_table[i]);
868 		if (ret)
869 			return ret;
870 	}
871 
872 	return 0;
873 }
874 
875 static void axgbe_config_tso_mode(struct axgbe_port *pdata)
876 {
877 	unsigned int i;
878 	struct axgbe_tx_queue *txq;
879 
880 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
881 		txq = pdata->eth_dev->data->tx_queues[i];
882 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
883 	}
884 }
885 
886 static int axgbe_enable_rss(struct axgbe_port *pdata)
887 {
888 	int ret;
889 
890 	/* Program the hash key */
891 	ret = axgbe_write_rss_hash_key(pdata);
892 	if (ret)
893 		return ret;
894 
895 	/* Program the lookup table */
896 	ret = axgbe_write_rss_lookup_table(pdata);
897 	if (ret)
898 		return ret;
899 
900 	/* Set the RSS options */
901 	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
902 
903 	/* Enable RSS */
904 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
905 
906 	return 0;
907 }
908 
909 static void axgbe_rss_options(struct axgbe_port *pdata)
910 {
911 	struct rte_eth_rss_conf *rss_conf;
912 	uint64_t rss_hf;
913 
914 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
915 	pdata->rss_hf = rss_conf->rss_hf;
916 	rss_hf = rss_conf->rss_hf;
917 
918 	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
919 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
920 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
921 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
922 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
923 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
924 }
925 
926 static int axgbe_config_rss(struct axgbe_port *pdata)
927 {
928 	uint32_t i;
929 
930 	if (pdata->rss_enable) {
931 		/* Initialize RSS hash key and lookup table */
932 		uint32_t *key = (uint32_t *)pdata->rss_key;
933 
934 		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
935 			*key++ = (uint32_t)rte_rand();
936 		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
937 			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
938 					i % pdata->eth_dev->data->nb_rx_queues);
939 		axgbe_rss_options(pdata);
940 		if (axgbe_enable_rss(pdata)) {
941 			PMD_DRV_LOG_LINE(ERR, "Error in enabling RSS support");
942 			return -1;
943 		}
944 	} else {
945 		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
946 	}
947 
948 	return 0;
949 }
950 
951 static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
952 {
953 	struct axgbe_tx_queue *txq;
954 	unsigned int dma_ch_isr, dma_ch_ier;
955 	unsigned int i;
956 
957 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
958 		txq = pdata->eth_dev->data->tx_queues[i];
959 
960 		/* Clear all the interrupts which are set */
961 		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
962 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
963 
964 		/* Clear all interrupt enable bits */
965 		dma_ch_ier = 0;
966 
967 		/* Configure the interrupt summary bits. Only the abnormal
968 		 * summary and fatal bus error interrupts are enabled:
969 		 *   NIE  - Normal Interrupt Summary Enable (left disabled,
970 		 *          the PMD polls for Rx/Tx completions)
971 		 *   AIE  - Abnormal Interrupt Summary Enable
972 		 *   FBEE - Fatal Bus Error Enable
973 		 */
974 		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
975 		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
976 		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
977 
978 		/* Rx interrupts are likewise left disabled:
979 		 *   RBUE - Receive Buffer Unavailable Enable (buffer refill
980 		 *          is handled in the receive poll loop)
981 		 */
982 		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
983 
984 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
985 	}
986 }
987 
988 static void wrapper_tx_desc_init(struct axgbe_port *pdata)
989 {
990 	struct axgbe_tx_queue *txq;
991 	unsigned int i;
992 
993 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
994 		txq = pdata->eth_dev->data->tx_queues[i];
995 		txq->cur = 0;
996 		txq->dirty = 0;
997 		/* Update the total number of Tx descriptors */
998 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
999 		/* Update the starting address of descriptor ring */
1000 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
1001 					high32_value(txq->ring_phys_addr));
1002 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
1003 					low32_value(txq->ring_phys_addr));
1004 	}
1005 }
1006 
1007 static int wrapper_rx_desc_init(struct axgbe_port *pdata)
1008 {
1009 	struct axgbe_rx_queue *rxq;
1010 	struct rte_mbuf *mbuf;
1011 	volatile union axgbe_rx_desc *desc;
1012 	unsigned int i, j;
1013 
1014 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
1015 		rxq = pdata->eth_dev->data->rx_queues[i];
1016 
1017 		/* Initialize software ring entries */
1018 		rxq->mbuf_alloc = 0;
1019 		rxq->cur = 0;
1020 		rxq->dirty = 0;
1021 		desc = AXGBE_GET_DESC_PT(rxq, 0);
1022 
1023 		for (j = 0; j < rxq->nb_desc; j++) {
1024 			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1025 			if (mbuf == NULL) {
1026 				PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d",
1027 					    (unsigned int)rxq->queue_id, j);
1028 				axgbe_dev_rx_queue_release(pdata->eth_dev, i);
1029 				return -ENOMEM;
1030 			}
1031 			rxq->sw_ring[j] = mbuf;
1032 			/* Mbuf populate */
1033 			mbuf->next = NULL;
1034 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1035 			mbuf->nb_segs = 1;
1036 			mbuf->port = rxq->port_id;
1037 			desc->read.baddr =
1038 				rte_cpu_to_le_64(
1039 					rte_mbuf_data_iova_default(mbuf));
1040 			rte_wmb();
1041 			AXGMAC_SET_BITS_LE(desc->read.desc3,
1042 						RX_NORMAL_DESC3, OWN, 1);
1043 			rte_wmb();
1044 			rxq->mbuf_alloc++;
1045 			desc++;
1046 		}
1047 		/* Update the total number of Rx descriptors */
1048 		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
1049 					rxq->nb_desc - 1);
1050 		/* Update the starting address of descriptor ring */
1051 		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
1052 					high32_value(rxq->ring_phys_addr));
1053 		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
1054 					low32_value(rxq->ring_phys_addr));
1055 		/* Update the Rx Descriptor Tail Pointer */
1056 		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
1057 				   low32_value(rxq->ring_phys_addr +
1058 				   (rxq->nb_desc - 1) *
1059 				   sizeof(union axgbe_rx_desc)));
1060 	}
1061 	return 0;
1062 }
1063 
1064 static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
1065 {
1066 	unsigned int i;
1067 
1068 	/* Set Tx to weighted round robin scheduling algorithm */
1069 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1070 
1071 	/* Set Tx traffic classes to use WRR algorithm with equal weights */
1072 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
1073 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
1074 				MTL_TSA_ETS);
1075 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
1076 	}
1077 
1078 	/* Set Rx to strict priority algorithm */
1079 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
1080 }
1081 
1082 static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
1083 {
1084 	unsigned int i;
1085 
1086 	for (i = 0; i < pdata->tx_q_count; i++)
1087 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
1088 
1089 	return 0;
1090 }
1091 
1092 static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
1093 {
1094 	unsigned int i;
1095 
1096 	for (i = 0; i < pdata->rx_q_count; i++)
1097 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
1098 
1099 	return 0;
1100 }
1101 
1102 static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
1103 				     unsigned int val)
1104 {
1105 	unsigned int i;
1106 
1107 	for (i = 0; i < pdata->tx_q_count; i++)
1108 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
1109 
1110 	return 0;
1111 }
1112 
1113 static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
1114 				     unsigned int val)
1115 {
1116 	unsigned int i;
1117 
1118 	for (i = 0; i < pdata->rx_q_count; i++)
1119 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
1120 
1121 	return 0;
1122 }
1123 
1124 /* Distributing FIFO size */
1125 static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
1126 {
1127 	unsigned int fifo_size;
1128 	unsigned int q_fifo_size;
1129 	unsigned int p_fifo, i;
1130 
1131 	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
1132 			  pdata->hw_feat.rx_fifo_size);
1133 	q_fifo_size = fifo_size / pdata->rx_q_count;
1134 
1135 	/* Calculate the fifo setting by dividing the queue's fifo size
1136 	 * by the fifo allocation increment (with 0 representing the
1137 	 * base allocation increment so decrement the result
1138 	 * by 1).
1139 	 */
1140 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
1141 	if (p_fifo)
1142 		p_fifo--;
1143 
1144 	for (i = 0; i < pdata->rx_q_count; i++)
1145 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
1146 	pdata->fifo = p_fifo;
1147 
1148 	/* Calculate and configure flow control thresholds */
1149 	axgbe_calculate_flow_control_threshold(pdata);
1150 	axgbe_config_flow_control_threshold(pdata);
1151 
1152 	PMD_DRV_LOG_LINE(DEBUG, "%d Rx hardware queues, %d byte fifo per queue",
1153 		    pdata->rx_q_count, q_fifo_size);
1154 }
1155 
1156 static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
1157 {
1158 	unsigned int fifo_size;
1159 	unsigned int q_fifo_size;
1160 	unsigned int p_fifo, i;
1161 
1162 	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
1163 				pdata->hw_feat.tx_fifo_size);
1164 	q_fifo_size = fifo_size / pdata->tx_q_count;
1165 
1166 	/* Calculate the fifo setting by dividing the queue's fifo size
1167 	 * by the fifo allocation increment (with 0 representing the
1168 	 * base allocation increment so decrement the result
1169 	 * by 1).
1170 	 */
1171 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
1172 	if (p_fifo)
1173 		p_fifo--;
1174 
1175 	for (i = 0; i < pdata->tx_q_count; i++)
1176 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
1177 
1178 	PMD_DRV_LOG_LINE(DEBUG, "%d Tx hardware queues, %d byte fifo per queue",
1179 		    pdata->tx_q_count, q_fifo_size);
1180 }
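
/* Worked example for the two functions above, assuming the usual 256-byte
 * allocation unit (AXGMAC_FIFO_UNIT): with a 65536-byte hardware fifo split
 * across 8 queues, q_fifo_size = 8192, so p_fifo = 8192 / 256 - 1 = 31 is
 * written to each queue's TQS/RQS field.
 */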
1181 
1182 static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
1183 {
1184 	unsigned int qptc, qptc_extra, queue;
1185 	unsigned int i, j, reg, reg_val;
1186 
1187 	/* Map the MTL Tx Queues to Traffic Classes
1188 	 *   Note: Tx Queues >= Traffic Classes
1189 	 */
1190 	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
1191 	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
1192 
1193 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
1194 		for (j = 0; j < qptc; j++) {
1195 			PMD_DRV_LOG_LINE(DEBUG, "TXq%u mapped to TC%u", queue, i);
1196 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i);
1197 			queue++;
1198 		}
1199 		if (i < qptc_extra) {
1200 			PMD_DRV_LOG_LINE(DEBUG, "TXq%u mapped to TC%u", queue, i);
1201 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i);
1202 			queue++;
1203 		}
1204 	}
1205 
1206 	if (pdata->rss_enable) {
1207 		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1208 		reg = MTL_RQDCM0R;
1209 		reg_val = 0;
1210 		for (i = 0; i < pdata->rx_q_count;) {
1211 			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
1212 
1213 			if ((i % MTL_RQDCM_Q_PER_REG) &&
1214 			    (i != pdata->rx_q_count))
1215 				continue;
1216 
1217 			AXGMAC_IOWRITE(pdata, reg, reg_val);
1218 
1219 			reg += MTL_RQDCM_INC;
1220 			reg_val = 0;
1221 		}
1222 	}
1223 }
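
/* Worked example of the Tx mapping above: with tx_q_count = 8 and
 * tc_cnt = 3, qptc = 2 and qptc_extra = 2, so TC0 and TC1 each take three
 * queues (two regular plus one extra) and TC2 takes the remaining two:
 * TC0 -> TXq0-2, TC1 -> TXq3-5, TC2 -> TXq6-7.
 */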
1224 
1225 static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
1226 {
1227 	unsigned int mtl_q_isr;
1228 	unsigned int q_count, i;
1229 
1230 	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
1231 	for (i = 0; i < q_count; i++) {
1232 		/* Clear all the interrupts which are set */
1233 		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
1234 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
1235 
1236 		/* No MTL interrupts to be enabled */
1237 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
1238 	}
1239 }
1240 
1241 static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
1242 {
1243 	int i;
1244 	while (len--) {
1245 		crc ^= *p++;
1246 		for (i = 0; i < 8; i++)
1247 			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
1248 	}
1249 	return crc;
1250 }
1251 
1252 void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
1253 {
1254 	uint32_t crc, htable_index, htable_bitmask;
1255 
1256 	crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
1257 	crc >>= pdata->hash_table_shift;
1258 	htable_index = crc >> 5;
1259 	htable_bitmask = 1 << (crc & 0x1f);
1260 
1261 	if (add) {
1262 		pdata->uc_hash_table[htable_index] |= htable_bitmask;
1263 		pdata->uc_hash_mac_addr++;
1264 	} else {
1265 		pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
1266 		pdata->uc_hash_mac_addr--;
1267 	}
1268 	PMD_DRV_LOG_LINE(DEBUG, "%s MAC hash table Bit %d at Index %#x",
1269 		    add ? "set" : "clear", (crc & 0x1f), htable_index);
1270 
1271 	AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
1272 		       pdata->uc_hash_table[htable_index]);
1273 }
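
/* Worked example for the table update above: after the CRC is reflected and
 * shifted right by hash_table_shift, the low bits split into a register
 * index and a bit position. If the shifted value were 0x47, htable_index =
 * 0x47 >> 5 = 2 (i.e. MAC_HTR(2)) and htable_bitmask = 1 << (0x47 & 0x1f)
 * = 1 << 7.
 */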
1274 
1275 void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
1276 {
1277 	unsigned int mac_addr_hi, mac_addr_lo;
1278 	u8 *mac_addr;
1279 
1280 	mac_addr_lo = 0;
1281 	mac_addr_hi = 0;
1282 
1283 	if (addr) {
1284 		mac_addr = (u8 *)&mac_addr_lo;
1285 		mac_addr[0] = addr[0];
1286 		mac_addr[1] = addr[1];
1287 		mac_addr[2] = addr[2];
1288 		mac_addr[3] = addr[3];
1289 		mac_addr = (u8 *)&mac_addr_hi;
1290 		mac_addr[0] = addr[4];
1291 		mac_addr[1] = addr[5];
1292 
1293 		/* Address Enable: use this address for perfect filtering */
1294 		AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
1295 	}
1296 
1297 	PMD_DRV_LOG_LINE(DEBUG, "%s mac address at %#x",
1298 		    addr ? "set" : "clear", index);
1299 
1300 	AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
1301 	AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
1302 }
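
/* Sketch (not compiled): register layout produced by the function above for
 * 00:11:22:33:44:55 on a little-endian host. The low register carries bytes
 * 0-3, the high register bytes 4-5 plus the AE (address enable) bit.
 */
#if 0
	u8 ex_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	axgbe_set_mac_addn_addr(pdata, ex_addr, 1);
	/* MAC_MACALR(1) = 0x33221100, MAC_MACAHR(1) = 0x00005544 | AE */
#endif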
1303 
1304 static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
1305 {
1306 	unsigned int mac_addr_hi, mac_addr_lo;
1307 
1308 	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
1309 	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
1310 		(addr[1] <<  8) | (addr[0] <<  0);
1311 
1312 	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
1313 	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
1314 
1315 	return 0;
1316 }
1317 
1318 static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
1319 {
1320 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1321 
1322 	pdata->hash_table_shift = 0;
1323 	pdata->hash_table_count = 0;
1324 	pdata->uc_hash_mac_addr = 0;
1325 	memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));
1326 
1327 	if (hw_feat->hash_table_size) {
1328 		pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
1329 		pdata->hash_table_count = hw_feat->hash_table_size / 32;
1330 	}
1331 }
1332 
1333 static void axgbe_config_mac_address(struct axgbe_port *pdata)
1334 {
1335 	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
1336 }
1337 
1338 static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
1339 {
1340 	unsigned int val;
1341 
1342 	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
1343 
1344 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1345 }
1346 
1347 static void axgbe_config_mac_speed(struct axgbe_port *pdata)
1348 {
1349 	axgbe_set_speed(pdata, pdata->phy_speed);
1350 }
1351 
1352 static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
1353 {
1354 	if (pdata->rx_csum_enable)
1355 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
1356 	else
1357 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
1358 }
1359 
1360 static void axgbe_config_mmc(struct axgbe_port *pdata)
1361 {
1362 	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
1363 
1364 	/* Reset stats */
1365 	memset(stats, 0, sizeof(*stats));
1366 
1367 	/* Set counters to reset on read */
1368 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
1369 
1370 	/* Reset the counters */
1371 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
1372 }
1373 
1374 static int axgbe_init(struct axgbe_port *pdata)
1375 {
1376 	int ret;
1377 
1378 	/* Flush Tx queues */
1379 	ret = axgbe_flush_tx_queues(pdata);
1380 	if (ret)
1381 		return ret;
1382 	/* Initialize DMA related features */
1383 	axgbe_config_dma_bus(pdata);
1384 	axgbe_config_dma_cache(pdata);
1385 	axgbe_config_edma_control(pdata);
1386 	axgbe_config_osp_mode(pdata);
1387 	axgbe_config_pblx8(pdata);
1388 	axgbe_config_tx_pbl_val(pdata);
1389 	axgbe_config_rx_pbl_val(pdata);
1390 	axgbe_config_rx_buffer_size(pdata);
1391 	axgbe_config_rss(pdata);
1392 	axgbe_config_tso_mode(pdata);
1393 	wrapper_tx_desc_init(pdata);
1394 	ret = wrapper_rx_desc_init(pdata);
1395 	if (ret)
1396 		return ret;
1397 	axgbe_enable_dma_interrupts(pdata);
1398 
1399 	/* Initialize MTL related features */
1400 	axgbe_config_mtl_mode(pdata);
1401 	axgbe_config_queue_mapping(pdata);
1402 	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
1403 	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
1404 	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
1405 	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
1406 	axgbe_config_tx_fifo_size(pdata);
1407 	axgbe_config_rx_fifo_size(pdata);
1408 
1409 	axgbe_enable_mtl_interrupts(pdata);
1410 
1411 	/* Initialize MAC related features */
1412 	axgbe_config_mac_hash_table(pdata);
1413 	axgbe_config_mac_address(pdata);
1414 	axgbe_config_jumbo_enable(pdata);
1415 	axgbe_config_flow_control(pdata);
1416 	axgbe_config_mac_speed(pdata);
1417 	axgbe_config_checksum_offload(pdata);
1418 	axgbe_config_mmc(pdata);
1419 
1420 	return 0;
1421 }
1422 
1423 void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
1424 {
1425 	hw_if->exit = axgbe_exit;
1426 	hw_if->config_flow_control = axgbe_config_flow_control;
1427 
1428 	hw_if->init = axgbe_init;
1429 
1430 	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
1431 	hw_if->write_mmd_regs = axgbe_write_mmd_regs;
1432 
1433 	hw_if->set_speed = axgbe_set_speed;
1434 
1435 	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
1436 	hw_if->read_ext_mii_regs_c22 = axgbe_read_ext_mii_regs_c22;
1437 	hw_if->write_ext_mii_regs_c22 = axgbe_write_ext_mii_regs_c22;
1438 	hw_if->read_ext_mii_regs_c45 = axgbe_read_ext_mii_regs_c45;
1439 	hw_if->write_ext_mii_regs_c45 = axgbe_write_ext_mii_regs_c45;
1440 
1441 	/* Flow control */
1442 	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
1443 	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
1444 
1445 	/* VLAN */
1446 	hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping;
1447 	hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping;
1448 	hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering;
1449 	hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering;
1450 	hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table;
1451 }
1452