/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_rxtx.h"

static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
	return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
		ETHER_CRC_LEN + VLAN_HLEN;
}

/* Query the MDIO busy bit; returns 1 once the previous operation completes */
static int mdio_complete(struct axgbe_port *pdata)
{
	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
		return 1;

	return 0;
}

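/* Write an external PHY register over the MDIO bus: program the device and
 * register addresses into MAC_MDIOSCAR, then load the data and a write
 * command (CMD = 1) into MAC_MDIOSCCDR with BUSY set, and poll the busy bit
 * for up to one second. The 100 us poll interval below is a driver choice,
 * not a hardware requirement.
 */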
static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
				    int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			return 0;
	}

	PMD_DRV_LOG(ERR, "MDIO write operation timed out\n");
	return -ETIMEDOUT;
}

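/* Read an external PHY register: same MAC_MDIOSCAR/MAC_MDIOSCCDR handshake
 * as the write path, but with a read command (CMD = 3); the result is
 * returned from the DATA field of MAC_MDIOSCCDR.
 */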
static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
				   int reg)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();

	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			goto success;
	}

	PMD_DRV_LOG(ERR, "MDIO read operation timed out\n");
	return -ETIMEDOUT;

success:
	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

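/* Select clause-22 or clause-45 framing for an external MDIO port.
 * MAC_MDIOCL22R appears to act as a per-port bitmask: setting bit N puts
 * port N in CL22 mode, while leaving it clear keeps the port in CL45 mode
 * (the register default, as the empty CL45 case below suggests).
 */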
static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
				  enum axgbe_mdio_mode mode)
{
	unsigned int reg_val = 0;

	switch (mode) {
	case AXGBE_MDIO_MODE_CL22:
		if (port > AXGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case AXGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}
	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

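/* Worked example for the windowed access below (mask value hypothetical):
 * with xpcs_window_mask = 0x3fff, an MMD address of 0x10005 (MMD 1,
 * register 5) shifts left to 0x2000a; index = 0x20000 is written to the
 * window-select register and offset = xpcs_window + 0xa addresses the
 * 16-bit value inside that window. Both window parameters come from pdata,
 * which is populated when the device is probed.
 */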
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}

static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
			       int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "XPCS access version 1 is not supported\n");
		return -1;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
				 int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "XPCS access version 1 is not supported\n");
		return;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

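/* MAC_TCR.SS speed-select encoding used below: 0x00 selects 10 Gbps, 0x02
 * selects 2.5 Gbps and 0x03 selects 1 Gbps. The register is only rewritten
 * when the requested value differs from the current one.
 */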
static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		/* Enable hardware flow control if thresholds are established */
		if (pdata->rx_rfd[i])
			ehfc = 1;

		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
	}

	/* Set MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->tx_pause)
		axgbe_enable_tx_flow_control(pdata);
	else
		axgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->rx_pause)
		axgbe_enable_rx_flow_control(pdata);
	else
		axgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

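/* Pick per-queue Rx flow-control activate (RFA) and deactivate (RFD)
 * thresholds from the queue FIFO size. The small constants written in the
 * mid-size cases are encoded register values, not byte counts: per the
 * AXGMAC_FLOW_CONTROL_VALUE() encoding in axgbe_common.h they appear to
 * count 512-byte units of free space beyond a 1024-byte base, which is what
 * the "Full - N bytes" annotations below spell out.
 */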
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals only with maximum frame sizes, which are
	 * limited to a jumbo frame of 9,000 bytes (plus headers, etc.),
	 * so we can never exceed the maximum allowable RFA/RFD values.
	 */
	if (q_fifo_size <= 2048) {
		/* Set rx_rfa/rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
					pdata->rx_rfa[i]);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
					pdata->rx_rfd[i]);
	}
}

static int __axgbe_exit(struct axgbe_port *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	rte_delay_us(10);

	/* Poll until the self-clearing reset bit drops */
	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		rte_delay_us(500);

	if (!count)
		return -EBUSY;

	return 0;
}

static int axgbe_exit(struct axgbe_port *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __axgbe_exit(pdata);
	if (ret)
		return ret;

	return __axgbe_exit(pdata);
}

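/* Flush the MTL Tx queues. FTQ is self-clearing: hardware resets the bit
 * once the flush of that queue completes, so each queue is polled until the
 * bit drops (or roughly one second elapses). Per the SNPSVER check, the
 * flush is only attempted on MAC versions 2.1 and newer.
 */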
static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
	unsigned int i, count;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll until each queue flush completes */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
							 MTL_Q_TQOMR, FTQ))
			rte_delay_us(500);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Outstanding read/write requests */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
	/* Force DMA to operate on the second packet before closing the
	 * descriptors of the first packet
	 */
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
					pdata->tx_osp_mode);
	}

	return 0;
}

static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
					pdata->pblx8);
	}
	return 0;
}

static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
				pdata->tx_pbl);
	}

	return 0;
}

static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
				pdata->rx_pbl);
	}

	return 0;
}

static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM;
		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
			~(AXGBE_RX_BUF_ALIGN - 1);

		if (rxq->buf_size > pdata->rx_buf_size)
			pdata->rx_buf_size = rxq->buf_size;

		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
					rxq->buf_size);
	}
}

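/* Write one word of RSS state (hash key or lookup table) through the
 * MAC_RSSAR/MAC_RSSDR indirect interface: the data goes into MAC_RSSDR, the
 * target (index, address type, and CT = 0, which appears to select a write)
 * is described in MAC_RSSAR, and setting OB kicks off the operation; OB
 * clears once the write has landed.
 */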
static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
			       unsigned int index, unsigned int val)
{
	unsigned int wait;

	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
		return -EBUSY;

	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			return 0;

		rte_delay_us(1500);
	}

	return -EBUSY;
}

static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key;
	int ret;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

	if (!rss_conf->rss_key)
		key = (unsigned int *)&pdata->rss_key;
	else
		/* rss_key is already a pointer; taking its address here
		 * would make the loop hash the pointer's own bytes rather
		 * than the user-supplied key.
		 */
		key = (unsigned int *)rss_conf->rss_key;

	while (key_regs--) {
		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
					  key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = axgbe_write_rss_reg(pdata,
					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
					  pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_enable_rss(struct axgbe_port *pdata)
{
	int ret;

	/* Program the hash key */
	ret = axgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static void axgbe_rss_options(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	uint64_t rss_hf;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	rss_hf = rss_conf->rss_hf;

	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

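/* RSS bring-up: seed the hash key from rte_rand(), spread the lookup-table
 * entries round-robin across the configured Rx queues, translate the
 * requested rss_hf flags into MAC_RSSCR options and enable hashing.
 */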
static int axgbe_config_rss(struct axgbe_port *pdata)
{
	uint32_t i;

	if (pdata->rss_enable) {
		/* Initialize RSS hash key and lookup table */
		uint32_t *key = (uint32_t *)pdata->rss_key;

		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
			*key++ = (uint32_t)rte_rand();
		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
					i % pdata->eth_dev->data->nb_rx_queues);
		axgbe_rss_options(pdata);
		if (axgbe_enable_rss(pdata)) {
			PMD_DRV_LOG(ERR, "Failed to enable RSS\n");
			return -1;
		}
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
	}

	return 0;
}

static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable the following interrupts:
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 * NIE (Normal Interrupt Summary Enable) stays cleared since
		 * normal completions are handled by the poll-mode Rx/Tx paths.
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* RBUE (Receive Buffer Unavailable Enable) is likewise left
		 * disabled; receive events are picked up by polling rather
		 * than per-channel interrupts.
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}

static void wrapper_tx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		txq->cur = 0;
		txq->dirty = 0;
		/* Update the total number of Tx descriptors */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
					high32_value(txq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
					low32_value(txq->ring_phys_addr));
	}
}

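/* Populate every Rx descriptor with a freshly allocated mbuf and hand it to
 * hardware. The write barriers around the OWN bit matter: the buffer address
 * must be visible before OWN transfers the descriptor to the DMA engine. The
 * tail pointer is then set to the last descriptor so the whole ring is
 * available for reception.
 */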
static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				axgbe_dev_rx_queue_release(rxq);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Populate the mbuf */
			mbuf->next = NULL;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->nb_segs = 1;
			mbuf->port = rxq->port_id;
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
						RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
					rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
					high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
					low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (rxq->nb_desc - 1) *
				   sizeof(union axgbe_rx_desc)));
	}
	return 0;
}

static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				MTL_TSA_ETS);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

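/* Example of the encoding below (counts hypothetical): with a 16 KB Rx FIFO
 * shared by 4 queues, q_fifo_size is 4096 bytes; dividing by the 256-byte
 * AXGMAC_FIFO_UNIT gives 16, and subtracting 1 (0 encodes a single unit)
 * yields RQS = 15.
 */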
/* Distribute the available Rx FIFO among the configured Rx queues */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
			  pdata->hw_feat.rx_fifo_size);
	q_fifo_size = fifo_size / pdata->rx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
	pdata->fifo = p_fifo;

	/* Calculate and configure the flow control thresholds */
	axgbe_calculate_flow_control_threshold(pdata);
	axgbe_config_flow_control_threshold(pdata);
}

static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
				pdata->hw_feat.tx_fifo_size);
	q_fifo_size = fifo_size / pdata->tx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
}

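/* Example of the Tx-queue-to-TC split below (counts hypothetical): with
 * 8 Tx queues and 3 traffic classes, qptc = 2 and qptc_extra = 2, so TC0
 * and TC1 each get three queues and TC2 gets two.
 */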
static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
			/* Advance to the next queue; without this every
			 * write would land on queue 0.
			 */
			queue++;
		}
		if (i < qptc_extra) {
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
			queue++;
		}
	}

	if (pdata->rss_enable) {
		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
		reg = MTL_RQDCM0R;
		reg_val = 0;
		for (i = 0; i < pdata->rx_q_count;) {
			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

			if ((i % MTL_RQDCM_Q_PER_REG) &&
			    (i != pdata->rx_q_count))
				continue;

			AXGMAC_IOWRITE(pdata, reg, reg_val);

			reg += MTL_RQDCM_INC;
			reg_val = 0;
		}
	}
}

static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

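/* Program the station MAC address. Per the shifts below, MAC_MACA0HR holds
 * the last two octets of the address (addr[4] and addr[5]) and MAC_MACA0LR
 * the first four, in little-endian register order.
 */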
static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		(addr[1] <<  8) | (addr[0] <<  0);

	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}

static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
{
	unsigned int val;

	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}

static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
{
	if (pdata->rx_csum_enable)
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
	else
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
}

static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);

	return 0;
}

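/* Populate the hw_if ops table; the rest of the PMD calls through these
 * pointers (for example hw_if->init during device start) rather than
 * referencing the static helpers above directly.
 */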
void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
{
	hw_if->exit = axgbe_exit;
	hw_if->config_flow_control = axgbe_config_flow_control;

	hw_if->init = axgbe_init;

	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
	hw_if->write_mmd_regs = axgbe_write_mmd_regs;

	hw_if->set_speed = axgbe_set_speed;

	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
	/* Flow control */
	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
}