xref: /dpdk/drivers/bus/dpaa/base/fman/fman_hw.c (revision 7e5f49ae767da93486d28142ef53a8fd745f240b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright 2017,2020,2022-2023 NXP
4  *
5  */
6 
7 #include <sys/types.h>
8 #include <sys/ioctl.h>
9 #include <ifaddrs.h>
10 #include <fman.h>
11 /* This header declares things about Fman hardware itself (the format of status
12  * words and an inline implementation of CRC64). We include it only in order to
13  * instantiate the one global variable it depends on.
14  */
15 #include <fsl_fman.h>
16 #include <fsl_fman_crc64.h>
17 #include <fsl_bman.h>
18 
19 #define FMAN_SP_SG_DISABLE                          0x80000000
20 #define FMAN_SP_EXT_BUF_MARG_START_SHIFT            16
21 
22 /* Instantiate the global variable that the inline CRC64 implementation (in
23  * <fsl_fman.h>) depends on.
24  */
25 DECLARE_FMAN_CRC64_TABLE();
26 
27 #define ETH_ADDR_TO_UINT64(eth_addr)                  \
28 	(uint64_t)(((uint64_t)(eth_addr)[0] << 40) |   \
29 	((uint64_t)(eth_addr)[1] << 32) |   \
30 	((uint64_t)(eth_addr)[2] << 24) |   \
31 	((uint64_t)(eth_addr)[3] << 16) |   \
32 	((uint64_t)(eth_addr)[4] << 8) |    \
33 	((uint64_t)(eth_addr)[5]))
34 
35 void
36 fman_if_set_mcast_filter_table(struct fman_if *p)
37 {
38 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
39 	void *hashtable_ctrl;
40 	uint32_t i;
41 
42 	hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
43 	for (i = 0; i < 64; i++)
44 		out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
45 }
46 
47 void
48 fman_if_reset_mcast_filter_table(struct fman_if *p)
49 {
50 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
51 	void *hashtable_ctrl;
52 	uint32_t i;
53 
54 	hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
55 	for (i = 0; i < 64; i++)
56 		out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
57 }
58 
/* Fold a 48-bit MAC address into a 6-bit hash value: each output bit is
 * the XOR (parity) of eight consecutive input bits, consumed LSB first.
 * The first 8-bit group maps to output bit 5, the last to output bit 0.
 */
static
uint32_t get_mac_hash_code(uint64_t eth_addr)
{
	uint32_t hash = 0;
	uint8_t group, k;

	for (group = 0; group < 6; group++) {
		uint64_t parity = 0;

		/* XOR together the next eight low-order bits */
		for (k = 0; k < 8; k++) {
			parity ^= eth_addr & (uint64_t)0x01;
			eth_addr >>= 1;
		}

		hash |= (uint32_t)(parity << (5 - group));
	}

	return hash;
}
81 
int
fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
{
	uint64_t eth_addr;
	void *hashtable_ctrl;
	uint32_t hash;

	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	/* Add hash mac addr not supported on Offline port and onic port */
	if (__if->__if.mac_type == fman_offline_internal ||
	    __if->__if.mac_type == fman_onic)
		return 0;

	eth_addr = ETH_ADDR_TO_UINT64(eth);

	/* Only multicast (group) addresses may be added to the hash filter */
	if (!(eth_addr & GROUP_ADDRESS))
		return -1;

	/* Derive the 6-bit hash slot and set the enable bit alongside it */
	hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
	hash = hash | HASH_CTRL_MCAST_EN;

	hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
	out_be32(hashtable_ctrl, hash);

	return 0;
}
109 
int
fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
	void *mac_reg =
		&((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
	u32 val = in_be32(mac_reg);
	int i;

	/* Get mac addr not supported on Offline port and onic port */
	/* Return NULL mac address */
	if (__if->__if.mac_type == fman_offline_internal ||
	    __if->__if.mac_type == fman_onic) {
		for (i = 0; i < 6; i++)
			eth[i] = 0x0;
		return 0;
	}

	/* mac_addr_l holds the first four address octets, least-significant
	 * byte first
	 */
	eth[0] = (val & 0x000000ff) >> 0;
	eth[1] = (val & 0x0000ff00) >> 8;
	eth[2] = (val & 0x00ff0000) >> 16;
	eth[3] = (val & 0xff000000) >> 24;

	/* mac_addr_u holds the remaining two octets in its low 16 bits */
	mac_reg =  &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
	val = in_be32(mac_reg);

	eth[4] = (val & 0x000000ff) >> 0;
	eth[5] = (val & 0x0000ff00) >> 8;

	return 0;
}
141 
void
fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	void *reg;

	/* Clear mac addr not supported on Offline port and onic port */
	if (m->__if.mac_type == fman_offline_internal ||
	    m->__if.mac_type == fman_onic)
		return;

	/* addr_num == 0 clears the primary address (mac_addr0); a non-zero
	 * addr_num clears additional-address slot addr_num-1
	 */
	if (addr_num) {
		reg = &((struct memac_regs *)m->ccsr_map)->
				mac_addr[addr_num-1].mac_addr_l;
		out_be32(reg, 0x0);
		reg = &((struct memac_regs *)m->ccsr_map)->
					mac_addr[addr_num-1].mac_addr_u;
		out_be32(reg, 0x0);
	} else {
		reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
		out_be32(reg, 0x0);
		reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
		out_be32(reg, 0x0);
	}
}
167 
int
fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	void *reg;
	u32 val;

	/* Set mac addr not supported on Offline port and onic port */
	if (m->__if.mac_type == fman_offline_internal ||
	    m->__if.mac_type == fman_onic)
		return 0;

	/* Cache the address in the software interface structure as well */
	memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);

	/* addr_num == 0 programs the primary address (mac_addr0); non-zero
	 * values program additional-address slot addr_num-1
	 */
	if (addr_num)
		reg = &((struct memac_regs *)m->ccsr_map)->
					mac_addr[addr_num-1].mac_addr_l;
	else
		reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;

	/* First four octets go in mac_addr_l, least-significant byte first */
	val = (m->__if.mac_addr.addr_bytes[0] |
	       (m->__if.mac_addr.addr_bytes[1] << 8) |
	       (m->__if.mac_addr.addr_bytes[2] << 16) |
	       (m->__if.mac_addr.addr_bytes[3] << 24));
	out_be32(reg, val);

	if (addr_num)
		reg = &((struct memac_regs *)m->ccsr_map)->
					mac_addr[addr_num-1].mac_addr_u;
	else
		reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;

	/* Last two octets go in the low 16 bits of mac_addr_u */
	val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
	       (m->__if.mac_addr.addr_bytes[5] << 8));
	out_be32(reg, val);

	return 0;
}
206 
207 void
208 fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
209 {
210 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
211 	u32 value = 0;
212 	void *cmdcfg;
213 
214 	assert(fman_ccsr_map_fd != -1);
215 
216 	/* Set Rx Ignore Pause Frames */
217 	cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
218 	if (enable)
219 		value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
220 	else
221 		value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
222 
223 	out_be32(cmdcfg, value);
224 }
225 
226 void
227 fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
228 {
229 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
230 	unsigned int *maxfrm;
231 
232 	assert(fman_ccsr_map_fd != -1);
233 
234 	/* Set Max frame length */
235 	maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
236 	out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
237 }
238 
void
fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct memac_regs *regs = m->ccsr_map;

	/* Each 64-bit counter is split across a low and a high 32-bit
	 * register; combine the two halves.
	 * NOTE(review): the two halves are read non-atomically, so a
	 * carry between the reads can skew a sample — confirm whether the
	 * hardware latches the pair.
	 */
	/* read recved packet count */
	stats->ipackets = (u64)in_be32(&regs->rfrm_l) |
			((u64)in_be32(&regs->rfrm_u)) << 32;
	stats->ibytes = (u64)in_be32(&regs->roct_l) |
			((u64)in_be32(&regs->roct_u)) << 32;
	stats->ierrors = (u64)in_be32(&regs->rerr_l) |
			((u64)in_be32(&regs->rerr_u)) << 32;

	/* read xmited packet count */
	stats->opackets = (u64)in_be32(&regs->tfrm_l) |
			((u64)in_be32(&regs->tfrm_u)) << 32;
	stats->obytes = (u64)in_be32(&regs->toct_l) |
			((u64)in_be32(&regs->toct_u)) << 32;
	stats->oerrors = (u64)in_be32(&regs->terr_l) |
			((u64)in_be32(&regs->terr_u)) << 32;
}
261 
void
fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct memac_regs *regs = m->ccsr_map;
	int i;
	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);

	/* Counters are laid out as consecutive (low, high) 32-bit register
	 * pairs starting at reoct_l; read n of them into value[].
	 * NOTE(review): the caller must ensure n stays within the counter
	 * region of memac_regs — no bounds check here.
	 */
	for (i = 0; i < n; i++) {
		uint64_t a = in_be32((char *)regs + base_offset + 8 * i);
		uint64_t b = in_be32((char *)regs + base_offset + 8 * i + 4);
		value[i] = a | b << 32;
	}
}
276 
void
fman_if_stats_reset(struct fman_if *p)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct memac_regs *regs = m->ccsr_map;
	uint32_t tmp;

	/* Request a hardware clear of the statistics counters */
	tmp = in_be32(&regs->statn_config);

	tmp |= STATS_CFG_CLR;

	out_be32(&regs->statn_config, tmp);

	/* Busy-wait until the hardware clears the self-resetting bit */
	while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
		;
}
293 
294 void
295 fman_if_bmi_stats_enable(struct fman_if *p)
296 {
297 	struct __fman_if *m = container_of(p, struct __fman_if, __if);
298 	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
299 	uint32_t tmp;
300 
301 	tmp = in_be32(&regs->fmbm_rstc);
302 
303 	tmp |= FMAN_BMI_COUNTERS_EN;
304 
305 	out_be32(&regs->fmbm_rstc, tmp);
306 }
307 
308 void
309 fman_if_bmi_stats_disable(struct fman_if *p)
310 {
311 	struct __fman_if *m = container_of(p, struct __fman_if, __if);
312 	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
313 	uint32_t tmp;
314 
315 	tmp = in_be32(&regs->fmbm_rstc);
316 
317 	tmp &= ~FMAN_BMI_COUNTERS_EN;
318 
319 	out_be32(&regs->fmbm_rstc, tmp);
320 }
321 
void
fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
	int i = 0;

	/* Snapshot the eight RX BMI counters in fixed order; the caller's
	 * array must hold at least eight entries.
	 */
	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
}
338 
void
fman_if_bmi_stats_reset(struct fman_if *p)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;

	/* Zero the same eight RX BMI counters that
	 * fman_if_bmi_stats_get_all() reads.
	 */
	out_be32(&regs->fmbm_rfrc, 0);
	out_be32(&regs->fmbm_rfbc, 0);
	out_be32(&regs->fmbm_rlfc, 0);
	out_be32(&regs->fmbm_rffc, 0);
	out_be32(&regs->fmbm_rfdc, 0);
	out_be32(&regs->fmbm_rfldec, 0);
	out_be32(&regs->fmbm_rodc, 0);
	out_be32(&regs->fmbm_rbdc, 0);
}
354 
355 void
356 fman_if_promiscuous_enable(struct fman_if *p)
357 {
358 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
359 	void *cmdcfg;
360 
361 	assert(fman_ccsr_map_fd != -1);
362 
363 	/* Enable Rx promiscuous mode */
364 	cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
365 	out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
366 }
367 
368 void
369 fman_if_promiscuous_disable(struct fman_if *p)
370 {
371 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
372 	void *cmdcfg;
373 
374 	assert(fman_ccsr_map_fd != -1);
375 
376 	/* Disable Rx promiscuous mode */
377 	cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
378 	out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
379 }
380 
void
fman_if_enable_rx(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* enable Rx and Tx */
	/* Sets the two low bits of the register at ccsr offset 8 —
	 * presumably COMMAND_CONFIG's TX/RX enable bits; confirm against
	 * the memac_regs layout.
	 */
	out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
}
391 
void
fman_if_disable_rx(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* only disable Rx, not Tx */
	/* Clears bit 1 of the register at ccsr offset 8 (the RX-enable bit
	 * also tested by fman_if_get_rx_status())
	 */
	out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
}
402 
403 int
404 fman_if_get_rx_status(struct fman_if *p)
405 {
406 	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
407 
408 	assert(fman_ccsr_map_fd != -1);
409 
410 	/* return true if RX bit is set */
411 	return !!(in_be32(__if->ccsr_map + 8) & (u32)2);
412 }
413 
void
fman_if_loopback_enable(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* Enable loopback mode */
	if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
		/* RGMII memac: loopback is controlled via the IF_MODE
		 * register's RLP bit
		 */
		unsigned int *ifmode =
			&((struct memac_regs *)__if->ccsr_map)->if_mode;
		out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
	} else{
		/* Otherwise use the COMMAND_CONFIG loopback-enable bit */
		unsigned int *cmdcfg =
			&((struct memac_regs *)__if->ccsr_map)->command_config;
		out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
	}
}
432 
void
fman_if_loopback_disable(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);
	/* Disable loopback mode */
	if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
		/* RGMII memac: clear the IF_MODE RLP bit */
		unsigned int *ifmode =
			&((struct memac_regs *)__if->ccsr_map)->if_mode;
		out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
	} else {
		/* Otherwise clear the COMMAND_CONFIG loopback-enable bit */
		unsigned int *cmdcfg =
			&((struct memac_regs *)__if->ccsr_map)->command_config;
		out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
	}
}
450 
void
fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
		    int bpid, size_t bufsize)
{
	u32 fmbm_ebmpi;
	u32 ebmpi_val_ace = 0xc0000000;
	u32 ebmpi_mask = 0xffc00000;

	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* Rewrite external buffer-pool entry 0: preserve the current top
	 * bits (ebmpi_mask), force the two MSBs on (ebmpi_val_ace), and
	 * encode the pool id at bit 16 with the buffer size in the low
	 * bits. NOTE(review): exact field semantics come from the FMan
	 * reference manual — confirm before changing the masks.
	 */
	fmbm_ebmpi =
	       in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
	fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
		     (bufsize);

	out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
		 fmbm_ebmpi);
}
471 
472 int
473 fman_if_get_fc_threshold(struct fman_if *fm_if)
474 {
475 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
476 	unsigned int *fmbm_mpd;
477 
478 	assert(fman_ccsr_map_fd != -1);
479 
480 	fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
481 	return in_be32(fmbm_mpd);
482 }
483 
int
fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
			 u32 low_water, u32 bpid)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_mpd;

	assert(fman_ccsr_map_fd != -1);

	/* Enable buffer-pool depletion signalling on this port, then
	 * program the pool's hardware depletion thresholds in BMan.
	 * Returns the BMan call's status.
	 */
	fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
	out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
	return bm_pool_set_hw_threshold(bpid, low_water, high_water);

}
498 
499 int
500 fman_if_get_fc_quanta(struct fman_if *fm_if)
501 {
502 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
503 
504 	assert(fman_ccsr_map_fd != -1);
505 
506 	return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
507 }
508 
509 int
510 fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
511 {
512 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
513 
514 	assert(fman_ccsr_map_fd != -1);
515 
516 	out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
517 		 pause_quanta);
518 	return 0;
519 }
520 
int
fman_if_get_fdoff(struct fman_if *fm_if)
{
	u32 fmbm_rebm;
	int fdoff;

	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);

	/* Frame-data offset is the 9-bit external buffer margin field
	 * starting at bit 16 of fmbm_rebm (the same field written by
	 * fman_if_set_fdoff())
	 */
	fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;

	return fdoff;
}
537 
538 void
539 fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
540 {
541 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
542 
543 	assert(fman_ccsr_map_fd != -1);
544 
545 	unsigned int *fmbm_refqid =
546 			&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
547 	out_be32(fmbm_refqid, err_fqid);
548 }
549 
int
fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	int val = 0;
	int iceof_mask = 0x001f0000;
	int icsz_mask = 0x0000001f;
	int iciof_mask = 0x00000f00;

	assert(fman_ccsr_map_fd != -1);

	unsigned int *fmbm_ricp =
		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
	val = in_be32(fmbm_ricp);

	/* The shifts differ from the mask positions by a factor of 16:
	 * register fields appear to be in 16-byte units (e.g. iceof sits
	 * at bit 16, so >>16 to extract then <<4 to scale collapses to
	 * >>12) — mirrors the packing in fman_if_set_ic_params().
	 */
	icp->iceof = (val & iceof_mask) >> 12;
	icp->iciof = (val & iciof_mask) >> 4;
	icp->icsz = (val & icsz_mask) << 4;

	return 0;
}
571 
int
fman_if_set_ic_params(struct fman_if *fm_if,
			  const struct fman_if_ic_params *icp)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	int val = 0;
	int iceof_mask = 0x001f0000;
	int icsz_mask = 0x0000001f;
	int iciof_mask = 0x00000f00;

	assert(fman_ccsr_map_fd != -1);

	/* Pack the internal-context parameters into register format; the
	 * shifts scale byte values into what appear to be 16-byte units at
	 * each field's bit position (inverse of fman_if_get_ic_params()).
	 */
	val |= (icp->iceof << 12) & iceof_mask;
	val |= (icp->iciof << 4) & iciof_mask;
	val |= (icp->icsz >> 4) & icsz_mask;

	/* Apply the same layout to both the RX and TX port ICP registers */
	unsigned int *fmbm_ricp =
		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
	out_be32(fmbm_ricp, val);

	unsigned int *fmbm_ticp =
		&((struct tx_bmi_regs *)__if->tx_bmi_map)->fmbm_ticp;
	out_be32(fmbm_ticp, val);

	return 0;
}
598 
void
fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_rebm;
	int val = 0;
	int fmbm_mask = 0x01ff0000;

	/* Position the offset in the 9-bit buffer-margin field at bit 16 */
	val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;

	assert(fman_ccsr_map_fd != -1);

	fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;

	/* Read-modify-write so the other fmbm_rebm fields are preserved */
	out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
}
615 
616 void
617 fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
618 {
619 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
620 	unsigned int *reg_maxfrm;
621 
622 	assert(fman_ccsr_map_fd != -1);
623 
624 	reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
625 
626 	out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
627 }
628 
629 uint16_t
630 fman_if_get_maxfrm(struct fman_if *fm_if)
631 {
632 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
633 	unsigned int *reg_maxfrm;
634 
635 	assert(fman_ccsr_map_fd != -1);
636 
637 	reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
638 
639 	return (in_be32(reg_maxfrm) | 0x0000FFFF);
640 }
641 
642 /* MSB in fmbm_rebm register
643  * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
644  *     of smaller size and store the frame in scatter gather (S/G) buffers
645  * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
646  *     store the frame in a single buffer, the frame is discarded.
647  */
648 
649 int
650 fman_if_get_sg_enable(struct fman_if *fm_if)
651 {
652 	u32 fmbm_rebm;
653 
654 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
655 
656 	assert(fman_ccsr_map_fd != -1);
657 
658 	fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
659 
660 	return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
661 }
662 
663 void
664 fman_if_set_sg(struct fman_if *fm_if, int enable)
665 {
666 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
667 	unsigned int *fmbm_rebm;
668 	int val;
669 	int fmbm_mask = FMAN_SP_SG_DISABLE;
670 
671 	if (enable)
672 		val = 0;
673 	else
674 		val = FMAN_SP_SG_DISABLE;
675 
676 	assert(fman_ccsr_map_fd != -1);
677 
678 	fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
679 
680 	out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
681 }
682 
683 void
684 fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
685 {
686 	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
687 	unsigned int *fmqm_pndn;
688 
689 	assert(fman_ccsr_map_fd != -1);
690 
691 	fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
692 
693 	out_be32(fmqm_pndn, nia);
694 }
695 
void
fman_if_discard_rx_errors(struct fman_if *fm_if)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_rfsdm, *fmbm_rfsem;

	/* Clear the error-enqueue mask so no error frames are delivered */
	fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
	out_be32(fmbm_rfsem, 0);

	/* Configure the discard mask to discard the error packets which have
	 * DMA errors, Frame size error, Header error etc. The mask 0x010EE3F0
	 * is to configured discard all the errors which come in the FD[STATUS]
	 */
	fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
	out_be32(fmbm_rfsdm, 0x010EE3F0);
}
712 
void
fman_if_receive_rx_errors(struct fman_if *fm_if,
	unsigned int err_eq)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_rcfg, *fmbm_rfsdm, *fmbm_rfsem;
	unsigned int val;

	fmbm_rcfg = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rcfg;
	fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
	fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;

	/* Set the FD-override bit in the RX port configuration */
	val = in_be32(fmbm_rcfg);
	out_be32(fmbm_rcfg, val | BMI_PORT_CFG_FDOVR);

	/* Discard nothing: clear the discard mask and enqueue frames
	 * matching err_eq so callers receive them with their error status
	 * (inverse of fman_if_discard_rx_errors())
	 */
	out_be32(fmbm_rfsdm, 0);
	out_be32(fmbm_rfsem, err_eq);
}
731