/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2021 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <arpa/inet.h>

static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

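	/*
	 * Rx and Tx descriptor rings are carved out of one contiguous,
	 * cache-line-aligned allocation; pfe_hif_init_buffers() places the
	 * Tx ring directly after the Rx ring.
	 */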
	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;

err0:
	return err;
}

static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

/* pfe_hif_release_buffers */
static void
pfe_hif_release_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc;
	uint32_t i = 0;
	struct rte_mbuf *mbuf;
	struct rte_pktmbuf_pool_private *mb_priv;

	hif->rx_base = hif->descr_baseaddr_v;

	/* Free Rx buffers */
	desc = hif->rx_base;
	mb_priv = rte_mempool_get_priv(hif->shm->pool);
	for (i = 0; i < hif->rx_ring_size; i++) {
		if (readl(&desc->data)) {
			if (i < hif->shm->rx_buf_pool_cnt &&
			    !hif->shm->rx_buf_pool[i]) {
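				/*
				 * Recover the rte_mbuf from the buffer
				 * address given to the HIF ring: add back
				 * PFE_PKT_HEADER_SZ, then step back over the
				 * headroom, private area and mbuf header
				 * (this assumes data_off equals
				 * RTE_PKTMBUF_HEADROOM).
				 */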
				mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
					- sizeof(struct rte_mbuf)
					- RTE_PKTMBUF_HEADROOM
					- mb_priv->mbuf_priv_size;
				hif->shm->rx_buf_pool[i] = mbuf;
			}
		}
		writel(0, &desc->data);
		writel(0, &desc->status);
		writel(0, &desc->ctrl);
		desc++;
	}
}

/*
 * pfe_hif_init_buffers
 * This function initializes the HIF Rx/Tx ring descriptors and
 * initializes the Rx queue with buffers.
 */
int
pfe_hif_init_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc, *first_desc_p;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Check that enough Rx buffers are available in the shared memory */
	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
		return -ENOMEM;

	hif->rx_base = hif->descr_baseaddr_v;
	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));

	/* Initialize Rx descriptors */
	desc = hif->rx_base;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;

	for (i = 0; i < hif->rx_ring_size; i++) {
		/* Initialize Rx buffers from the shared memory */
		struct rte_mbuf *mbuf =
			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];

		/* The PFE mbuf layout is as follows:
		 * ----------------------------------------------------------+
		 * | mbuf  | priv | headroom (annotation + PFE data) | data  |
		 * ----------------------------------------------------------+
		 *
		 * The PFE block delivers additional information (parse
		 * results, eth id, queue id) along with the data, so each
		 * buffer posted to the HIF Rx ring must include extra room
		 * for the PFE headers. The pointer given to the HIF ring is
		 * therefore:
		 * mbuf->data_pointer - Required_header_size
		 *
		 * The HEADROOM area receives the PFE block headers. On
		 * packet reception, the HIF driver uses these headers to
		 * select the client and fill in the parse results; after
		 * that the application may use/overwrite the HEADROOM area.
		 */
		hif->rx_buf_vaddr[i] =
			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_addr[i] =
			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
					PFE_PKT_HEADER_SZ);
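		/*
		 * The usable buffer length excludes the mbuf headroom,
		 * which stays reserved for the PFE headers described above.
		 */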
		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

		hif->shm->rx_buf_pool[i] = NULL;

		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
					&desc->data);
		writel(0, &desc->status);

		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();

		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
			| BD_CTRL_DIR | BD_CTRL_DESC_EN
			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);

		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		desc++;
	}

	/* Overwrite the last descriptor to chain it back to the first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);

	hif->rxtoclean_index = 0;

	/* Initialize the Rx buffer descriptor ring base address */
	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);

	hif->tx_base = hif->rx_base + hif->rx_ring_size;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
				hif->rx_ring_size;
	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));

	/* Initialize Tx descriptors */
	desc = hif->tx_base;

	for (i = 0; i < hif->tx_ring_size; i++) {
		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		writel(0, &desc->ctrl);
		desc++;
	}

	/* Overwrite the last descriptor to chain it back to the first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
	hif->txavail = hif->tx_ring_size;
	hif->txtosend = 0;
	hif->txtoclean = 0;
	hif->txtoflush = 0;

	/* Initialize the Tx buffer descriptor ring base address */
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);

	return 0;
}

/*
 * pfe_hif_client_register
 *
 * This function is used to register a client driver with the HIF driver.
 *
 * Return value:
 * 0 - on successful registration
 */
static int
pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
			struct hif_client_shm *client_shm)
{
	struct hif_client *client = &hif->client[client_id];
	u32 i, cnt;
	struct rx_queue_desc *rx_qbase;
	struct tx_queue_desc *tx_qbase;
	struct hif_rx_queue *rx_queue;
	struct hif_tx_queue *tx_queue;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

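	/*
	 * The client table and status bits are read from the Rx/Tx fast
	 * paths, so hold tx_lock to make registration appear atomic to
	 * them.
	 */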
	rte_spinlock_lock(&hif->tx_lock);

	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d already registered", client_id);
		err = -1;
		goto unlock;
	}

	memset(client, 0, sizeof(struct hif_client));

	/* Initialize client Rx queues baseaddr, size */

	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->rx_qn = cnt;
	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
	for (i = 0; i < cnt; i++) {
		rx_queue = &client->rx_q[i];
		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
		rx_queue->size = client_shm->rx_qsize;
		rx_queue->write_idx = 0;
	}

	/* Initialize client Tx queues baseaddr, size */
	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);

	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->tx_qn = cnt;
	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
	for (i = 0; i < cnt; i++) {
		tx_queue = &client->tx_q[i];
		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
		tx_queue->size = client_shm->tx_qsize;
		tx_queue->ack_idx = 0;
	}

	set_bit(client_id, &hif->shm->g_client_status[0]);

unlock:
	rte_spinlock_unlock(&hif->tx_lock);

	return err;
}

/*
 * pfe_hif_client_unregister
 *
 * This function is used to unregister a client from the HIF driver.
 */
static void
pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
{
	PMD_INIT_FUNC_TRACE();

	/*
	 * Mark the client as no longer available (which prevents further
	 * packet receive for this client)
	 */
	rte_spinlock_lock(&hif->tx_lock);

	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d not registered", client_id);

		rte_spinlock_unlock(&hif->tx_lock);
		return;
	}

	clear_bit(client_id, &hif->shm->g_client_status[0]);

	rte_spinlock_unlock(&hif->tx_lock);
}

/*
 * client_put_rxpacket
 */
static struct rte_mbuf *
client_put_rxpacket(struct hif_rx_queue *queue,
		void *pkt, u32 len,
		u32 flags, u32 client_ctrl,
		struct rte_mempool *pool,
		u32 *rem_len)
{
	struct rx_queue_desc *desc = queue->base + queue->write_idx;
	struct rte_mbuf *mbuf = NULL;

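	/*
	 * CL_DESC_OWN set means the slot is free for the HIF driver: hand
	 * the received buffer to the client and allocate a replacement
	 * mbuf for the Rx ring right away.
	 */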
	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		mbuf = rte_pktmbuf_alloc(pool);
		if (unlikely(!mbuf)) {
			PFE_PMD_WARN("Buffer allocation failure");
			return NULL;
		}

		desc->data = pkt;
		desc->client_ctrl = client_ctrl;
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
		queue->write_idx = (queue->write_idx + 1)
				    & (queue->size - 1);

		*rem_len = mbuf->buf_len;
	}

	return mbuf;
}

/*
 * pfe_hif_rx_process
 * This function does the pfe hif Rx queue processing:
 * packets are dequeued from the Rx ring and handed to the
 * corresponding client queue.
 */
int
pfe_hif_rx_process(struct pfe *pfe, int budget)
{
	struct hif_desc	*desc;
	struct hif_hdr *pkt_hdr;
	struct __hif_hdr hif_hdr;
	void *free_buf;
	int rtc, len, rx_processed = 0;
	struct __hif_desc local_desc;
	int flags = 0, wait_for_last = 0, retry = 0;
	unsigned int buf_size = 0;
	struct rte_mbuf *mbuf = NULL;
	struct pfe_hif *hif = &pfe->hif;

	rte_spinlock_lock(&hif->lock);

	rtc = hif->rxtoclean_index;
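	/*
	 * A frame may span several descriptors: the first buffer carries
	 * the HIF header (CL_DESC_FIRST) and wait_for_last tracks
	 * continuation buffers until BD_CTRL_LIFM marks the last one.
	 */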

	while (rx_processed < budget) {
		desc = hif->rx_base + rtc;

		__memcpy12(&local_desc, desc);

		/* Descriptor still owned by the HIF block: no packet yet */
		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
			if (unlikely(wait_for_last))
				continue;
			else
				break;
		}

		len = BD_BUF_LEN(local_desc.ctrl);
		pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];

		/* Parse the HIF header on the first buffer of a frame */
		if (!hif->started) {
			hif->started = 1;

			__memcpy8(&hif_hdr, pkt_hdr);

			hif->qno = hif_hdr.hdr.q_num;
			hif->client_id = hif_hdr.hdr.client_id;
			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
						hif_hdr.hdr.client_ctrl;
			flags = CL_DESC_FIRST;

		} else {
			flags = 0;
		}

		if (local_desc.ctrl & BD_CTRL_LIFM) {
			flags |= CL_DESC_LAST;
			wait_for_last = 0;
		} else {
			wait_for_last = 1;
		}

		/* Check that the client id is valid and still registered */
		if (hif->client_id >= HIF_CLIENTS_MAX ||
		    !(test_bit(hif->client_id,
			&hif->shm->g_client_status[0]))) {
			PFE_PMD_INFO("packet with invalid client id %d qnum %d",
				hif->client_id, hif->qno);

			free_buf = hif->rx_buf_addr[rtc];

			goto pkt_drop;
		}

		/* Check for a valid queue number */
		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
			PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
					hif->qno);
			hif->qno = 0;
		}

retry:
		mbuf =
		client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
				    (void *)pkt_hdr, len, flags,
				    hif->client_ctrl, hif->shm->pool,
				    &buf_size);

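		/*
		 * Out of mbufs: reclaim completed Tx buffers once and
		 * retry; if allocation still fails, stop polling this
		 * burst.
		 */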
		if (unlikely(!mbuf)) {
			if (!retry) {
				pfe_tx_do_cleanup(pfe);
				retry = 1;
				goto retry;
			}
			rx_processed = budget;

			if (flags & CL_DESC_FIRST)
				hif->started = 0;

			PFE_DP_LOG(DEBUG, "No buffers");
			break;
		}

		retry = 0;

		free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
		free_buf = free_buf - PFE_PKT_HEADER_SZ;

		/* Fill the free buffer into the descriptor */
		hif->rx_buf_addr[rtc] = free_buf;
		hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
				mbuf->data_off - PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;

pkt_drop:
		writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
			&desc->ctrl);

		rtc = (rtc + 1) & (hif->rx_ring_size - 1);

		if (local_desc.ctrl & BD_CTRL_LIFM) {
			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
				rx_processed++;

			hif->started = 0;
		}
	}

	hif->rxtoclean_index = rtc;
	rte_spinlock_unlock(&hif->lock);

	/* we made some progress, re-start rx dma in case it stopped */
	hif_rx_dma_start();

	return rx_processed;
}

/*
 * client_ack_txpacket
 * This function acks the Tx packet in the given client Tx queue by
 * resetting the ownership bit in the descriptor.
 */
static int
client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
		    unsigned int q_no)
{
	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
	struct tx_queue_desc *desc = queue->base + queue->ack_idx;

	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);

		return 0;

	} else {
		/* This should not happen */
		PFE_PMD_ERR("txtosend %d txtoclean %d txavail %d client %d qno %d queue %p ack_idx %d",
			hif->txtosend, hif->txtoclean, hif->txavail,
			client_id, q_no, queue, queue->ack_idx);
		return 1;
	}
}

static void
__hif_tx_done_process(struct pfe *pfe, int count)
{
	struct hif_desc *desc;
	struct hif_desc_sw *desc_sw;
	unsigned int ttc, tx_avl;
	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
	struct pfe_hif *hif = &pfe->hif;

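	/*
	 * Walk the Tx ring from txtoclean, acking completed descriptors
	 * and counting per-client completions so that each client is
	 * notified once at the end.
	 */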
	ttc = hif->txtoclean;
	tx_avl = hif->txavail;

	while ((tx_avl < hif->tx_ring_size) && count--) {
		desc = hif->tx_base + ttc;

		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
			break;

		desc_sw = &hif->tx_sw_queue[ttc];

		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
			PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);

		pkts_done[desc_sw->client_id]++;

		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);

		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
		tx_avl++;
	}

	if (pkts_done[0])
		hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
				0);
	if (pkts_done[1])
		hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
				0);
	hif->txtoclean = ttc;
	hif->txavail = tx_avl;
}

static inline void
hif_tx_done_process(struct pfe *pfe, int count)
{
	struct pfe_hif *hif = &pfe->hif;

	rte_spinlock_lock(&hif->tx_lock);
	__hif_tx_done_process(pfe, count);
	rte_spinlock_unlock(&hif->tx_lock);
}

void
pfe_tx_do_cleanup(struct pfe *pfe)
{
	hif_tx_done_process(pfe, HIF_TX_DESC_NT);
}

/*
 * hif_xmit_pkt
 * This function puts one packet in the HIF Tx queue
 */
void
hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
	     q_no, void *data, u32 len, unsigned int flags)
{
	struct hif_desc	*desc;
	struct hif_desc_sw *desc_sw;

	desc = hif->tx_base + hif->txtosend;
	desc_sw = &hif->tx_sw_queue[hif->txtosend];

	desc_sw->len = len;
	desc_sw->client_id = client_id;
	desc_sw->q_no = q_no;
	desc_sw->flags = flags;

	writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);

	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
	hif->txavail--;

	if ((!((flags & HIF_DATA_VALID) && (flags &
				HIF_LAST_BUFFER))))
		goto skip_tx;

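	/*
	 * Descriptors are staged without BD_CTRL_DESC_EN until the last
	 * buffer of a frame arrives; then every staged descriptor from
	 * txtoflush up to txtosend is enabled in one pass, so the HIF
	 * never sees a partially written frame.
	 */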
	/*
	 * Ensure everything else is written to DDR before
	 * writing bd->ctrl
	 */
	rte_wmb();

	do {
		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
		desc = hif->tx_base + hif->txtoflush;

		if (desc_sw->flags & HIF_LAST_BUFFER) {
			writel((BD_CTRL_LIFM |
			       BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
			       | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
				 BD_BUF_LEN(desc_sw->len)),
				&desc->ctrl);
		} else {
			writel((BD_CTRL_DESC_EN |
				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
		}
		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
	} while (hif->txtoflush != hif->txtosend);

skip_tx:
	return;
}

void
hif_process_client_req(struct pfe_hif *hif, int req,
			    int data1, __rte_unused int data2)
{
	unsigned int client_id = data1;

	if (client_id >= HIF_CLIENTS_MAX) {
		PFE_PMD_ERR("client id %d out of bounds", client_id);
		return;
	}

	switch (req) {
	case REQUEST_CL_REGISTER:
		/* Request to register a client */
		PFE_PMD_INFO("register client_id %d", client_id);
		pfe_hif_client_register(hif, client_id, (struct
			hif_client_shm *)&hif->shm->client[client_id]);
		break;

	case REQUEST_CL_UNREGISTER:
		PFE_PMD_INFO("unregister client_id %d", client_id);

		/* Request to unregister a client */
		pfe_hif_client_unregister(hif, client_id);

		break;

	default:
		PFE_PMD_ERR("unsupported request %d", req);
		break;
	}

	/*
	 * Process client Tx queues
	 * Currently there is no check for pending Tx
	 */
}

#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc	*desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

struct class_rx_hdr_t {
	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
	u16     length;         /* total packet length */
	u16     phyno;          /* input physical port number */
	u32     status;         /* gemac status bits */
	u32     status2;        /* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR            BIT(16)
#define STATUS_LENGTH_ERR               BIT(17)
#define STATUS_CRC_ERR                  BIT(18)
#define STATUS_TOO_SHORT_ERR            BIT(19)
#define STATUS_TOO_LONG_ERR             BIT(20)
#define STATUS_CODE_ERR                 BIT(21)
#define STATUS_MC_HASH_MATCH            BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
#define STATUS_UNICAST_HASH_MATCH       BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

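/*
 * Copy to LMEM one 32-bit word at a time; the htonl() suggests LMEM
 * expects big-endian words relative to the CPU.
 */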
static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++; src++;
	}
}

#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
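	/*
	 * Pre-built minimal frame (it appears to be an ARP request),
	 * injected through the class block to nudge the HIF Rx BDP as part
	 * of the LS1012A reset workaround.
	 */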
	static u32 dummy_pkt[] = {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark the checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}

void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If the HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		rte_delay_ms(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed");
	else
		PFE_PMD_INFO("Done");
}
#endif

/*
 * pfe_hif_init
 * This function initializes the base addresses, IRQs, etc.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
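
	/*
	 * Optional interrupt mode: when PFE_INTR_SUPPORT is set in the
	 * environment, HIF interrupt events are delivered through the PFE
	 * char device and signalled via an eventfd registered with epoll.
	 */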
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		/* HIF interrupt enable */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable err: %d",
					errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}
	return 0;
err0:
	return err;
}

/* pfe_hif_exit */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[0] = 0;
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_release_buffers(hif);
		pfe_hif_shm_clean(hif->shm);

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}
870