/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

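	/*
	 * The Rx and Tx rings are carved out of one contiguous,
	 * cache-line aligned allocation: HIF_RX_DESC_NT Rx descriptors
	 * first, followed by HIF_TX_DESC_NT Tx descriptors (see
	 * pfe_hif_init_buffers()).
	 */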
	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;
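	/*
	 * Ring indexes are advanced with "& (ring_size - 1)" masks
	 * throughout this file, so both ring sizes must be powers of two.
	 */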

	return 0;

err0:
	return err;
}

static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

/* pfe_hif_release_buffers */
static void
pfe_hif_release_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc;
	uint32_t i = 0;
	struct rte_mbuf *mbuf;
	struct rte_pktmbuf_pool_private *mb_priv;

	hif->rx_base = hif->descr_baseaddr_v;

	/* Free Rx buffers */
	desc = hif->rx_base;
	mb_priv = rte_mempool_get_priv(hif->shm->pool);
	for (i = 0; i < hif->rx_ring_size; i++) {
		if (readl(&desc->data)) {
			if (i < hif->shm->rx_buf_pool_cnt &&
			    !hif->shm->rx_buf_pool[i]) {
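				/*
				 * Recover the struct rte_mbuf pointer by
				 * walking back from rx_buf_vaddr[i], which
				 * points PFE_PKT_HEADER_SZ before the mbuf
				 * data, reversing the layout set up in
				 * pfe_hif_init_buffers().
				 */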
				mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
					- sizeof(struct rte_mbuf)
					- RTE_PKTMBUF_HEADROOM
					- mb_priv->mbuf_priv_size;
				hif->shm->rx_buf_pool[i] = mbuf;
			}
		}
		writel(0, &desc->data);
		writel(0, &desc->status);
		writel(0, &desc->ctrl);
		desc++;
	}
}

/*
 * pfe_hif_init_buffers
 * This function initializes the HIF Rx/Tx ring descriptors and
 * fills the Rx ring with buffers.
 */
int
pfe_hif_init_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc, *first_desc_p;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Check that enough Rx buffers are available in the shared memory */
	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
		return -ENOMEM;

	hif->rx_base = hif->descr_baseaddr_v;
	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));

	/* Initialize Rx descriptors */
	desc = hif->rx_base;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;

	for (i = 0; i < hif->rx_ring_size; i++) {
		/* Initialize Rx buffers from the shared memory */
		struct rte_mbuf *mbuf =
			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];

		/* The PFE mbuf layout is as follows:
		 * +---------------------------------------------------------+
		 * | mbuf  | priv | headroom (annotation + PFE data) | data  |
		 * +---------------------------------------------------------+
		 *
		 * The PFE block returns additional information such as parse
		 * results, eth id and queue id along with the data, so each
		 * buffer posted to the HIF Rx rings must reserve extra room
		 * in front of the data for the PFE headers. The pointer
		 * handed to the HIF rings is therefore calculated as:
		 * mbuf data pointer - required header size
		 *
		 * The HEADROOM area is used to receive the PFE block
		 * headers. On packet reception, the HIF driver uses the PFE
		 * header information to pick the client and fill in the
		 * parse results; after that the application can use or
		 * overwrite the HEADROOM area.
		 */
		hif->rx_buf_vaddr[i] =
			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_addr[i] =
			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

		hif->shm->rx_buf_pool[i] = NULL;

		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
					&desc->data);
		writel(0, &desc->status);

		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();

		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
			| BD_CTRL_DIR | BD_CTRL_DESC_EN
			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);

		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		desc++;
	}

	/* Overwrite last descriptor to chain it to first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);

	hif->rxtoclean_index = 0;

	/* Initialize Rx buffer descriptor ring base address */
	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);

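	/*
	 * The Tx ring immediately follows the Rx ring inside the single
	 * allocation made by pfe_hif_alloc_descr(), both virtually and
	 * in the PFE-visible physical address space.
	 */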
	hif->tx_base = hif->rx_base + hif->rx_ring_size;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
				hif->rx_ring_size;
	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));

	/* Initialize Tx descriptors */
	desc = hif->tx_base;

	for (i = 0; i < hif->tx_ring_size; i++) {
		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		writel(0, &desc->ctrl);
		desc++;
	}

	/* Overwrite last descriptor to chain it to first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
	hif->txavail = hif->tx_ring_size;
	hif->txtosend = 0;
	hif->txtoclean = 0;
	hif->txtoflush = 0;

	/* Initialize Tx buffer descriptor ring base address */
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);

	return 0;
}

/*
 * pfe_hif_client_register
 *
 * This function registers a client driver with the HIF driver.
 *
 * Return value:
 * 0 on successful registration, negative on error
 */
static int
pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
			struct hif_client_shm *client_shm)
{
	struct hif_client *client = &hif->client[client_id];
	u32 i, cnt;
	struct rx_queue_desc *rx_qbase;
	struct tx_queue_desc *tx_qbase;
	struct hif_rx_queue *rx_queue;
	struct hif_tx_queue *tx_queue;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->tx_lock);

	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d already registered", client_id);
		err = -1;
		goto unlock;
	}

	memset(client, 0, sizeof(struct hif_client));

	/* Initialize client Rx queues baseaddr, size */

	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->rx_qn = cnt;
	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
	for (i = 0; i < cnt; i++) {
		rx_queue = &client->rx_q[i];
		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
		rx_queue->size = client_shm->rx_qsize;
		rx_queue->write_idx = 0;
	}

	/* Initialize client Tx queues baseaddr, size */
	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);

	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->tx_qn = cnt;
	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
	for (i = 0; i < cnt; i++) {
		tx_queue = &client->tx_q[i];
		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
		tx_queue->size = client_shm->tx_qsize;
		tx_queue->ack_idx = 0;
	}

	set_bit(client_id, &hif->shm->g_client_status[0]);

unlock:
	rte_spinlock_unlock(&hif->tx_lock);

	return err;
}

/*
 * pfe_hif_client_unregister
 *
 * This function unregisters a client from the HIF driver.
 */
static void
pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
{
	PMD_INIT_FUNC_TRACE();

	/*
	 * Mark client as no longer available (which prevents further packet
	 * receive for this client)
	 */
	rte_spinlock_lock(&hif->tx_lock);

	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d not registered", client_id);

		rte_spinlock_unlock(&hif->tx_lock);
		return;
	}

	clear_bit(client_id, &hif->shm->g_client_status[0]);

	rte_spinlock_unlock(&hif->tx_lock);
}

/*
 * client_put_rxpacket
 */
static struct rte_mbuf *
client_put_rxpacket(struct hif_rx_queue *queue,
		void *pkt, u32 len,
		u32 flags, u32 client_ctrl,
		struct rte_mempool *pool,
		u32 *rem_len)
{
	struct rx_queue_desc *desc = queue->base + queue->write_idx;
	struct rte_mbuf *mbuf = NULL;

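	/*
	 * The slot can be used only while it is still owned by the HIF
	 * side (CL_DESC_OWN set). Otherwise the client queue is full
	 * and NULL is returned, letting the caller drop the packet.
	 */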
	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		mbuf = rte_pktmbuf_alloc(pool);
		if (unlikely(!mbuf)) {
			PFE_PMD_WARN("Buffer allocation failure");
			return NULL;
		}

		desc->data = pkt;
		desc->client_ctrl = client_ctrl;
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
		queue->write_idx = (queue->write_idx + 1)
				    & (queue->size - 1);

		*rem_len = mbuf->buf_len;
	}

	return mbuf;
}

/*
 * pfe_hif_rx_process
 * This function processes the HIF Rx queue: it dequeues packets from
 * the Rx ring and hands them off to the corresponding client queue.
 */
int
pfe_hif_rx_process(struct pfe *pfe, int budget)
{
	struct hif_desc	*desc;
	struct hif_hdr *pkt_hdr;
	struct __hif_hdr hif_hdr;
	void *free_buf;
	int rtc, len, rx_processed = 0;
	struct __hif_desc local_desc;
	int flags = 0, wait_for_last = 0, retry = 0;
	unsigned int buf_size = 0;
	struct rte_mbuf *mbuf = NULL;
	struct pfe_hif *hif = &pfe->hif;

	rte_spinlock_lock(&hif->lock);

	rtc = hif->rxtoclean_index;

	while (rx_processed < budget) {
		desc = hif->rx_base + rtc;

		__memcpy12(&local_desc, desc);

		/*
		 * Descriptor still owned by the hardware: if we are in the
		 * middle of a multi-buffer packet, keep polling for the
		 * last fragment; otherwise stop processing.
		 */
		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
			if (unlikely(wait_for_last))
				continue;
			else
				break;
		}

		len = BD_BUF_LEN(local_desc.ctrl);
		pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];

		/*
		 * The first fragment of every packet starts with a HIF
		 * header carrying the destination client, queue number and
		 * client control word; latch it until the last fragment
		 * (LIFM) is seen.
		 */
		if (!hif->started) {
			hif->started = 1;

			__memcpy8(&hif_hdr, pkt_hdr);

			hif->qno = hif_hdr.hdr.q_num;
			hif->client_id = hif_hdr.hdr.client_id;
			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
						hif_hdr.hdr.client_ctrl;
			flags = CL_DESC_FIRST;
		} else {
			flags = 0;
		}

		if (local_desc.ctrl & BD_CTRL_LIFM) {
			flags |= CL_DESC_LAST;
			wait_for_last = 0;
		} else {
			wait_for_last = 1;
		}

		/* Check that the client id is valid and still registered */
		if (hif->client_id >= HIF_CLIENTS_MAX ||
		    !(test_bit(hif->client_id,
			&hif->shm->g_client_status[0]))) {
			PFE_PMD_INFO("packet with invalid client id %d qnum %d",
				hif->client_id, hif->qno);

			free_buf = hif->rx_buf_addr[rtc];

			goto pkt_drop;
		}

		/* Check for a valid queue number */
		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
			PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
					hif->qno);
			hif->qno = 0;
		}

retry:
		mbuf = client_put_rxpacket(
			&hif->client[hif->client_id].rx_q[hif->qno],
			(void *)pkt_hdr, len, flags, hif->client_ctrl,
			hif->shm->pool, &buf_size);

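		/*
		 * On allocation failure, reclaim completed Tx buffers once
		 * (Tx completion returns mbufs to the pool) and retry
		 * before giving up on this poll cycle.
		 */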
		if (unlikely(!mbuf)) {
			if (!retry) {
				pfe_tx_do_cleanup(pfe);
				retry = 1;
				goto retry;
			}
			rx_processed = budget;

			if (flags & CL_DESC_FIRST)
				hif->started = 0;

			PFE_DP_LOG(DEBUG, "No buffers");
			break;
		}

		retry = 0;

		free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
		free_buf = free_buf - PFE_PKT_HEADER_SZ;

		/* Fill free buffer in the descriptor */
		hif->rx_buf_addr[rtc] = free_buf;
		hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
				mbuf->data_off - PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;

pkt_drop:
		writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
			&desc->ctrl);

		rtc = (rtc + 1) & (hif->rx_ring_size - 1);

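		/*
		 * Packets flagged HIF_CTRL_RX_CONTINUED are merged with the
		 * following packet by the client library, so only the final
		 * chunk counts against the poll budget.
		 */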
		if (local_desc.ctrl & BD_CTRL_LIFM) {
			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
				rx_processed++;

			hif->started = 0;
		}
	}

	hif->rxtoclean_index = rtc;
	rte_spinlock_unlock(&hif->lock);

	/* we made some progress, re-start rx dma in case it stopped */
	hif_rx_dma_start();

	return rx_processed;
}

/*
 * client_ack_txpacket
 * This function acks a Tx packet in the given client Tx queue by
 * resetting the ownership bit in the descriptor.
 */
static int
client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
		    unsigned int q_no)
{
	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
	struct tx_queue_desc *desc = queue->base + queue->ack_idx;

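	/*
	 * Completion handshake: the client set CL_DESC_OWN when it queued
	 * the buffer; clearing it here returns the slot so the client
	 * library can recycle the mbuf.
	 */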
	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);

		return 0;

	} else {
		/* This should not happen */
		PFE_PMD_ERR("%d %d %d %d %d %p %d",
			hif->txtosend, hif->txtoclean, hif->txavail,
			client_id, q_no, queue, queue->ack_idx);
		return 1;
	}
}

static void
__hif_tx_done_process(struct pfe *pfe, int count)
{
	struct hif_desc *desc;
	struct hif_desc_sw *desc_sw;
	unsigned int ttc, tx_avl;
	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
	struct pfe_hif *hif = &pfe->hif;

	ttc = hif->txtoclean;
	tx_avl = hif->txavail;

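	/*
	 * Walk completed Tx descriptors in order, stopping at the first
	 * one still owned by the hardware (BD_CTRL_DESC_EN set) or when
	 * the caller's count budget is exhausted.
	 */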
	while ((tx_avl < hif->tx_ring_size) && count--) {
		desc = hif->tx_base + ttc;

		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
			break;

		desc_sw = &hif->tx_sw_queue[ttc];

		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
			PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);

		pkts_done[desc_sw->client_id]++;

		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);

		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
		tx_avl++;
	}

	if (pkts_done[0])
		hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
				0);
	if (pkts_done[1])
		hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
				0);
	hif->txtoclean = ttc;
	hif->txavail = tx_avl;
}

static inline void
hif_tx_done_process(struct pfe *pfe, int count)
{
	struct pfe_hif *hif = &pfe->hif;

	rte_spinlock_lock(&hif->tx_lock);
	__hif_tx_done_process(pfe, count);
	rte_spinlock_unlock(&hif->tx_lock);
}

void
pfe_tx_do_cleanup(struct pfe *pfe)
{
	hif_tx_done_process(pfe, HIF_TX_DESC_NT);
}

/*
 * hif_xmit_pkt -
 * This function puts one packet into the HIF Tx queue
 */
void
hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id,
	     unsigned int q_no, void *data, u32 len, unsigned int flags)
{
	struct hif_desc	*desc;
	struct hif_desc_sw *desc_sw;

	desc = hif->tx_base + hif->txtosend;
	desc_sw = &hif->tx_sw_queue[hif->txtosend];

	desc_sw->len = len;
	desc_sw->client_id = client_id;
	desc_sw->q_no = q_no;
	desc_sw->flags = flags;

	writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);

	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
	hif->txavail--;

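	/*
	 * Descriptors are armed only once a complete packet has been
	 * queued (HIF_DATA_VALID together with HIF_LAST_BUFFER); the
	 * flush loop below then enables every pending BD from txtoflush
	 * up to txtosend in one pass.
	 */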
	if (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER)))
		goto skip_tx;

	/*
	 * Ensure everything else is written to DDR before
	 * writing bd->ctrl
	 */
	rte_wmb();

	do {
		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
		desc = hif->tx_base + hif->txtoflush;

		if (desc_sw->flags & HIF_LAST_BUFFER) {
			writel((BD_CTRL_LIFM |
				BD_CTRL_BRFETCH_DISABLE |
				BD_CTRL_RTFETCH_DISABLE |
				BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
				BD_BUF_LEN(desc_sw->len)),
				&desc->ctrl);
		} else {
			writel((BD_CTRL_DESC_EN |
				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
		}
		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
	} while (hif->txtoflush != hif->txtosend);

skip_tx:
	return;
}

void
hif_process_client_req(struct pfe_hif *hif, int req,
		       int data1, __rte_unused int data2)
{
	unsigned int client_id = data1;

	if (client_id >= HIF_CLIENTS_MAX) {
		PFE_PMD_ERR("client id %d out of bounds", client_id);
		return;
	}

	switch (req) {
	case REQUEST_CL_REGISTER:
		/* Request to register a client */
		PFE_PMD_INFO("register client_id %d", client_id);
		pfe_hif_client_register(hif, client_id,
			(struct hif_client_shm *)&hif->shm->client[client_id]);
		break;

	case REQUEST_CL_UNREGISTER:
		PFE_PMD_INFO("unregister client_id %d", client_id);

		/* Request to unregister a client */
		pfe_hif_client_unregister(hif, client_id);

		break;

	default:
		PFE_PMD_ERR("unsupported request %d", req);
		break;
	}

	/*
	 * Process client Tx queues.
	 * Currently there is no check for pending Tx.
	 */
}

#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc	*desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

struct class_rx_hdr_t {
	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
	u16     length;         /* total packet length */
	u16     phyno;          /* input physical port number */
	u32     status;         /* gemac status bits */
	u32     status2;        /* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR            BIT(16)
#define STATUS_LENGTH_ERR               BIT(17)
#define STATUS_CRC_ERR                  BIT(18)
#define STATUS_TOO_SHORT_ERR            BIT(19)
#define STATUS_TOO_LONG_ERR             BIT(20)
#define STATUS_CODE_ERR                 BIT(21)
#define STATUS_MC_HASH_MATCH            BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
#define STATUS_UNICAST_HASH_MATCH       BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++;
		src++;
	}
}
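
/*
 * LS1012A reset workaround helper: hand-craft a minimal frame in
 * BMU1/BMU2 buffers and inject it through CLASS_INQ_PKTPTR so that a
 * busy HIF Rx BDP can make progress and drain to idle (see
 * pfe_hif_rx_idle()).
 */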
#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	static u32 dummy_pkt[] = {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark the checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}

void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If the HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		sleep(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed");
	else
		PFE_PMD_INFO("Done");
}
#endif

/*
 * pfe_hif_init
 * This function initializes the base addresses, IRQ, etc.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
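	/*
	 * Optional interrupt mode: the PFE kernel char device routes HIF
	 * interrupts to an eventfd, which is registered with an epoll fd
	 * so the Rx path can sleep until traffic arrives.
	 */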
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		if (event_fd < 0) {
			PFE_PMD_ERR("eventfd creation failed: %d", errno);
			err = -errno;
			goto err0;
		}
		/* Enable the HIF interrupt */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable err: %d",
					errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}
	return 0;
err0:
	return err;
}

/* pfe_hif_exit */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[0] = 0;
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_release_buffers(hif);
		pfe_hif_shm_clean(hif->shm);

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}
869