/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2021 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <arpa/inet.h>

static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;

err0:
	return err;
}
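
/*
 * Note: the Rx and Tx descriptor rings share the single allocation made
 * above. pfe_hif_init_buffers() derives the Tx ring base as
 * rx_base + rx_ring_size, so the memory layout is HIF_RX_DESC_NT Rx
 * descriptors immediately followed by HIF_TX_DESC_NT Tx descriptors.
 */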

static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

/* pfe_hif_release_buffers */
static void
pfe_hif_release_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc;
	uint32_t i = 0;
	struct rte_mbuf *mbuf;
	struct rte_pktmbuf_pool_private *mb_priv;

	hif->rx_base = hif->descr_baseaddr_v;

	/*Free Rx buffers */
	desc = hif->rx_base;
	mb_priv = rte_mempool_get_priv(hif->shm->pool);
	for (i = 0; i < hif->rx_ring_size; i++) {
		if (readl(&desc->data)) {
			if (i < hif->shm->rx_buf_pool_cnt &&
			    !hif->shm->rx_buf_pool[i]) {
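				/*
				 * Recover the original rte_mbuf pointer from
				 * rx_buf_vaddr[]: pfe_hif_init_buffers()
				 * stored buf_addr + data_off -
				 * PFE_PKT_HEADER_SZ there, so adding
				 * PFE_PKT_HEADER_SZ back and stepping over
				 * the headroom, the private area and the
				 * rte_mbuf header yields the mbuf start,
				 * which is stored back into rx_buf_pool[]
				 * for the subsequent shm cleanup.
				 */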
				mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
					- sizeof(struct rte_mbuf)
					- RTE_PKTMBUF_HEADROOM
					- mb_priv->mbuf_priv_size;
				hif->shm->rx_buf_pool[i] = mbuf;
			}
		}
		writel(0, &desc->data);
		writel(0, &desc->status);
		writel(0, &desc->ctrl);
		desc++;
	}
}

/*
 * pfe_hif_init_buffers
 * This function initializes the HIF Rx/Tx ring descriptors and
 * initializes the Rx queue with buffers.
 */
int
pfe_hif_init_buffers(struct pfe_hif *hif)
{
	struct hif_desc	*desc, *first_desc_p;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Check that enough Rx buffers are available in the shared memory */
	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
		return -ENOMEM;

	hif->rx_base = hif->descr_baseaddr_v;
	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));

	/*Initialize Rx descriptors */
	desc = hif->rx_base;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;

	for (i = 0; i < hif->rx_ring_size; i++) {
		/* Initialize Rx buffers from the shared memory */
		struct rte_mbuf *mbuf =
			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];

		/* PFE mbuf structure is as follows:
		 * ----------------------------------------------------------+
		 * | mbuf  | priv | headroom (annotation + PFE data) | data  |
		 * ----------------------------------------------------------+
		 *
		 * The PFE block returns additional information (parse
		 * results, eth id, queue id) along with the data, so extra
		 * memory has to be provided with each packet posted to the
		 * HIF Rx rings so that the PFE block can write its headers.
		 * The data pointer given to the HIF rings is therefore
		 * calculated as:
		 * mbuf->data_pointer - Required_header_size
		 *
		 * We are utilizing the HEADROOM area to receive the PFE
		 * block headers. On packet reception, the HIF driver uses
		 * the PFE header information to decide the client and to
		 * fill in the parse results; after that the application can
		 * use/overwrite the HEADROOM area.
		 */
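		/*
		 * Illustration (values are assumptions, the real ones come
		 * from the build configuration): with a typical
		 * RTE_PKTMBUF_HEADROOM of 128 bytes and a hypothetical
		 * PFE_PKT_HEADER_SZ of 16 bytes, the ring is handed
		 * buf_addr + 128 - 16, so the last 16 bytes of headroom
		 * receive the PFE/HIF header while the payload still starts
		 * at buf_addr + 128 as usual.
		 */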
		hif->rx_buf_vaddr[i] =
			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_addr[i] =
			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

		hif->shm->rx_buf_pool[i] = NULL;

		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
					&desc->data);
		writel(0, &desc->status);

		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();

		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
			| BD_CTRL_DIR | BD_CTRL_DESC_EN
			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);

		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		desc++;
	}

	/* Overwrite last descriptor to chain it to first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);

	hif->rxtoclean_index = 0;

	/*Initialize Rx buffer descriptor ring base address */
	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);

	hif->tx_base = hif->rx_base + hif->rx_ring_size;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
				hif->rx_ring_size;
	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));

	/*Initialize tx descriptors */
	desc = hif->tx_base;

	for (i = 0; i < hif->tx_ring_size; i++) {
		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		writel(0, &desc->ctrl);
		desc++;
	}

	/* Overwrite last descriptor to chain it to first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
	hif->txavail = hif->tx_ring_size;
	hif->txtosend = 0;
	hif->txtoclean = 0;
	hif->txtoflush = 0;

	/*Initialize Tx buffer descriptor ring base address */
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);

	return 0;
}
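
/*
 * After pfe_hif_init_buffers() both BD rings are circular singly linked
 * lists: each descriptor's 'next' holds the PFE-visible physical address
 * of the following descriptor and the last one points back at the first.
 * Rx descriptors start out enabled (BD_CTRL_DESC_EN) with a buffer
 * attached; Tx descriptors start out empty and disabled.
 */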

/*
 * pfe_hif_client_register
 *
 * This function is used to register a client driver with the HIF driver.
 *
 * Return value:
 * 0 - on successful registration
 */
static int
pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
			struct hif_client_shm *client_shm)
{
	struct hif_client *client = &hif->client[client_id];
	u32 i, cnt;
	struct rx_queue_desc *rx_qbase;
	struct tx_queue_desc *tx_qbase;
	struct hif_rx_queue *rx_queue;
	struct hif_tx_queue *tx_queue;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->tx_lock);

	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d already registered", client_id);
		err = -1;
		goto unlock;
	}

	memset(client, 0, sizeof(struct hif_client));

	/* Initialize client Rx queues baseaddr, size */

	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->rx_qn = cnt;
	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
	for (i = 0; i < cnt; i++) {
		rx_queue = &client->rx_q[i];
		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
		rx_queue->size = client_shm->rx_qsize;
		rx_queue->write_idx = 0;
	}

	/* Initialize client Tx queues baseaddr, size */
	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);

	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->tx_qn = cnt;
	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
	for (i = 0; i < cnt; i++) {
		tx_queue = &client->tx_q[i];
		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
		tx_queue->size = client_shm->tx_qsize;
		tx_queue->ack_idx = 0;
	}

	set_bit(client_id, &hif->shm->g_client_status[0]);

unlock:
	rte_spinlock_unlock(&hif->tx_lock);

	return err;
}
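
/*
 * Note: registration is serialized against the transmit path through
 * hif->tx_lock, and the per-client bit in g_client_status[] is what the
 * Rx path (pfe_hif_rx_process) checks before delivering packets, so a
 * client only starts receiving once this bit is set.
 */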

/*
 * pfe_hif_client_unregister
 *
 * This function is used to unregister a client from the HIF driver.
 *
 */
static void
pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
{
	PMD_INIT_FUNC_TRACE();

	/*
	 * Mark client as no longer available (which prevents further packet
	 * receive for this client)
	 */
	rte_spinlock_lock(&hif->tx_lock);

	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d not registered", client_id);

		rte_spinlock_unlock(&hif->tx_lock);
		return;
	}

	clear_bit(client_id, &hif->shm->g_client_status[0]);

	rte_spinlock_unlock(&hif->tx_lock);
}

/*
 * client_put_rxpacket-
 */
static struct rte_mbuf *
client_put_rxpacket(struct hif_rx_queue *queue,
		void *pkt, u32 len,
		u32 flags, u32 client_ctrl,
		struct rte_mempool *pool,
		u32 *rem_len)
{
	struct rx_queue_desc *desc = queue->base + queue->write_idx;
	struct rte_mbuf *mbuf = NULL;


	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(pool));
		if (unlikely(!mbuf)) {
			PFE_PMD_WARN("Buffer allocation failure");
			return NULL;
		}

		desc->data = pkt;
		desc->client_ctrl = client_ctrl;
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
		queue->write_idx = (queue->write_idx + 1)
				    & (queue->size - 1);

		*rem_len = mbuf->buf_len;
	}

	return mbuf;
}
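
/*
 * Note on the return value: NULL is returned both when the client queue
 * slot is not available (CL_DESC_OWN clear) and when the replacement mbuf
 * allocation fails. The caller (pfe_hif_rx_process) treats either case as
 * "no buffer": it retries once after a Tx cleanup and otherwise stops
 * processing for this poll.
 */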

/*
 * pfe_hif_rx_process-
 * This function does PFE HIF Rx queue processing:
 * it dequeues packets from the Rx ring and hands them to the
 * corresponding client queue.
 */
int
pfe_hif_rx_process(struct pfe *pfe, int budget)
{
	struct hif_desc	*desc;
	struct hif_hdr *pkt_hdr;
	struct __hif_hdr hif_hdr;
	void *free_buf;
	int rtc, len, rx_processed = 0;
	struct __hif_desc local_desc;
	int flags = 0, wait_for_last = 0, retry = 0;
	unsigned int buf_size = 0;
	struct rte_mbuf *mbuf = NULL;
	struct pfe_hif *hif = &pfe->hif;

	rte_spinlock_lock(&hif->lock);

	rtc = hif->rxtoclean_index;

	while (rx_processed < budget) {
		desc = hif->rx_base + rtc;

		__memcpy12(&local_desc, desc);

		/* ACK pending Rx interrupt */
		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
			if (unlikely(wait_for_last))
				continue;
			else
				break;
		}

		len = BD_BUF_LEN(local_desc.ctrl);
		pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];

		/* Track last HIF header received */
		if (!hif->started) {
			hif->started = 1;

			__memcpy8(&hif_hdr, pkt_hdr);

			hif->qno = hif_hdr.hdr.q_num;
			hif->client_id = hif_hdr.hdr.client_id;
			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
						hif_hdr.hdr.client_ctrl;
			flags = CL_DESC_FIRST;

		} else {
			flags = 0;
		}

		if (local_desc.ctrl & BD_CTRL_LIFM) {
			flags |= CL_DESC_LAST;
			wait_for_last = 0;
		} else {
			wait_for_last = 1;
		}

		/* Check for a valid client id that is still registered */
		if (hif->client_id >= HIF_CLIENTS_MAX ||
		    !(test_bit(hif->client_id,
			&hif->shm->g_client_status[0]))) {
			PFE_PMD_INFO("packet with invalid client id %d qnum %d",
				hif->client_id, hif->qno);

			free_buf = hif->rx_buf_addr[rtc];

			goto pkt_drop;
		}

		/* Check for a valid queue number */
		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
			PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
					hif->qno);
			hif->qno = 0;
		}

retry:
		mbuf =
		client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
				    (void *)pkt_hdr, len, flags,
				    hif->client_ctrl, hif->shm->pool,
				    &buf_size);

		if (unlikely(!mbuf)) {
			if (!retry) {
				pfe_tx_do_cleanup(pfe);
				retry = 1;
				goto retry;
			}
			rx_processed = budget;

			if (flags & CL_DESC_FIRST)
				hif->started = 0;

			PFE_DP_LOG(DEBUG, "No buffers");
			break;
		}

		retry = 0;

		free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
		free_buf = free_buf - PFE_PKT_HEADER_SZ;

		/* Fill the free buffer in the descriptor */
		hif->rx_buf_addr[rtc] = free_buf;
		hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
				mbuf->data_off - PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;

pkt_drop:
		writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();
		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
			&desc->ctrl);

		rtc = (rtc + 1) & (hif->rx_ring_size - 1);

		if (local_desc.ctrl & BD_CTRL_LIFM) {
			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
				rx_processed++;

			hif->started = 0;
		}
	}


	hif->rxtoclean_index = rtc;
	rte_spinlock_unlock(&hif->lock);

	/* we made some progress, re-start rx dma in case it stopped */
	hif_rx_dma_start();

	return rx_processed;
}
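
/*
 * Budget note: rx_processed counts complete packets, not buffer
 * descriptors. Descriptors without LIFM set and frames flagged
 * HIF_CTRL_RX_CONTINUED in the client control word do not count against
 * the budget, so one poll may consume more than 'budget' descriptors.
 */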

/*
 * client_ack_txpacket-
 * This function acks the Tx packet in the given client Tx queue by
 * resetting the ownership bit in the descriptor.
 */
static int
client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
		    unsigned int q_no)
{
	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
	struct tx_queue_desc *desc = queue->base + queue->ack_idx;

	if (readl(&desc->ctrl) & CL_DESC_OWN) {
		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);

		return 0;

	} else {
		/* This should not happen */
		PFE_PMD_ERR("%d %d %d %d %d %p %d",
		       hif->txtosend, hif->txtoclean, hif->txavail,
			client_id, q_no, queue, queue->ack_idx);
		return 1;
	}
}

static void
__hif_tx_done_process(struct pfe *pfe, int count)
{
	struct hif_desc *desc;
	struct hif_desc_sw *desc_sw;
	unsigned int ttc, tx_avl;
	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
	struct pfe_hif *hif = &pfe->hif;

	ttc = hif->txtoclean;
	tx_avl = hif->txavail;

	while ((tx_avl < hif->tx_ring_size) && count--) {
		desc = hif->tx_base + ttc;

		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
			break;

		desc_sw = &hif->tx_sw_queue[ttc];

		if (desc_sw->client_id > HIF_CLIENTS_MAX)
			PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);

		pkts_done[desc_sw->client_id]++;

		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);

		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
		tx_avl++;
	}

	if (pkts_done[0])
		hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
				0);
	if (pkts_done[1])
		hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
				0);
	hif->txtoclean = ttc;
	hif->txavail = tx_avl;
}
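
/*
 * Note: pkts_done[] is indexed by HIF client id; the {0, 0} initializer
 * and the two hif_lib_indicate_client() calls above assume exactly two
 * client slots.
 */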

static inline void
hif_tx_done_process(struct pfe *pfe, int count)
{
	struct pfe_hif *hif = &pfe->hif;
	rte_spinlock_lock(&hif->tx_lock);
	__hif_tx_done_process(pfe, count);
	rte_spinlock_unlock(&hif->tx_lock);
}

void
pfe_tx_do_cleanup(struct pfe *pfe)
{
	hif_tx_done_process(pfe, HIF_TX_DESC_NT);
}

/*
 * hif_xmit_pkt -
 * This function puts one packet in the HIF Tx queue
 */
void
hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
	     q_no, void *data, u32 len, unsigned int flags)
{
	struct hif_desc	*desc;
	struct hif_desc_sw *desc_sw;

	desc = hif->tx_base + hif->txtosend;
	desc_sw = &hif->tx_sw_queue[hif->txtosend];

	desc_sw->len = len;
	desc_sw->client_id = client_id;
	desc_sw->q_no = q_no;
	desc_sw->flags = flags;

	writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);

	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
	hif->txavail--;

	if ((!((flags & HIF_DATA_VALID) && (flags &
				HIF_LAST_BUFFER))))
		goto skip_tx;

	/*
	 * Ensure everything else is written to DDR before
	 * writing bd->ctrl
	 */
	rte_wmb();

	do {
		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
		desc = hif->tx_base + hif->txtoflush;

		if (desc_sw->flags & HIF_LAST_BUFFER) {
			writel((BD_CTRL_LIFM |
			       BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
			       | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
				 BD_BUF_LEN(desc_sw->len)),
				&desc->ctrl);
		} else {
			writel((BD_CTRL_DESC_EN |
				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
		}
		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
	} while (hif->txtoflush != hif->txtosend);

skip_tx:
	return;
}
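
/*
 * Transmit flow note: descriptors for a multi-buffer packet are filled in
 * and accounted for (txtosend/txavail) without being enabled; only when
 * the final segment arrives with HIF_DATA_VALID | HIF_LAST_BUFFER does
 * the flush loop above walk txtoflush..txtosend and set BD_CTRL_DESC_EN
 * on every pending descriptor, marking the last one with BD_CTRL_LIFM,
 * so the hardware never sees a partially queued packet.
 */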

void
hif_process_client_req(struct pfe_hif *hif, int req,
			    int data1, __rte_unused int data2)
{
	unsigned int client_id = data1;

	if (client_id >= HIF_CLIENTS_MAX) {
		PFE_PMD_ERR("client id %d out of bounds", client_id);
		return;
	}

	switch (req) {
	case REQUEST_CL_REGISTER:
			/* Request to register a client */
			PFE_PMD_INFO("register client_id %d", client_id);
			pfe_hif_client_register(hif, client_id, (struct
				hif_client_shm *)&hif->shm->client[client_id]);
			break;

	case REQUEST_CL_UNREGISTER:
			PFE_PMD_INFO("unregister client_id %d", client_id);

			/* Request to unregister a client */
			pfe_hif_client_unregister(hif, client_id);

			break;

	default:
			PFE_PMD_ERR("unsupported request %d", req);
			break;
	}

	/*
	 * Process client Tx queues
	 * Currently we do not check for pending Tx
	 */
}

#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc	*desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

struct class_rx_hdr_t {
	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
	u16     length;         /* total packet length */
	u16     phyno;          /* input physical port number */
	u32     status;         /* gemac status bits */
	u32     status2;        /* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR            BIT(16)
#define STATUS_LENGTH_ERR               BIT(17)
#define STATUS_CRC_ERR                  BIT(18)
#define STATUS_TOO_SHORT_ERR            BIT(19)
#define STATUS_TOO_LONG_ERR             BIT(20)
#define STATUS_CODE_ERR                 BIT(21)
#define STATUS_MC_HASH_MATCH            BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
#define STATUS_UNICAST_HASH_MATCH       BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++; src++;
	}
}
#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	static u32 dummy_pkt[] = {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}
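
/*
 * The dummy packet above is part of the LS1012A reset workaround: a
 * buffer is taken from BMU2 (DDR) and one from BMU1 (LMEM), a
 * class_rx_hdr_t that claims valid checksums is written in front of a
 * canned 64-byte frame, and the LMEM pointer is pushed to
 * CLASS_INQ_PKTPTR so the classifier forwards it towards the HIF.
 * pfe_hif_rx_idle() uses this to nudge the HIF Rx BDP out of its
 * DMA-active state before reset.
 */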

void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If the HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		rte_delay_ms(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed");
	else
		PFE_PMD_INFO("Done");
}
#endif

/*
 * pfe_hif_init
 * This function initializes the base addresses, IRQ, etc.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		/* hif interrupt enable */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable err: %d",
					errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}
	return 0;
err0:
	return err;
}
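
/*
 * Note: interrupt mode is opt-in via the PFE_INTR_SUPPORT environment
 * variable. When it is set, the HIF interrupt is routed through the PFE
 * character device (PFE_CDEV_HIF_INTR_EN ioctl) into an eventfd, and the
 * epoll fd created above is kept in hif->epoll_fd for the Rx interrupt
 * handling path; otherwise none of this interrupt plumbing is set up.
 */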

/* pfe_hif_exit- */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	hif->shm->g_client_status[0] = 0;
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_release_buffers(hif);
		pfe_hif_shm_clean(hif->shm);

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}