/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"

unsigned int emac_txq_cnt;

/*
 * @pfe_hif_lib.c
 * Common functions used by HIF client drivers
 */

/* HIF shared memory global variable */
struct hif_shm ghif_shm;

/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
 * This function should be called after pfe_hif_exit().
 *
 * @param[in] hif_shm		Shared memory address location in DDR
 */
void
pfe_hif_shm_clean(struct hif_shm *hif_shm)
{
	unsigned int i;
	void *pkt;

	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
		pkt = hif_shm->rx_buf_pool[i];
		if (pkt)
			rte_pktmbuf_free((struct rte_mbuf *)pkt);
	}
}

/* Initialize the shared memory used between the HIF driver and its clients
 * and allocate the rx_buffer_pool required for the HIF Rx descriptors.
 * This function should be called before initializing the HIF driver.
 *
 * @param[in] hif_shm		Shared memory address location in DDR
 * @param[in] mb_pool		Mempool to allocate the Rx buffers from
 * @return			0 on success, <0 on failure to initialize
 */
int
pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
{
	unsigned int i;
	struct rte_mbuf *mbuf;

	memset(hif_shm, 0, sizeof(struct hif_shm));
	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;

	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
		mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf)
			hif_shm->rx_buf_pool[i] = mbuf;
		else
			goto err0;
	}

	return 0;

err0:
	PFE_PMD_ERR("Low memory");
	pfe_hif_shm_clean(hif_shm);
	return -ENOMEM;
}
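
/*
 * Usage sketch (illustrative, not a fixed call sequence): the buffer pool
 * is pre-filled before the HIF comes up and drained after it is torn down;
 * "mb_pool" stands for any mempool created with rte_pktmbuf_pool_create().
 *
 *	if (pfe_hif_shm_init(pfe->hif.shm, mb_pool) < 0)
 *		return -ENOMEM;
 *	...
 *	pfe_hif_exit(pfe);
 *	pfe_hif_shm_clean(pfe->hif.shm);
 */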

/* This function sends an indication to the HIF driver.
 *
 * @param[in] hif	HIF context
 */
static void
hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int data2)
{
	hif_process_client_req(hif, req, data1, data2);
}

void
hif_lib_indicate_client(struct hif_client_s *client, int event_type,
			int qno)
{
	if (!client || event_type >= HIF_EVENT_MAX ||
	    qno >= HIF_CLIENT_QUEUES_MAX)
		return;

	if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
		client->event_handler(client->priv, event_type, qno);
}
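
/*
 * Note: queue_mask[] provides simple event coalescing: the handler above
 * fires only on the 0 -> 1 transition of the per-queue bit, and further
 * indications for that queue are suppressed until the client rearms the
 * event through hif_lib_event_handler_start(), which clears the bit.
 */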

/* This function releases the Rx queue descriptor memory and the pre-filled
 * buffers.
 *
 * @param[in] client	hif_client context
 */
static void
hif_lib_client_release_rx_buffers(struct hif_client_s *client)
{
	struct rte_mempool *pool;
	struct rte_pktmbuf_pool_private *mb_priv;
	struct rx_queue_desc *desc;
	unsigned int qno, ii;
	void *buf;

	pool = client->pfe->hif.shm->pool;
	mb_priv = rte_mempool_get_priv(pool);
	for (qno = 0; qno < client->rx_qn; qno++) {
		desc = client->rx_q[qno].base;

		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
			buf = (void *)desc->data;
			if (buf) {
				/* Data pointer to mbuf pointer calculation:
				 * "data - user private data - headroom -
				 * mbuf size". The data pointer programmed
				 * into the HIF BDs was the mbuf data address
				 * minus PFE_PKT_HEADER_SZ.
				 */
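				/* Layout behind desc->data (sketch, assuming
				 * data_off == RTE_PKTMBUF_HEADROOM, as for a
				 * freshly allocated mbuf):
				 *
				 *   [rte_mbuf][priv][headroom][payload...]
				 *                        ^
				 *   desc->data points here, i.e.
				 *   PFE_PKT_HEADER_SZ bytes before the
				 *   payload start.
				 */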
				buf = buf + PFE_PKT_HEADER_SZ
					- sizeof(struct rte_mbuf)
					- RTE_PKTMBUF_HEADROOM
					- mb_priv->mbuf_priv_size;
				rte_pktmbuf_free((struct rte_mbuf *)buf);
				desc->ctrl = 0;
			}
			desc++;
		}
	}
	rte_free(client->rx_qbase);
}

/* This function allocates memory for the Rx queue descriptors and
 * pre-fills the Rx queues with buffers.
 *
 * @param[in] client	client context
 * @param[in] q_size	size of the Rx queue; all queues are of the same size
 */
static int
hif_lib_client_init_rx_buffers(struct hif_client_s *client, int q_size)
{
	struct rx_queue_desc *desc;
	struct hif_client_rx_queue *queue;
	unsigned int ii, qno;

	/* Allocate memory for the client queues */
	client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size *
			sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE);
	if (!client->rx_qbase)
		goto err;

	for (qno = 0; qno < client->rx_qn; qno++) {
		queue = &client->rx_q[qno];

		queue->base = client->rx_qbase +
				qno * q_size * sizeof(struct rx_queue_desc);
		queue->size = q_size;
		queue->read_idx = 0;
		queue->write_idx = 0;
		queue->queue_id = 0;
		queue->port_id = client->port_id;
		queue->priv = client->priv;
		PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d", qno,
			      queue->base, queue->size);
	}

	for (qno = 0; qno < client->rx_qn; qno++) {
		queue = &client->rx_q[qno];
		desc = queue->base;

		for (ii = 0; ii < queue->size; ii++) {
			desc->ctrl = CL_DESC_OWN;
			desc++;
		}
	}

	return 0;

err:
	return 1;
}
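
/*
 * Note: q_size is assumed to be a power of two for both the Rx and Tx
 * rings; the read and write indices in the fast-path functions below wrap
 * with "& (queue->size - 1)".
 */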

static void
hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
{
	/*
	 * Check if there are any pending packets. The client must flush the
	 * Tx queues before unregistering by calling
	 * hif_lib_tx_get_next_complete().
	 *
	 * The HIF no longer calls us since we are no longer registered.
	 */
	if (queue->tx_pending)
		PFE_PMD_ERR("pending transmit packet");
}

static void
hif_lib_client_release_tx_buffers(struct hif_client_s *client)
{
	unsigned int qno;

	for (qno = 0; qno < client->tx_qn; qno++)
		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);

	rte_free(client->tx_qbase);
}

static int
hif_lib_client_init_tx_buffers(struct hif_client_s *client, int q_size)
{
	struct hif_client_tx_queue *queue;
	unsigned int qno;

	client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size *
			sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE);
	if (!client->tx_qbase)
		return 1;

	for (qno = 0; qno < client->tx_qn; qno++) {
		queue = &client->tx_q[qno];

		queue->base = client->tx_qbase +
				qno * q_size * sizeof(struct tx_queue_desc);
		queue->size = q_size;
		queue->read_idx = 0;
		queue->write_idx = 0;
		queue->tx_pending = 0;
		queue->nocpy_flag = 0;
		queue->prev_tmu_tx_pkts = 0;
		queue->done_tmu_tx_pkts = 0;
		queue->priv = client->priv;
		queue->queue_id = 0;
		queue->port_id = client->port_id;

		PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno,
			      queue->base, queue->size);
	}

	return 0;
}

static int
hif_lib_event_dummy(__rte_unused void *priv,
		__rte_unused int event_type, __rte_unused int qno)
{
	return 0;
}

int
hif_lib_client_register(struct hif_client_s *client)
{
	struct hif_shm *hif_shm;
	struct hif_client_shm *client_shm;
	int err, i;

	PMD_INIT_FUNC_TRACE();

	/* Validate the client before dereferencing client->pfe below */
	if (!client->pfe || client->id >= HIF_CLIENTS_MAX)
		return -EINVAL;

	/* Allocate memory before taking the spin lock */
	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
		err = -ENOMEM;
		goto err_rx;
	}

	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
		err = -ENOMEM;
		goto err_tx;
	}

	rte_spinlock_lock(&client->pfe->hif.lock);
	if (client->pfe->hif_client[client->id]) {
		err = -EINVAL;
		goto err;
	}

	hif_shm = client->pfe->hif.shm;

	if (!client->event_handler)
		client->event_handler = hif_lib_event_dummy;

	/* Initialize the client specific shared memory */
	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
	client_shm->rx_qbase = (unsigned long)client->rx_qbase;
	client_shm->rx_qsize = client->rx_qsize;
	client_shm->tx_qbase = (unsigned long)client->tx_qbase;
	client_shm->tx_qsize = client->tx_qsize;
	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
				(client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);

	/* By default all events are unmasked */
	for (i = 0; i < HIF_EVENT_MAX; i++)
		client->queue_mask[i] = 0;

	/* Indicate to the HIF driver */
	hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER,
			client->id, 0);

	PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d",
		      client, client->id, client->tx_qsize, client->rx_qsize);

	client->cpu_id = -1;

	client->pfe->hif_client[client->id] = client;
	rte_spinlock_unlock(&client->pfe->hif.lock);

	return 0;

err:
	rte_spinlock_unlock(&client->pfe->hif.lock);
	hif_lib_client_release_tx_buffers(client);

err_tx:
	hif_lib_client_release_rx_buffers(client);

err_rx:
	return err;
}
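
/*
 * Registration sketch (field values are illustrative assumptions; the
 * Ethernet driver fills them from its own configuration):
 *
 *	struct hif_client_s *client = &priv->client;
 *
 *	memset(client, 0, sizeof(*client));
 *	client->pfe = priv->pfe;
 *	client->id = id;			(unique, < HIF_CLIENTS_MAX)
 *	client->tx_qn = 1;
 *	client->rx_qn = 1;
 *	client->tx_qsize = 256;			(power of two, see note above)
 *	client->rx_qsize = 256;
 *	client->event_handler = my_handler;	(NULL selects the dummy)
 *	client->priv = priv;
 *	if (hif_lib_client_register(client) < 0)
 *		goto fail;
 */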

int
hif_lib_client_unregister(struct hif_client_s *client)
{
	struct pfe *pfe = client->pfe;
	u32 client_id = client->id;

	PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d",
		     client, client->id, client->tx_qsize, client->rx_qsize);

	rte_spinlock_lock(&pfe->hif.lock);
	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);

	hif_lib_client_release_tx_buffers(client);
	hif_lib_client_release_rx_buffers(client);
	pfe->hif_client[client_id] = NULL;
	rte_spinlock_unlock(&pfe->hif.lock);

	return 0;
}

int
hif_lib_event_handler_start(struct hif_client_s *client, int event,
				int qno)
{
	struct hif_client_rx_queue *queue;
	struct rx_queue_desc *desc;

	if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) {
		PFE_PMD_WARN("Unsupported event: %d queue number: %d",
				event, qno);
		return -1;
	}

	queue = &client->rx_q[qno];
	desc = queue->base + queue->read_idx;

	test_and_clear_bit(qno, &client->queue_mask[event]);

	switch (event) {
	case EVENT_RX_PKT_IND:
		if (!(desc->ctrl & CL_DESC_OWN))
			hif_lib_indicate_client(client,
						EVENT_RX_PKT_IND, qno);
		break;

	case EVENT_HIGH_RX_WM:
	case EVENT_TXDONE_IND:
	default:
		break;
	}

	return 0;
}
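
/*
 * Rearming (clearing the mask bit) and then re-checking the head descriptor
 * closes the race where a packet arrives after the client drained the ring
 * but before it rearmed: such a packet is signalled again here rather than
 * sitting unnoticed until the next indication.
 */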

#ifdef RTE_LIBRTE_PFE_SW_PARSE
static inline void
pfe_sw_parse_pkt(struct rte_mbuf *mbuf)
{
	struct rte_net_hdr_lens hdr_lens;

	mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
			RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
			| RTE_PTYPE_L4_MASK);
	mbuf->l2_len = hdr_lens.l2_len;
	mbuf->l3_len = hdr_lens.l3_len;
}
#endif
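
/*
 * With hardware parsing (the #else path in hif_lib_receive_pkt() below),
 * the classifier result is read from the pfe_parse block that follows the
 * HIF header in the received buffer; the software fallback above derives
 * an equivalent mbuf->packet_type with rte_net_get_ptype().
 */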

/*
 * This function gets up to nb_pkts packets from the specified client queue.
 * It also hands the consumed descriptors back to the HIF for refill.
 */
int
hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
		struct rte_mempool *pool, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct rx_queue_desc *desc;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_pktmbuf_pool_private *mb_priv;
	struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL;
	struct rte_eth_stats *stats = &priv->stats;
	int i, wait_for_last = 0;
#ifndef RTE_LIBRTE_PFE_SW_PARSE
	struct pfe_parse *parse_res;
#endif

	for (i = 0; i < nb_pkts;) {
		do {
			desc = queue->base + queue->read_idx;
			if (desc->ctrl & CL_DESC_OWN) {
				stats->ipackets += i;
				return i;
			}

			mb_priv = rte_mempool_get_priv(pool);

			mbuf = desc->data + PFE_PKT_HEADER_SZ
				- sizeof(struct rte_mbuf)
				- RTE_PKTMBUF_HEADROOM
				- mb_priv->mbuf_priv_size;
			mbuf->next = NULL;
			if (desc->ctrl & CL_DESC_FIRST) {
				/* TODO size of priv data if present in
				 * descriptor
				 */
				u16 size = 0;

				mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl)
						- PFE_PKT_HEADER_SZ - size;
				mbuf->data_len = mbuf->pkt_len;
				mbuf->port = queue->port_id;
#ifdef RTE_LIBRTE_PFE_SW_PARSE
				pfe_sw_parse_pkt(mbuf);
#else
				parse_res = (struct pfe_parse *)(desc->data +
					    PFE_HIF_SIZE);
				mbuf->packet_type = parse_res->packet_type;
#endif
				mbuf->nb_segs = 1;
				first_mbuf = mbuf;
				rx_pkts[i++] = first_mbuf;
			} else {
				mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl);
				mbuf->data_off = mbuf->data_off -
						 PFE_PKT_HEADER_SZ;
				first_mbuf->pkt_len += mbuf->data_len;
				first_mbuf->nb_segs++;
				p_mbuf->next = mbuf;
			}
			stats->ibytes += mbuf->data_len;
			p_mbuf = mbuf;

			if (desc->ctrl & CL_DESC_LAST)
				wait_for_last = 0;
			else
				wait_for_last = 1;

			/*
			 * Needed so we don't free a buffer/page twice on
			 * module exit.
			 */
			desc->data = NULL;

			/*
			 * Ensure everything else is written to DDR before
			 * writing bd->ctrl.
			 */
			rte_wmb();

			desc->ctrl = CL_DESC_OWN;
			queue->read_idx = (queue->read_idx + 1) &
					  (queue->size - 1);
		} while (wait_for_last);
	}
	stats->ipackets += i;
	return i;
}
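
/*
 * Receive-side usage sketch (illustrative; in this driver the Ethernet
 * PMD's Rx burst routine is the real caller, and "process" is a
 * hypothetical consumer):
 *
 *	struct rte_mbuf *pkts[32];
 *	int i, n;
 *
 *	n = hif_lib_receive_pkt(&client->rx_q[qno], mb_pool, pkts, 32);
 *	for (i = 0; i < n; i++)
 *		process(pkts[i]);
 */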

static inline void
hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int client_id,
	      unsigned int qno, u32 client_ctrl)
{
	/* Optimize the write since the destination may be non-cacheable */
	if (!((unsigned long)pkt_hdr & 0x3)) {
		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
					client_id;
	} else {
		((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
		((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
	}
}
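
/*
 * On the little-endian CPUs this driver targets, both stores above produce
 * the same byte layout: byte 0 holds the client id, byte 1 the queue
 * number, and bytes 2-3 the client control word. The single 32-bit store
 * is used when the header is 4-byte aligned; the split 16-bit stores avoid
 * an unaligned word access otherwise.
 */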

/* This function puts the given packet into the specified client queue. */
void
hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
		 void *data, void *data1, unsigned int len,
		 u32 client_ctrl, unsigned int flags, void *client_data)
{
	struct hif_client_tx_queue *queue = &client->tx_q[qno];
	struct tx_queue_desc *desc = queue->base + queue->write_idx;

	/* First buffer */
	if (flags & HIF_FIRST_BUFFER) {
		data1 -= PFE_HIF_SIZE;
		data -= PFE_HIF_SIZE;
		len += PFE_HIF_SIZE;

		hif_hdr_write(data1, client->id, qno, client_ctrl);
	}

	desc->data = client_data;
	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);

	hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags);

	queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);

	queue->tx_pending++;
}
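
/*
 * Transmit-side sketch (illustrative; the PMD derives these arguments from
 * the mbuf being sent, here a single-segment frame). The mbuf itself is
 * passed as client_data so the completion path can return it:
 *
 *	hif_lib_xmit_pkt(client, qno,
 *			 rte_pktmbuf_mtod(mbuf, void *),
 *			 rte_pktmbuf_mtod(mbuf, void *),
 *			 mbuf->pkt_len, 0,
 *			 HIF_FIRST_BUFFER | HIF_LAST_BUFFER,
 *			 mbuf);
 */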

void *
hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
			     unsigned int *flags, __rte_unused int count)
{
	struct hif_client_tx_queue *queue = &client->tx_q[qno];
	struct tx_queue_desc *desc = queue->base + queue->read_idx;

	PFE_DP_LOG(DEBUG, "qno: %d rd_indx: %d pending: %d",
		   qno, queue->read_idx, queue->tx_pending);

	if (!queue->tx_pending)
		return NULL;

	if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
		u32 tmu_tx_pkts = 0;

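		/*
		 * Compute how many packets the TMU has completed since the
		 * last poll, allowing for 32-bit wrap of the counter.
		 * tmu_tx_pkts would normally come from a TMU counter read;
		 * it is left at zero here.
		 */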
		if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
			queue->done_tmu_tx_pkts = UINT_MAX -
				queue->prev_tmu_tx_pkts + tmu_tx_pkts;
		else
			queue->done_tmu_tx_pkts = tmu_tx_pkts -
						queue->prev_tmu_tx_pkts;

		queue->prev_tmu_tx_pkts = tmu_tx_pkts;

		if (!queue->done_tmu_tx_pkts)
			return NULL;
	}

	if (desc->ctrl & CL_DESC_OWN)
		return NULL;

	queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
	queue->tx_pending--;

	*flags = CL_DESC_GET_FLAGS(desc->ctrl);

	if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
		queue->done_tmu_tx_pkts--;

	return desc->data;
}

int
pfe_hif_lib_init(struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();

	emac_txq_cnt = EMAC_TXQ_CNT;
	pfe->hif.shm = &ghif_shm;

	return 0;
}

void
pfe_hif_lib_exit(__rte_unused struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();
}