xref: /dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c (revision 12dc2539f7b12b2ec4570197c1e8a16a973d71f6)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2019 NXP
5  *
6  */
7 
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <sys/types.h>
11 #include <string.h>
12 #include <stdlib.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 
16 #include <rte_mbuf.h>
17 #include <ethdev_driver.h>
18 #include <rte_malloc.h>
19 #include <rte_memcpy.h>
20 #include <rte_string_fns.h>
21 #include <rte_cycles.h>
22 #include <rte_kvargs.h>
23 #include <dev_driver.h>
24 #include "rte_dpaa2_mempool.h"
25 
26 #include <bus_fslmc_driver.h>
27 #include <fslmc_logs.h>
28 #include <mc/fsl_dpbp.h>
29 #include <portal/dpaa2_hw_pvt.h>
30 #include <portal/dpaa2_hw_dpio.h>
31 #include "dpaa2_hw_mempool.h"
32 #include "dpaa2_hw_mempool_logs.h"
33 
34 #include <dpaax_iova_table.h>
35 
36 struct dpaa2_bp_info *rte_dpaa2_bpid_info;
37 static struct dpaa2_bp_list *h_bp_list;
38 
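/*
 * Mempool "alloc" op for the DPAA2 hardware pool.
 *
 * Reserves a free DPBP device, enables it and queries its attributes to
 * learn the hardware buffer pool id (bpid). A dpaa2_bp_list node is built,
 * linked at the head of the global list (h_bp_list) and mirrored into the
 * per-bpid table (rte_dpaa2_bpid_info); a private copy is stored in
 * mp->pool_data. Finally, the per-lcore cache flush threshold is raised to
 * cache->size + DPAA2_MBUF_MAX_ACQ_REL so that a cache flush maps to a
 * single hardware release burst.
 */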
39 static int
40 rte_hw_mbuf_create_pool(struct rte_mempool *mp)
41 {
42 	struct dpaa2_bp_list *bp_list;
43 	struct dpaa2_dpbp_dev *avail_dpbp;
44 	struct dpaa2_bp_info *bp_info;
45 	struct dpbp_attr dpbp_attr;
46 	uint32_t bpid;
47 	unsigned int lcore_id;
48 	struct rte_mempool_cache *cache;
49 	int ret;
50 
51 	avail_dpbp = dpaa2_alloc_dpbp_dev();
52 
53 	if (rte_dpaa2_bpid_info == NULL) {
54 		rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
55 				      sizeof(struct dpaa2_bp_info) * MAX_BPID,
56 				      RTE_CACHE_LINE_SIZE);
57 		if (rte_dpaa2_bpid_info == NULL)
58 			return -ENOMEM;
59 		memset(rte_dpaa2_bpid_info, 0,
60 		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
61 	}
62 
63 	if (!avail_dpbp) {
64 		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
65 		return -ENOENT;
66 	}
67 
68 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
69 		ret = dpaa2_affine_qbman_swp();
70 		if (ret) {
71 			DPAA2_MEMPOOL_ERR(
72 				"Failed to allocate IO portal, tid: %d",
73 				rte_gettid());
74 			goto err1;
75 		}
76 	}
77 
78 	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
79 	if (ret != 0) {
80 		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
81 				  ret);
82 		goto err1;
83 	}
84 
85 	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
86 				  avail_dpbp->token, &dpbp_attr);
87 	if (ret != 0) {
88 		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
89 				  ret);
90 		goto err2;
91 	}
92 
93 	bp_info = rte_malloc(NULL,
94 			     sizeof(struct dpaa2_bp_info),
95 			     RTE_CACHE_LINE_SIZE);
96 	if (!bp_info) {
97 		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
98 		ret = -ENOMEM;
99 		goto err2;
100 	}
101 
102 	/* Allocate the bp_list which will be added to the global bp list (h_bp_list) */
103 	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
104 			     RTE_CACHE_LINE_SIZE);
105 	if (!bp_list) {
106 		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
107 		ret = -ENOMEM;
108 		goto err3;
109 	}
110 
111 	/* Set parameters of buffer pool list */
112 	bp_list->buf_pool.num_bufs = mp->size;
113 	bp_list->buf_pool.size = mp->elt_size
114 			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
115 	bp_list->buf_pool.bpid = dpbp_attr.bpid;
116 	bp_list->buf_pool.h_bpool_mem = NULL;
117 	bp_list->buf_pool.dpbp_node = avail_dpbp;
118 	/* Identification for our offloaded pool_data structure */
119 	bp_list->dpaa2_ops_index = mp->ops_index;
120 	bp_list->next = h_bp_list;
121 	bp_list->mp = mp;
122 
123 	bpid = dpbp_attr.bpid;
124 
125 	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
126 				+ rte_pktmbuf_priv_size(mp);
127 	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
128 	rte_dpaa2_bpid_info[bpid].bpid = bpid;
129 
130 	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
131 		   sizeof(struct dpaa2_bp_info));
132 	mp->pool_data = (void *)bp_info;
133 
134 	DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
135 
136 	h_bp_list = bp_list;
137 	/* Update the per-core mempool cache flush threshold to the optimal
138 	 * value, i.e. the number of buffers that can be released to the HW
139 	 * buffer pool in a single API call.
140 	 */
141 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
142 		cache = &mp->local_cache[lcore_id];
143 		DPAA2_MEMPOOL_DEBUG("lCore %d: cache->flushthresh %d -> %d",
144 			lcore_id, cache->flushthresh,
145 			(uint32_t)(cache->size + DPAA2_MBUF_MAX_ACQ_REL));
146 		if (cache->flushthresh)
147 			cache->flushthresh = cache->size + DPAA2_MBUF_MAX_ACQ_REL;
148 	}
149 
150 	return 0;
151 err3:
152 	rte_free(bp_info);
153 err2:
154 	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
155 err1:
156 	dpaa2_free_dpbp_dev(avail_dpbp);
157 
158 	return ret;
159 }
160 
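/*
 * Mempool "free" op: disable the backing DPBP, unlink this pool's
 * dpaa2_bp_list node from the global list, free the bp_info stored in
 * mp->pool_data and return the DPBP device to the free list.
 */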
161 static void
162 rte_hw_mbuf_free_pool(struct rte_mempool *mp)
163 {
164 	struct dpaa2_bp_info *bpinfo;
165 	struct dpaa2_bp_list *bp;
166 	struct dpaa2_dpbp_dev *dpbp_node;
167 
168 	if (!mp->pool_data) {
169 		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
170 		return;
171 	}
172 
173 	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
174 	bp = bpinfo->bp_list;
175 	dpbp_node = bp->buf_pool.dpbp_node;
176 
177 	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
178 
179 	if (h_bp_list == bp) {
180 		h_bp_list = h_bp_list->next;
181 	} else { /* if it is not the first node */
182 		struct dpaa2_bp_list *prev = h_bp_list, *temp;
183 		temp = h_bp_list->next;
184 		while (temp) {
185 			if (temp == bp) {
186 				prev->next = temp->next;
187 				rte_free(bp);
188 				break;
189 			}
190 			prev = temp;
191 			temp = temp->next;
192 		}
193 	}
194 
195 	rte_free(mp->pool_data);
196 	dpaa2_free_dpbp_dev(dpbp_node);
197 }
198 
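/*
 * Release 'count' objects back to the QBMAN buffer pool 'bpid'.
 *
 * Each object pointer is converted to its hardware buffer address by
 * adding 'meta_data_size' (mbuf header plus private area); with
 * RTE_LIBRTE_DPAA2_USE_PHYS_IOVA the IOVA is used instead of the virtual
 * address. Buffers are released in bursts of at most DPAA2_MBUF_MAX_ACQ_REL,
 * retrying on -EBUSY up to DPAA2_MAX_TX_RETRY_COUNT times per burst.
 */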
199 static void
200 rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
201 			void * const *obj_table,
202 			uint32_t bpid,
203 			uint32_t meta_data_size,
204 			int count)
205 {
206 	struct qbman_release_desc releasedesc;
207 	struct qbman_swp *swp;
208 	int ret;
209 	int i, n, retry_count;
210 	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
211 
212 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
213 		ret = dpaa2_affine_qbman_swp();
214 		if (ret != 0) {
215 			DPAA2_MEMPOOL_ERR(
216 				"Failed to allocate IO portal, tid: %d",
217 				rte_gettid());
218 			return;
219 		}
220 	}
221 	swp = DPAA2_PER_LCORE_PORTAL;
222 
223 	/* Create a release descriptor required for releasing
224 	 * buffers into QBMAN
225 	 */
226 	qbman_release_desc_clear(&releasedesc);
227 	qbman_release_desc_set_bpid(&releasedesc, bpid);
228 
229 	n = count % DPAA2_MBUF_MAX_ACQ_REL;
230 	if (unlikely(!n))
231 		goto aligned;
232 
233 	/* convert mbufs to buffer addresses for the remainder */
234 	for (i = 0; i < n ; i++) {
235 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
236 		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
237 				+ meta_data_size;
238 #else
239 		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
240 #endif
241 	}
242 
243 	/* feed them to bman */
244 	retry_count = 0;
245 	while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
246 			-EBUSY) {
247 		retry_count++;
248 		if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
249 			DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
250 			return;
251 		}
252 	}
253 
254 aligned:
255 	/* if there are more buffers to free */
256 	while (n < count) {
257 		/* convert mbufs to buffer addresses */
258 		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
259 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
260 			bufs[i] = (uint64_t)
261 				  rte_mempool_virt2iova(obj_table[n + i])
262 				  + meta_data_size;
263 #else
264 			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
265 #endif
266 		}
267 
268 		retry_count = 0;
269 		while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
270 					DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
271 			retry_count++;
272 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
273 				DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
274 				return;
275 			}
276 		}
277 		n += DPAA2_MBUF_MAX_ACQ_REL;
278 	}
279 }
280 
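/*
 * Ensure the global per-bpid table (rte_dpaa2_bpid_info) exists and fill
 * in the entry for this mempool's bpid: metadata size (mbuf header plus
 * private area) and a back-pointer to the pool's bp_list.
 */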
281 int rte_dpaa2_bpid_info_init(struct rte_mempool *mp)
282 {
283 	struct dpaa2_bp_info *bp_info = mempool_to_bpinfo(mp);
284 	uint32_t bpid = bp_info->bpid;
285 
286 	if (!rte_dpaa2_bpid_info) {
287 		rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
288 				      sizeof(struct dpaa2_bp_info) * MAX_BPID,
289 				      RTE_CACHE_LINE_SIZE);
290 		if (rte_dpaa2_bpid_info == NULL)
291 			return -ENOMEM;
292 		memset(rte_dpaa2_bpid_info, 0,
293 		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
294 	}
295 
296 	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
297 				+ rte_pktmbuf_priv_size(mp);
298 	rte_dpaa2_bpid_info[bpid].bp_list = bp_info->bp_list;
299 	rte_dpaa2_bpid_info[bpid].bpid = bpid;
300 
301 	return 0;
302 }
303 
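/* Return the hardware buffer pool id (bpid) backing this mempool. */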
304 uint16_t
305 rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
306 {
307 	struct dpaa2_bp_info *bp_info;
308 
309 	bp_info = mempool_to_bpinfo(mp);
310 	if (!(bp_info->bp_list)) {
311 		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
312 		return -ENOMEM;
313 	}
314 
315 	return bp_info->bpid;
316 }
317 
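/*
 * Translate a buffer address from a DPAA2 pool back to its rte_mbuf by
 * subtracting the pool's metadata size (mbuf header plus private area).
 *
 * Usage sketch (hypothetical 'mp' and 'buf' obtained from a DPAA2 pool):
 *
 *   struct rte_mbuf *m = rte_dpaa2_mbuf_from_buf_addr(mp, buf);
 */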
318 struct rte_mbuf *
319 rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
320 {
321 	struct dpaa2_bp_info *bp_info;
322 
323 	bp_info = mempool_to_bpinfo(mp);
324 	if (!(bp_info->bp_list)) {
325 		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
326 		return NULL;
327 	}
328 
329 	return (struct rte_mbuf *)((uint8_t *)buf_addr -
330 			bp_info->meta_data_size);
331 }
332 
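/*
 * Mempool "dequeue" op: acquire 'count' buffers from the QBMAN pool in
 * bursts of at most DPAA2_MBUF_MAX_ACQ_REL and convert each returned
 * buffer address into an mbuf pointer. If the pool runs short, everything
 * acquired so far is released back and -ENOBUFS is returned, since the
 * mempool API expects all-or-nothing semantics.
 */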
333 int
334 rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
335 			  void **obj_table, unsigned int count)
336 {
337 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
338 	static int alloc;
339 #endif
340 	struct qbman_swp *swp;
341 	uint16_t bpid;
342 	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
343 	int i, ret;
344 	unsigned int n = 0;
345 	struct dpaa2_bp_info *bp_info;
346 
347 	bp_info = mempool_to_bpinfo(pool);
348 
349 	if (!(bp_info->bp_list)) {
350 		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
351 		return -ENOENT;
352 	}
353 
354 	bpid = bp_info->bpid;
355 
356 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
357 		ret = dpaa2_affine_qbman_swp();
358 		if (ret != 0) {
359 			DPAA2_MEMPOOL_ERR(
360 				"Failed to allocate IO portal, tid: %d",
361 				rte_gettid());
362 			return ret;
363 		}
364 	}
365 	swp = DPAA2_PER_LCORE_PORTAL;
366 
367 	while (n < count) {
368 		/* Acquire is all-or-nothing, so we pull buffers in chunks of
369 		 * DPAA2_MBUF_MAX_ACQ_REL (7), then the remainder.
370 		 */
371 		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
372 			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
373 						DPAA2_MBUF_MAX_ACQ_REL);
374 		} else {
375 			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
376 						count - n);
377 		}
378 		/* If fewer buffers than requested are available in the pool,
379 		 * qbman_swp_acquire returns 0
380 		 */
381 		if (ret <= 0) {
382 			DPAA2_MEMPOOL_DP_DEBUG(
383 				"Buffer acquire failed with err code: %d", ret);
384 			/* The API expects the exact number of requested bufs */
385 			/* Release all buffers acquired so far */
386 			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
387 					   bp_info->meta_data_size, n);
388 			return -ENOBUFS;
389 		}
390 		/* derive mbuf pointers from the acquired buffer addresses */
391 		for (i = 0; (i < ret) && bufs[i]; i++) {
392 			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
393 			obj_table[n] = (struct rte_mbuf *)
394 				       (bufs[i] - bp_info->meta_data_size);
395 			DPAA2_MEMPOOL_DP_DEBUG(
396 				   "Acquired %p address %p from BMAN\n",
397 				   (void *)bufs[i], (void *)obj_table[n]);
398 			n++;
399 		}
400 	}
401 
402 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
403 	alloc += n;
404 	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
405 			       alloc, count, n);
406 #endif
407 	return 0;
408 }
409 
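/*
 * Mempool "enqueue" op: thin wrapper that releases the objects back to the
 * hardware pool via rte_dpaa2_mbuf_release().
 */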
410 static int
411 rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
412 		  void * const *obj_table, unsigned int n)
413 {
414 	struct dpaa2_bp_info *bp_info;
415 
416 	bp_info = mempool_to_bpinfo(pool);
417 	if (!(bp_info->bp_list)) {
418 		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
419 		return -ENOENT;
420 	}
421 	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
422 			   bp_info->meta_data_size, n);
423 
424 	return 0;
425 }
426 
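/*
 * Mempool "get_count" op: query the MC firmware for the number of buffers
 * currently free in the DPBP backing this pool.
 */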
427 static unsigned int
428 rte_hw_mbuf_get_count(const struct rte_mempool *mp)
429 {
430 	int ret;
431 	unsigned int num_of_bufs = 0;
432 	struct dpaa2_bp_info *bp_info;
433 	struct dpaa2_dpbp_dev *dpbp_node;
434 	struct fsl_mc_io mc_io;
435 
436 	if (!mp || !mp->pool_data) {
437 		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
438 		return 0;
439 	}
440 
441 	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
442 	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
443 
444 	/* In case a secondary process accesses the stats, the MCP portal in
445 	 * priv-hw may hold the primary process address. Use the secondary
446 	 * process based MCP portal address for this object instead.
447 	 */
448 	mc_io.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
449 	ret = dpbp_get_num_free_bufs(&mc_io, CMD_PRI_LOW,
450 				     dpbp_node->token, &num_of_bufs);
451 	if (ret) {
452 		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
453 				  ret);
454 		return 0;
455 	}
456 
457 	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
458 
459 	return num_of_bufs;
460 }
461 
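/*
 * Mempool "populate" op: externally allocated memory (no memseg list) is
 * DMA-mapped for the fslmc VFIO group, the PA->VA mapping is recorded in
 * the dpaax IOVA table, and object initialisation is delegated to the
 * generic populate helper.
 */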
462 static int
463 dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
464 	      void *vaddr, rte_iova_t paddr, size_t len,
465 	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
466 {
467 	struct rte_memseg_list *msl;
468 	/* A memseg list exists only when the memory is not external.
469 	 * So a DMA map is required only when the memory is provided by the
470 	 * user, i.e. external memory.
471 	 */
472 	msl = rte_mem_virt2memseg_list(vaddr);
473 
474 	if (!msl) {
475 		DPAA2_MEMPOOL_DEBUG("Memsegment is External.");
476 		rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
477 				(size_t)paddr, (size_t)len);
478 	}
479 	/* Insert entry into the PA->VA Table */
480 	dpaax_iova_table_update(paddr, vaddr, len);
481 
482 	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
483 					       len, obj_cb, obj_cb_arg);
484 }
485 
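/*
 * Hardware mempool ops exported under DPAA2_MEMPOOL_OPS_NAME: pool
 * alloc/free manage the DPBP device, enqueue/dequeue map to QBMAN buffer
 * release/acquire.
 */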
486 static const struct rte_mempool_ops dpaa2_mpool_ops = {
487 	.name = DPAA2_MEMPOOL_OPS_NAME,
488 	.alloc = rte_hw_mbuf_create_pool,
489 	.free = rte_hw_mbuf_free_pool,
490 	.enqueue = rte_hw_mbuf_free_bulk,
491 	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
492 	.get_count = rte_hw_mbuf_get_count,
493 	.populate = dpaa2_populate,
494 };
495 
496 RTE_MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
497 
498 RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_mempool, NOTICE);
499