xref: /dpdk/lib/mempool/rte_mempool.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright(c) 2016 6WIND S.A.
4  * Copyright(c) 2022 SmartShare Systems
5  */
6 
7 #include <stdbool.h>
8 #include <stdlib.h>
9 #include <stdio.h>
10 #include <string.h>
11 #include <stdint.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 #include <errno.h>
15 #include <sys/queue.h>
16 
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_memory.h>
21 #include <rte_memzone.h>
22 #include <rte_malloc.h>
23 #include <rte_eal.h>
24 #include <rte_eal_memconfig.h>
25 #include <rte_errno.h>
26 #include <rte_string_fns.h>
27 #include <rte_tailq.h>
28 #include <rte_eal_paging.h>
29 #include <rte_telemetry.h>
30 
31 #include "mempool_trace.h"
32 #include "rte_mempool.h"
33 
34 RTE_LOG_REGISTER_DEFAULT(rte_mempool_logtype, INFO);
35 
36 TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
37 
38 static struct rte_tailq_elem rte_mempool_tailq = {
39 	.name = "RTE_MEMPOOL",
40 };
41 EAL_REGISTER_TAILQ(rte_mempool_tailq)
42 
43 TAILQ_HEAD(mempool_callback_tailq, mempool_callback_data);
44 
45 static struct mempool_callback_tailq callback_tailq =
46 		TAILQ_HEAD_INITIALIZER(callback_tailq);
47 
48 /* Invoke all registered mempool event callbacks. */
49 static void
50 mempool_event_callback_invoke(enum rte_mempool_event event,
51 			      struct rte_mempool *mp);
52 
53 /* Note: avoid using floating point here, since the compiler
54  * may not treat such an expression as a compile-time constant.
55  */
56 #define CALC_CACHE_FLUSHTHRESH(c) (((c) * 3) / 2)
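
/*
 * Worked example (illustrative): with a per-lcore cache size of 256,
 * CALC_CACHE_FLUSHTHRESH(256) = 256 * 3 / 2 = 384, i.e. a cache is
 * flushed back to the common pool once it grows beyond 384 objects.
 * rte_mempool_create_empty() below also uses this macro to reject
 * cache sizes whose flush threshold would exceed the pool size n.
 */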
57 
58 #if defined(RTE_ARCH_X86)
59 /*
60  * Return the greatest common divisor of a and b (Euclidean algorithm).
61  */
62 static unsigned get_gcd(unsigned a, unsigned b)
63 {
64 	unsigned c;
65 
66 	if (0 == a)
67 		return b;
68 	if (0 == b)
69 		return a;
70 
71 	if (a < b) {
72 		c = a;
73 		a = b;
74 		b = c;
75 	}
76 
77 	while (b != 0) {
78 		c = a % b;
79 		a = b;
80 		b = c;
81 	}
82 
83 	return a;
84 }
85 
86 /*
87  * Depending on the memory configuration on x86, object addresses are spread
88  * across memory channels and ranks in RAM: the pool allocator adds
89  * padding between objects to achieve this. This function returns the new
90  * (padded) size of the object.
91  */
92 static unsigned int
93 arch_mem_object_align(unsigned int obj_size)
94 {
95 	unsigned nrank, nchan;
96 	unsigned new_obj_size;
97 
98 	/* get number of channels */
99 	nchan = rte_memory_get_nchannel();
100 	if (nchan == 0)
101 		nchan = 4;
102 
103 	nrank = rte_memory_get_nrank();
104 	if (nrank == 0)
105 		nrank = 1;
106 
107 	/* process new object size */
108 	new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN;
109 	while (get_gcd(new_obj_size, nrank * nchan) != 1)
110 		new_obj_size++;
111 	return new_obj_size * RTE_MEMPOOL_ALIGN;
112 }
113 #else
114 static unsigned int
115 arch_mem_object_align(unsigned int obj_size)
116 {
117 	return obj_size;
118 }
119 #endif
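
/*
 * Worked example for the x86 variant above (illustrative, assuming a 64-byte
 * RTE_MEMPOOL_ALIGN and the defaults of 4 channels and 1 rank used above):
 * for obj_size = 2048, new_obj_size starts at (2048 + 63) / 64 = 32;
 * gcd(32, 4) != 1, so it is bumped to 33, and the padded object size becomes
 * 33 * 64 = 2112 bytes, which spreads consecutive objects across channels.
 */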
120 
121 struct pagesz_walk_arg {
122 	int socket_id;
123 	size_t min;
124 };
125 
126 static int
127 find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
128 {
129 	struct pagesz_walk_arg *wa = arg;
130 	bool valid;
131 
132 	/*
133 	 * we need to only look at page sizes available for a particular socket
134 	 * ID.  so, we either need an exact match on socket ID (can match both
135 	 * native and external memory), or, if SOCKET_ID_ANY was specified as a
136 	 * socket ID argument, we must only look at native memory and ignore any
137 	 * page sizes associated with external memory.
138 	 */
139 	valid = msl->socket_id == wa->socket_id;
140 	valid |= wa->socket_id == SOCKET_ID_ANY && msl->external == 0;
141 
142 	if (valid && msl->page_sz < wa->min)
143 		wa->min = msl->page_sz;
144 
145 	return 0;
146 }
147 
148 static size_t
149 get_min_page_size(int socket_id)
150 {
151 	struct pagesz_walk_arg wa;
152 
153 	wa.min = SIZE_MAX;
154 	wa.socket_id = socket_id;
155 
156 	rte_memseg_list_walk(find_min_pagesz, &wa);
157 
158 	return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
159 }
160 
161 
162 static void
163 mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
164 		 void *obj, rte_iova_t iova)
165 {
166 	struct rte_mempool_objhdr *hdr;
167 	struct rte_mempool_objtlr *tlr __rte_unused;
168 
169 	/* set mempool ptr in header */
170 	hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
171 	hdr->mp = mp;
172 	hdr->iova = iova;
173 	STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
174 	mp->populated_size++;
175 
176 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
177 	hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
178 	tlr = rte_mempool_get_trailer(obj);
179 	tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
180 #endif
181 }
182 
183 /* call obj_cb() for each mempool element */
184 uint32_t
185 rte_mempool_obj_iter(struct rte_mempool *mp,
186 	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
187 {
188 	struct rte_mempool_objhdr *hdr;
189 	void *obj;
190 	unsigned n = 0;
191 
192 	STAILQ_FOREACH(hdr, &mp->elt_list, next) {
193 		obj = (char *)hdr + sizeof(*hdr);
194 		obj_cb(mp, obj_cb_arg, obj, n);
195 		n++;
196 	}
197 
198 	return n;
199 }
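
/*
 * Usage sketch (illustrative, names are hypothetical): a callback matching
 * rte_mempool_obj_cb_t that zeroes every element, applied with
 * rte_mempool_obj_iter():
 *
 *	static void
 *	zero_obj(struct rte_mempool *mp, void *arg, void *obj, unsigned idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	unsigned cnt = rte_mempool_obj_iter(mp, zero_obj, NULL);
 */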
200 
201 /* call mem_cb() for each mempool memory chunk */
202 uint32_t
203 rte_mempool_mem_iter(struct rte_mempool *mp,
204 	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
205 {
206 	struct rte_mempool_memhdr *hdr;
207 	unsigned n = 0;
208 
209 	STAILQ_FOREACH(hdr, &mp->mem_list, next) {
210 		mem_cb(mp, mem_cb_arg, hdr, n);
211 		n++;
212 	}
213 
214 	return n;
215 }
216 
217 /* get the header, trailer and total size of a mempool element. */
218 uint32_t
219 rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
220 	struct rte_mempool_objsz *sz)
221 {
222 	struct rte_mempool_objsz lsz;
223 
224 	sz = (sz != NULL) ? sz : &lsz;
225 
226 	sz->header_size = sizeof(struct rte_mempool_objhdr);
227 	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0)
228 		sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
229 			RTE_MEMPOOL_ALIGN);
230 
231 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
232 	sz->trailer_size = sizeof(struct rte_mempool_objtlr);
233 #else
234 	sz->trailer_size = 0;
235 #endif
236 
237 	/* element size is at least 8-byte aligned */
238 	sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
239 
240 	/* expand trailer to next cache line */
241 	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
242 		sz->total_size = sz->header_size + sz->elt_size +
243 			sz->trailer_size;
244 		sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
245 				  (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
246 				 RTE_MEMPOOL_ALIGN_MASK);
247 	}
248 
249 	/*
250 	 * increase trailer to add padding between objects in order to
251 	 * spread them across memory channels/ranks
252 	 */
253 	if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) {
254 		unsigned new_size;
255 		new_size = arch_mem_object_align
256 			    (sz->header_size + sz->elt_size + sz->trailer_size);
257 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
258 	}
259 
260 	/* this is the size of an object, including header and trailer */
261 	sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
262 
263 	return sz->total_size;
264 }
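
/*
 * Usage sketch (illustrative): a caller can query the per-object footprint
 * before creating a pool, e.g. to size a memory area by hand:
 *
 *	struct rte_mempool_objsz objsz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &objsz);
 *	// total == objsz.header_size + objsz.elt_size + objsz.trailer_size
 */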
265 
266 /* free a memchunk allocated with rte_memzone_reserve() */
267 static void
268 rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
269 	void *opaque)
270 {
271 	const struct rte_memzone *mz = opaque;
272 	rte_memzone_free(mz);
273 }
274 
275 /* Free the memory chunks used by a mempool. All objects must be back in the pool. */
276 static void
277 rte_mempool_free_memchunks(struct rte_mempool *mp)
278 {
279 	struct rte_mempool_memhdr *memhdr;
280 	void *elt;
281 
282 	while (!STAILQ_EMPTY(&mp->elt_list)) {
283 		rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
284 		(void)elt;
285 		STAILQ_REMOVE_HEAD(&mp->elt_list, next);
286 		mp->populated_size--;
287 	}
288 
289 	while (!STAILQ_EMPTY(&mp->mem_list)) {
290 		memhdr = STAILQ_FIRST(&mp->mem_list);
291 		STAILQ_REMOVE_HEAD(&mp->mem_list, next);
292 		if (memhdr->free_cb != NULL)
293 			memhdr->free_cb(memhdr, memhdr->opaque);
294 		rte_free(memhdr);
295 		mp->nb_mem_chunks--;
296 	}
297 }
298 
299 static int
300 mempool_ops_alloc_once(struct rte_mempool *mp)
301 {
302 	int ret;
303 
304 	/* create the internal backend (a ring by default) if not already done */
305 	if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) {
306 		ret = rte_mempool_ops_alloc(mp);
307 		if (ret != 0)
308 			return ret;
309 		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
310 	}
311 	return 0;
312 }
313 
314 /* Add objects to the pool, using a physically contiguous memory
315  * zone. Return the number of objects added, or a negative value
316  * on error.
317  */
318 int
319 rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
320 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
321 	void *opaque)
322 {
323 	unsigned i = 0;
324 	size_t off;
325 	struct rte_mempool_memhdr *memhdr;
326 	int ret;
327 
328 	ret = mempool_ops_alloc_once(mp);
329 	if (ret != 0)
330 		return ret;
331 
332 	/* mempool is already populated */
333 	if (mp->populated_size >= mp->size)
334 		return -ENOSPC;
335 
336 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
337 	if (memhdr == NULL)
338 		return -ENOMEM;
339 
340 	memhdr->mp = mp;
341 	memhdr->addr = vaddr;
342 	memhdr->iova = iova;
343 	memhdr->len = len;
344 	memhdr->free_cb = free_cb;
345 	memhdr->opaque = opaque;
346 
347 	if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
348 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
349 	else
350 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
351 
352 	if (off > len) {
353 		ret = 0;
354 		goto fail;
355 	}
356 
357 	i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
358 		(char *)vaddr + off,
359 		(iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
360 		len - off, mempool_add_elem, NULL);
361 
362 	/* not enough room to store one object */
363 	if (i == 0) {
364 		ret = 0;
365 		goto fail;
366 	}
367 
368 	STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
369 	mp->nb_mem_chunks++;
370 
371 	/* Check if at least some objects in the pool are now usable for IO. */
372 	if (!(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) && iova != RTE_BAD_IOVA)
373 		mp->flags &= ~RTE_MEMPOOL_F_NON_IO;
374 
375 	/* Report the mempool as ready only when fully populated. */
376 	if (mp->populated_size >= mp->size)
377 		mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp);
378 
379 	rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);
380 	return i;
381 
382 fail:
383 	rte_free(memhdr);
384 	return ret;
385 }
386 
387 static rte_iova_t
388 get_iova(void *addr)
389 {
390 	struct rte_memseg *ms;
391 
392 	/* try registered memory first */
393 	ms = rte_mem_virt2memseg(addr, NULL);
394 	if (ms == NULL || ms->iova == RTE_BAD_IOVA)
395 		/* fall back to actual physical address */
396 		return rte_mem_virt2iova(addr);
397 	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
398 }
399 
400 /* Populate the mempool with a virtual area. Return the number of
401  * objects added, or a negative value on error.
402  */
403 int
404 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
405 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
406 	void *opaque)
407 {
408 	rte_iova_t iova;
409 	size_t off, phys_len;
410 	int ret, cnt = 0;
411 
412 	if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG)
413 		return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
414 			len, free_cb, opaque);
415 
416 	for (off = 0; off < len &&
417 		     mp->populated_size < mp->size; off += phys_len) {
418 
419 		iova = get_iova(addr + off);
420 
421 		/* populate with the largest group of contiguous pages */
422 		for (phys_len = RTE_MIN(
423 			(size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
424 				(addr + off)),
425 			len - off);
426 		     off + phys_len < len;
427 		     phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
428 			rte_iova_t iova_tmp;
429 
430 			iova_tmp = get_iova(addr + off + phys_len);
431 
432 			if (iova_tmp == RTE_BAD_IOVA ||
433 					iova_tmp != iova + phys_len)
434 				break;
435 		}
436 
437 		ret = rte_mempool_populate_iova(mp, addr + off, iova,
438 			phys_len, free_cb, opaque);
439 		if (ret == 0)
440 			continue;
441 		if (ret < 0)
442 			goto fail;
443 		/* no need to call the free callback for next chunks */
444 		free_cb = NULL;
445 		cnt += ret;
446 	}
447 
448 	rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque);
449 	return cnt;
450 
451  fail:
452 	rte_mempool_free_memchunks(mp);
453 	return ret;
454 }
455 
456 /* Get the minimal page size used in a mempool before populating it. */
457 int
458 rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
459 {
460 	bool need_iova_contig_obj;
461 	bool alloc_in_ext_mem;
462 	int ret;
463 
464 	/* check if we can retrieve a valid socket ID */
465 	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
466 	if (ret < 0)
467 		return -EINVAL;
468 	alloc_in_ext_mem = (ret == 1);
469 	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
470 
471 	if (!need_iova_contig_obj)
472 		*pg_sz = 0;
473 	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
474 		*pg_sz = get_min_page_size(mp->socket_id);
475 	else
476 		*pg_sz = rte_mem_page_size();
477 
478 	rte_mempool_trace_get_page_size(mp, *pg_sz);
479 	return 0;
480 }
481 
482 /* Default function to populate the mempool: allocate memory in memzones,
483  * and populate them. Return the number of objects added, or a negative
484  * value on error.
485  */
486 int
487 rte_mempool_populate_default(struct rte_mempool *mp)
488 {
489 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
490 	char mz_name[RTE_MEMZONE_NAMESIZE];
491 	const struct rte_memzone *mz;
492 	ssize_t mem_size;
493 	size_t align, pg_sz, pg_shift = 0;
494 	rte_iova_t iova;
495 	unsigned mz_id, n;
496 	int ret;
497 	bool need_iova_contig_obj;
498 	size_t max_alloc_size = SIZE_MAX;
499 
500 	ret = mempool_ops_alloc_once(mp);
501 	if (ret != 0)
502 		return ret;
503 
504 	/* mempool must not be populated */
505 	if (mp->nb_mem_chunks != 0)
506 		return -EEXIST;
507 
508 	/*
509 	 * the following section calculates page shift and page size values.
510 	 *
511 	 * these values impact the result of the calc_mem_size operation, which
512 	 * returns the amount of memory that should be allocated to store the
513 	 * desired number of objects. when the page size is not zero, more memory
514 	 * is allocated for padding between objects, to ensure that an object
515 	 * does not cross a page boundary. in other words, page size/shift are to
516 	 * be set to zero if mempool elements need not care about page boundaries.
517 	 * there are several considerations for page size and page shift here.
518 	 *
519 	 * if we don't need our mempools to have physically contiguous objects,
520 	 * then just set page shift and page size to 0, because the user has
521 	 * indicated that there's no need to care about anything.
522 	 *
523 	 * if we do need contiguous objects (if a mempool driver has its
524 	 * own calc_size() method returning min_chunk_size = mem_size),
525 	 * there is also an option to reserve the entire mempool memory
526 	 * as one contiguous block of memory.
527 	 *
528 	 * if we require contiguous objects, but not necessarily the entire
529 	 * mempool reserved space to be contiguous, pg_sz will be != 0,
530 	 * and the default ops->populate() will take care of not placing
531 	 * objects across pages.
532 	 *
533 	 * if our IO addresses are physical, we may get memory from bigger
534 	 * pages, or we might get memory from smaller pages, and how much of it
535 	 * we require depends on whether we want bigger or smaller pages.
536 	 * However, requesting each and every memory size is too much work, so
537 	 * what we'll do instead is walk through the page sizes available, pick
538 	 * the smallest one and set up page shift to match that one. We will be
539 	 * wasting some space this way, but it's much nicer than looping around
540 	 * trying to reserve each and every page size.
541 	 *
542 	 * If we fail to get enough contiguous memory, then we'll go and
543 	 * reserve space in smaller chunks.
544 	 */
545 
546 	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
547 	ret = rte_mempool_get_page_size(mp, &pg_sz);
548 	if (ret < 0)
549 		return ret;
550 
551 	if (pg_sz != 0)
552 		pg_shift = rte_bsf32(pg_sz);
553 
554 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
555 		size_t min_chunk_size;
556 
557 		mem_size = rte_mempool_ops_calc_mem_size(
558 			mp, n, pg_shift, &min_chunk_size, &align);
559 
560 		if (mem_size < 0) {
561 			ret = mem_size;
562 			goto fail;
563 		}
564 
565 		ret = snprintf(mz_name, sizeof(mz_name),
566 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
567 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
568 			ret = -ENAMETOOLONG;
569 			goto fail;
570 		}
571 
572 		/* if we're trying to reserve contiguous memory, add appropriate
573 		 * memzone flag.
574 		 */
575 		if (min_chunk_size == (size_t)mem_size)
576 			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
577 
578 		/* Allocate a memzone, retrying with a smaller area on ENOMEM */
579 		do {
580 			mz = rte_memzone_reserve_aligned(mz_name,
581 				RTE_MIN((size_t)mem_size, max_alloc_size),
582 				mp->socket_id, mz_flags, align);
583 
584 			if (mz != NULL || rte_errno != ENOMEM)
585 				break;
586 
587 			max_alloc_size = RTE_MIN(max_alloc_size,
588 						(size_t)mem_size) / 2;
589 		} while (mz == NULL && max_alloc_size >= min_chunk_size);
590 
591 		if (mz == NULL) {
592 			ret = -rte_errno;
593 			goto fail;
594 		}
595 
596 		if (need_iova_contig_obj)
597 			iova = mz->iova;
598 		else
599 			iova = RTE_BAD_IOVA;
600 
601 		if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
602 			ret = rte_mempool_populate_iova(mp, mz->addr,
603 				iova, mz->len,
604 				rte_mempool_memchunk_mz_free,
605 				(void *)(uintptr_t)mz);
606 		else
607 			ret = rte_mempool_populate_virt(mp, mz->addr,
608 				mz->len, pg_sz,
609 				rte_mempool_memchunk_mz_free,
610 				(void *)(uintptr_t)mz);
611 		if (ret == 0) /* should not happen */
612 			ret = -ENOBUFS;
613 		if (ret < 0) {
614 			rte_memzone_free(mz);
615 			goto fail;
616 		}
617 	}
618 
619 	rte_mempool_trace_populate_default(mp);
620 	return mp->size;
621 
622  fail:
623 	rte_mempool_free_memchunks(mp);
624 	return ret;
625 }
626 
627 /* return the memory size required for mempool objects in anonymous mem */
628 static ssize_t
629 get_anon_size(const struct rte_mempool *mp)
630 {
631 	ssize_t size;
632 	size_t pg_sz, pg_shift;
633 	size_t min_chunk_size;
634 	size_t align;
635 
636 	pg_sz = rte_mem_page_size();
637 	pg_shift = rte_bsf32(pg_sz);
638 	size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
639 					     &min_chunk_size, &align);
640 
641 	return size;
642 }
643 
644 /* unmap a memory zone mapped by rte_mempool_populate_anon() */
645 static void
646 rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
647 	void *opaque)
648 {
649 	ssize_t size;
650 
651 	/*
652 	 * Calculate size since memhdr->len has contiguous chunk length
653 	 * which may be smaller if anon map is split into many contiguous
654 	 * chunks. Result must be the same as we calculated on populate.
655 	 */
656 	size = get_anon_size(memhdr->mp);
657 	if (size < 0)
658 		return;
659 
660 	rte_mem_unmap(opaque, size);
661 }
662 
663 /* populate the mempool with an anonymous mapping */
664 int
665 rte_mempool_populate_anon(struct rte_mempool *mp)
666 {
667 	ssize_t size;
668 	int ret;
669 	char *addr;
670 
671 	/* mempool is already populated, error */
672 	if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
673 		rte_errno = EINVAL;
674 		return 0;
675 	}
676 
677 	ret = mempool_ops_alloc_once(mp);
678 	if (ret < 0) {
679 		rte_errno = -ret;
680 		return 0;
681 	}
682 
683 	size = get_anon_size(mp);
684 	if (size < 0) {
685 		rte_errno = -size;
686 		return 0;
687 	}
688 
689 	/* get a chunk of virtually contiguous memory */
690 	addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
691 		RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
692 	if (addr == NULL)
693 		return 0;
694 	/* can't use MAP_LOCKED, it does not exist on BSD */
695 	if (rte_mem_lock(addr, size) < 0) {
696 		rte_mem_unmap(addr, size);
697 		return 0;
698 	}
699 
700 	ret = rte_mempool_populate_virt(mp, addr, size, rte_mem_page_size(),
701 		rte_mempool_memchunk_anon_free, addr);
702 	if (ret == 0) /* should not happen */
703 		ret = -ENOBUFS;
704 	if (ret < 0) {
705 		rte_errno = -ret;
706 		goto fail;
707 	}
708 
709 	rte_mempool_trace_populate_anon(mp);
710 	return mp->populated_size;
711 
712  fail:
713 	rte_mempool_free_memchunks(mp);
714 	return 0;
715 }
716 
717 /* free a mempool */
718 void
719 rte_mempool_free(struct rte_mempool *mp)
720 {
721 	struct rte_mempool_list *mempool_list = NULL;
722 	struct rte_tailq_entry *te;
723 
724 	if (mp == NULL)
725 		return;
726 
727 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
728 	rte_mcfg_tailq_write_lock();
729 	/* find the tailq entry */
730 	TAILQ_FOREACH(te, mempool_list, next) {
731 		if (te->data == (void *)mp)
732 			break;
733 	}
734 
735 	if (te != NULL) {
736 		TAILQ_REMOVE(mempool_list, te, next);
737 		rte_free(te);
738 	}
739 	rte_mcfg_tailq_write_unlock();
740 
741 	mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp);
742 	rte_mempool_trace_free(mp);
743 	rte_mempool_free_memchunks(mp);
744 	rte_mempool_ops_free(mp);
745 	rte_memzone_free(mp->mz);
746 }
747 
748 static void
749 mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
750 {
751 	/* Check that the cache has enough space for the flush threshold */
752 	RTE_BUILD_BUG_ON(CALC_CACHE_FLUSHTHRESH(RTE_MEMPOOL_CACHE_MAX_SIZE) >
753 			 RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs) /
754 			 RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs[0]));
755 
756 	cache->size = size;
757 	cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
758 	cache->len = 0;
759 }
760 
761 /*
762  * Create and initialize a cache for objects that are retrieved from and
763  * returned to an underlying mempool. This structure is identical to the
764  * local_cache[lcore_id] pointed to by the mempool structure.
765  */
766 struct rte_mempool_cache *
767 rte_mempool_cache_create(uint32_t size, int socket_id)
768 {
769 	struct rte_mempool_cache *cache;
770 
771 	if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
772 		rte_errno = EINVAL;
773 		return NULL;
774 	}
775 
776 	cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
777 				  RTE_CACHE_LINE_SIZE, socket_id);
778 	if (cache == NULL) {
779 		RTE_MEMPOOL_LOG(ERR, "Cannot allocate mempool cache.");
780 		rte_errno = ENOMEM;
781 		return NULL;
782 	}
783 
784 	mempool_cache_init(cache, size);
785 
786 	rte_mempool_trace_cache_create(size, socket_id, cache);
787 	return cache;
788 }
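
/*
 * Usage sketch (illustrative, names are hypothetical): a user-owned cache
 * for a thread that is not a registered lcore, used with the generic
 * get/put API and flushed before being freed:
 *
 *	struct rte_mempool_cache *c = rte_mempool_cache_create(128, SOCKET_ID_ANY);
 *	void *objs[32];
 *
 *	if (c != NULL && rte_mempool_generic_get(mp, objs, 32, c) == 0)
 *		rte_mempool_generic_put(mp, objs, 32, c);
 *	rte_mempool_cache_flush(c, mp);
 *	rte_mempool_cache_free(c);
 */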
789 
790 /*
791  * Free a cache. It's the responsibility of the user to make sure that any
792  * remaining objects in the cache are flushed to the corresponding
793  * mempool.
794  */
795 void
796 rte_mempool_cache_free(struct rte_mempool_cache *cache)
797 {
798 	rte_mempool_trace_cache_free(cache);
799 	rte_free(cache);
800 }
801 
802 /* create an empty mempool */
803 struct rte_mempool *
804 rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
805 	unsigned cache_size, unsigned private_data_size,
806 	int socket_id, unsigned flags)
807 {
808 	char mz_name[RTE_MEMZONE_NAMESIZE];
809 	struct rte_mempool_list *mempool_list;
810 	struct rte_mempool *mp = NULL;
811 	struct rte_tailq_entry *te = NULL;
812 	const struct rte_memzone *mz = NULL;
813 	size_t mempool_size;
814 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
815 	struct rte_mempool_objsz objsz;
816 	unsigned lcore_id;
817 	int ret;
818 
819 	/* compile-time checks */
820 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
821 			  RTE_CACHE_LINE_MASK) != 0);
822 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
823 			  RTE_CACHE_LINE_MASK) != 0);
824 #ifdef RTE_LIBRTE_MEMPOOL_STATS
825 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
826 			  RTE_CACHE_LINE_MASK) != 0);
827 	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
828 			  RTE_CACHE_LINE_MASK) != 0);
829 #endif
830 
831 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
832 
833 	/* asked for zero items */
834 	if (n == 0) {
835 		rte_errno = EINVAL;
836 		return NULL;
837 	}
838 
839 	/* requested cache size is too big */
840 	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
841 	    CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
842 		rte_errno = EINVAL;
843 		return NULL;
844 	}
845 
846 	/* enforce only user flags are passed by the application */
847 	if ((flags & ~RTE_MEMPOOL_VALID_USER_FLAGS) != 0) {
848 		rte_errno = EINVAL;
849 		return NULL;
850 	}
851 
852 	/*
853 	 * No objects in the pool can be used for IO until it's populated
854 	 * with at least some objects with valid IOVA.
855 	 */
856 	flags |= RTE_MEMPOOL_F_NON_IO;
857 
858 	/* "no cache align" implies "no spread" */
859 	if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
860 		flags |= RTE_MEMPOOL_F_NO_SPREAD;
861 
862 	/* calculate mempool object sizes. */
863 	if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
864 		rte_errno = EINVAL;
865 		return NULL;
866 	}
867 
868 	rte_mcfg_mempool_write_lock();
869 
870 	/*
871 	 * reserve a memory zone for this mempool: private data is
872 	 * cache-aligned
873 	 */
874 	private_data_size = (private_data_size +
875 			     RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
876 
877 
878 	/* try to allocate tailq entry */
879 	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
880 	if (te == NULL) {
881 		RTE_MEMPOOL_LOG(ERR, "Cannot allocate tailq entry!");
882 		goto exit_unlock;
883 	}
884 
885 	mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
886 	mempool_size += private_data_size;
887 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
888 
889 	ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
890 	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
891 		rte_errno = ENAMETOOLONG;
892 		goto exit_unlock;
893 	}
894 
895 	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
896 	if (mz == NULL)
897 		goto exit_unlock;
898 
899 	/* init the mempool structure */
900 	mp = mz->addr;
901 	memset(mp, 0, RTE_MEMPOOL_HEADER_SIZE(mp, cache_size));
902 	ret = strlcpy(mp->name, name, sizeof(mp->name));
903 	if (ret < 0 || ret >= (int)sizeof(mp->name)) {
904 		rte_errno = ENAMETOOLONG;
905 		goto exit_unlock;
906 	}
907 	mp->mz = mz;
908 	mp->size = n;
909 	mp->flags = flags;
910 	mp->socket_id = socket_id;
911 	mp->elt_size = objsz.elt_size;
912 	mp->header_size = objsz.header_size;
913 	mp->trailer_size = objsz.trailer_size;
914 	/* Size of default caches, zero means disabled. */
915 	mp->cache_size = cache_size;
916 	mp->private_data_size = private_data_size;
917 	STAILQ_INIT(&mp->elt_list);
918 	STAILQ_INIT(&mp->mem_list);
919 
920 	/*
921 	 * Since we have 4 combinations of SP/SC and MP/MC, examine the flags to
922 	 * select the correct index into the table of ops structs.
923 	 */
924 	if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET))
925 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
926 	else if (flags & RTE_MEMPOOL_F_SP_PUT)
927 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
928 	else if (flags & RTE_MEMPOOL_F_SC_GET)
929 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
930 	else
931 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
932 
933 	if (ret)
934 		goto exit_unlock;
935 
936 	/*
937 	 * local_cache pointer is set even if cache_size is zero.
938 	 * The local_cache points to just past the elt_pa[] array.
939 	 */
940 	mp->local_cache = (struct rte_mempool_cache *)
941 		RTE_PTR_ADD(mp, RTE_MEMPOOL_HEADER_SIZE(mp, 0));
942 
943 	/* Init all default caches. */
944 	if (cache_size != 0) {
945 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
946 			mempool_cache_init(&mp->local_cache[lcore_id],
947 					   cache_size);
948 	}
949 
950 	te->data = mp;
951 
952 	rte_mcfg_tailq_write_lock();
953 	TAILQ_INSERT_TAIL(mempool_list, te, next);
954 	rte_mcfg_tailq_write_unlock();
955 	rte_mcfg_mempool_write_unlock();
956 
957 	rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
958 		private_data_size, flags, mp);
959 	return mp;
960 
961 exit_unlock:
962 	rte_mcfg_mempool_write_unlock();
963 	rte_free(te);
964 	rte_mempool_free(mp);
965 	return NULL;
966 }
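
/*
 * Usage sketch (illustrative, names are hypothetical): the manual counterpart
 * of rte_mempool_create() below, for callers that want to pick the ops or the
 * population method themselves:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("hypothetical_pool", 4096, 2048,
 *				      256, 0, SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		return NULL;
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *	    rte_mempool_populate_default(mp) < 0) {
 *		rte_mempool_free(mp);
 *		return NULL;
 *	}
 *	return mp;
 */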
967 
968 /* create the mempool */
969 struct rte_mempool *
970 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
971 	unsigned cache_size, unsigned private_data_size,
972 	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
973 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
974 	int socket_id, unsigned flags)
975 {
976 	struct rte_mempool *mp;
977 
978 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
979 		private_data_size, socket_id, flags);
980 	if (mp == NULL)
981 		return NULL;
982 
983 	/* call the mempool priv initializer */
984 	if (mp_init)
985 		mp_init(mp, mp_init_arg);
986 
987 	if (rte_mempool_populate_default(mp) < 0)
988 		goto fail;
989 
990 	/* call the object initializers */
991 	if (obj_init)
992 		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
993 
994 	rte_mempool_trace_create(name, n, elt_size, cache_size,
995 		private_data_size, mp_init, mp_init_arg, obj_init,
996 		obj_init_arg, flags, mp);
997 	return mp;
998 
999  fail:
1000 	rte_mempool_free(mp);
1001 	return NULL;
1002 }
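
/*
 * Usage sketch (illustrative, names are hypothetical): a typical fixed-size
 * object pool created in one call, then used with the simple get/put API:
 *
 *	struct rte_mempool *mp = rte_mempool_create("hypothetical_msgs",
 *		8192, 512, 256, 0, NULL, NULL, NULL, NULL,
 *		SOCKET_ID_ANY, 0);
 *	void *msg;
 *
 *	if (mp != NULL && rte_mempool_get(mp, &msg) == 0) {
 *		memset(msg, 0, 512);	// use the 512-byte element
 *		rte_mempool_put(mp, msg);
 *	}
 */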
1003 
1004 /* Return the number of available (free) entries in the mempool */
1005 unsigned int
1006 rte_mempool_avail_count(const struct rte_mempool *mp)
1007 {
1008 	unsigned count;
1009 	unsigned lcore_id;
1010 
1011 	count = rte_mempool_ops_get_count(mp);
1012 
1013 	if (mp->cache_size == 0)
1014 		return count;
1015 
1016 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1017 		count += mp->local_cache[lcore_id].len;
1018 
1019 	/*
1020 	 * due to a race condition (access to len is not locked), the
1021 	 * total can be greater than size... so cap the result
1022 	 */
1023 	if (count > mp->size)
1024 		return mp->size;
1025 	return count;
1026 }
1027 
1028 /* return the number of entries allocated from the mempool */
1029 unsigned int
1030 rte_mempool_in_use_count(const struct rte_mempool *mp)
1031 {
1032 	return mp->size - rte_mempool_avail_count(mp);
1033 }
1034 
1035 /* dump the cache status */
1036 static unsigned
1037 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
1038 {
1039 	unsigned lcore_id;
1040 	unsigned count = 0;
1041 	unsigned cache_count;
1042 
1043 	fprintf(f, "  internal cache infos:\n");
1044 	fprintf(f, "    cache_size=%"PRIu32"\n", mp->cache_size);
1045 
1046 	if (mp->cache_size == 0)
1047 		return count;
1048 
1049 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1050 		cache_count = mp->local_cache[lcore_id].len;
1051 		fprintf(f, "    cache_count[%u]=%"PRIu32"\n",
1052 			lcore_id, cache_count);
1053 		count += cache_count;
1054 	}
1055 	fprintf(f, "    total_cache_count=%u\n", count);
1056 	return count;
1057 }
1058 
1059 #ifndef __INTEL_COMPILER
1060 #pragma GCC diagnostic ignored "-Wcast-qual"
1061 #endif
1062 
1063 /* check and update cookies or panic (internal) */
1064 void rte_mempool_check_cookies(const struct rte_mempool *mp,
1065 	void * const *obj_table_const, unsigned n, int free)
1066 {
1067 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1068 	struct rte_mempool_objhdr *hdr;
1069 	struct rte_mempool_objtlr *tlr;
1070 	uint64_t cookie;
1071 	void *tmp;
1072 	void *obj;
1073 	void **obj_table;
1074 
1075 	/* Forcibly drop the "const" qualifier. This is done only when
1076 	 * RTE_LIBRTE_MEMPOOL_DEBUG is enabled. */
1077 	tmp = (void *) obj_table_const;
1078 	obj_table = tmp;
1079 
1080 	while (n--) {
1081 		obj = obj_table[n];
1082 
1083 		if (rte_mempool_from_obj(obj) != mp)
1084 			rte_panic("MEMPOOL: object is owned by another "
1085 				  "mempool\n");
1086 
1087 		hdr = rte_mempool_get_header(obj);
1088 		cookie = hdr->cookie;
1089 
1090 		if (free == 0) {
1091 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
1092 				RTE_MEMPOOL_LOG(CRIT,
1093 					"obj=%p, mempool=%p, cookie=%" PRIx64,
1094 					obj, (const void *) mp, cookie);
1095 				rte_panic("MEMPOOL: bad header cookie (put)\n");
1096 			}
1097 			hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
1098 		} else if (free == 1) {
1099 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
1100 				RTE_MEMPOOL_LOG(CRIT,
1101 					"obj=%p, mempool=%p, cookie=%" PRIx64,
1102 					obj, (const void *) mp, cookie);
1103 				rte_panic("MEMPOOL: bad header cookie (get)\n");
1104 			}
1105 			hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
1106 		} else if (free == 2) {
1107 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
1108 			    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
1109 				RTE_MEMPOOL_LOG(CRIT,
1110 					"obj=%p, mempool=%p, cookie=%" PRIx64,
1111 					obj, (const void *) mp, cookie);
1112 				rte_panic("MEMPOOL: bad header cookie (audit)\n");
1113 			}
1114 		}
1115 		tlr = rte_mempool_get_trailer(obj);
1116 		cookie = tlr->cookie;
1117 		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
1118 			RTE_MEMPOOL_LOG(CRIT,
1119 				"obj=%p, mempool=%p, cookie=%" PRIx64,
1120 				obj, (const void *) mp, cookie);
1121 			rte_panic("MEMPOOL: bad trailer cookie\n");
1122 		}
1123 	}
1124 #else
1125 	RTE_SET_USED(mp);
1126 	RTE_SET_USED(obj_table_const);
1127 	RTE_SET_USED(n);
1128 	RTE_SET_USED(free);
1129 #endif
1130 }
1131 
1132 void
1133 rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
1134 	void * const *first_obj_table_const, unsigned int n, int free)
1135 {
1136 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1137 	struct rte_mempool_info info;
1138 	const size_t total_elt_sz =
1139 		mp->header_size + mp->elt_size + mp->trailer_size;
1140 	unsigned int i, j;
1141 
1142 	rte_mempool_ops_get_info(mp, &info);
1143 
1144 	for (i = 0; i < n; ++i) {
1145 		void *first_obj = first_obj_table_const[i];
1146 
1147 		for (j = 0; j < info.contig_block_size; ++j) {
1148 			void *obj;
1149 
1150 			obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
1151 			rte_mempool_check_cookies(mp, &obj, 1, free);
1152 		}
1153 	}
1154 #else
1155 	RTE_SET_USED(mp);
1156 	RTE_SET_USED(first_obj_table_const);
1157 	RTE_SET_USED(n);
1158 	RTE_SET_USED(free);
1159 #endif
1160 }
1161 
1162 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1163 static void
1164 mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
1165 	void *obj, __rte_unused unsigned idx)
1166 {
1167 	RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2);
1168 }
1169 
1170 static void
1171 mempool_audit_cookies(struct rte_mempool *mp)
1172 {
1173 	unsigned num;
1174 
1175 	num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL);
1176 	if (num != mp->size) {
1177 		rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
1178 			"iterated only over %u elements\n",
1179 			mp, mp->size, num);
1180 	}
1181 }
1182 #else
1183 #define mempool_audit_cookies(mp) do {} while(0)
1184 #endif
1185 
1186 #ifndef __INTEL_COMPILER
1187 #pragma GCC diagnostic error "-Wcast-qual"
1188 #endif
1189 
1190 /* check the consistency of the per-lcore caches */
1191 static void
1192 mempool_audit_cache(const struct rte_mempool *mp)
1193 {
1194 	/* check cache size consistency */
1195 	unsigned lcore_id;
1196 
1197 	if (mp->cache_size == 0)
1198 		return;
1199 
1200 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1201 		const struct rte_mempool_cache *cache;
1202 		cache = &mp->local_cache[lcore_id];
1203 		if (cache->len > RTE_DIM(cache->objs)) {
1204 			RTE_MEMPOOL_LOG(CRIT, "badness on cache[%u]",
1205 				lcore_id);
1206 			rte_panic("MEMPOOL: invalid cache len\n");
1207 		}
1208 	}
1209 }
1210 
1211 /* check the consistency of mempool (size, cookies, ...) */
1212 void
1213 rte_mempool_audit(struct rte_mempool *mp)
1214 {
1215 	mempool_audit_cache(mp);
1216 	mempool_audit_cookies(mp);
1217 
1218 	/* For the case where mempool debug is not set, and the cache size is 0 */
1219 	RTE_SET_USED(mp);
1220 }
1221 
1222 /* dump the status of the mempool on the console */
1223 void
1224 rte_mempool_dump(FILE *f, struct rte_mempool *mp)
1225 {
1226 #ifdef RTE_LIBRTE_MEMPOOL_STATS
1227 	struct rte_mempool_info info;
1228 	struct rte_mempool_debug_stats sum;
1229 	unsigned lcore_id;
1230 #endif
1231 	struct rte_mempool_memhdr *memhdr;
1232 	struct rte_mempool_ops *ops;
1233 	unsigned common_count;
1234 	unsigned cache_count;
1235 	size_t mem_len = 0;
1236 
1237 	RTE_ASSERT(f != NULL);
1238 	RTE_ASSERT(mp != NULL);
1239 
1240 	fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
1241 	fprintf(f, "  flags=%x\n", mp->flags);
1242 	fprintf(f, "  socket_id=%d\n", mp->socket_id);
1243 	fprintf(f, "  pool=%p\n", mp->pool_data);
1244 	fprintf(f, "  iova=0x%" PRIx64 "\n", mp->mz->iova);
1245 	fprintf(f, "  nb_mem_chunks=%u\n", mp->nb_mem_chunks);
1246 	fprintf(f, "  size=%"PRIu32"\n", mp->size);
1247 	fprintf(f, "  populated_size=%"PRIu32"\n", mp->populated_size);
1248 	fprintf(f, "  header_size=%"PRIu32"\n", mp->header_size);
1249 	fprintf(f, "  elt_size=%"PRIu32"\n", mp->elt_size);
1250 	fprintf(f, "  trailer_size=%"PRIu32"\n", mp->trailer_size);
1251 	fprintf(f, "  total_obj_size=%"PRIu32"\n",
1252 	       mp->header_size + mp->elt_size + mp->trailer_size);
1253 
1254 	fprintf(f, "  private_data_size=%"PRIu32"\n", mp->private_data_size);
1255 
1256 	fprintf(f, "  ops_index=%d\n", mp->ops_index);
1257 	ops = rte_mempool_get_ops(mp->ops_index);
1258 	fprintf(f, "  ops_name: <%s>\n", (ops != NULL) ? ops->name : "NA");
1259 
1260 	STAILQ_FOREACH(memhdr, &mp->mem_list, next)
1261 		mem_len += memhdr->len;
1262 	if (mem_len != 0) {
1263 		fprintf(f, "  avg bytes/object=%#Lf\n",
1264 			(long double)mem_len / mp->size);
1265 	}
1266 
1267 	cache_count = rte_mempool_dump_cache(f, mp);
1268 	common_count = rte_mempool_ops_get_count(mp);
1269 	if ((cache_count + common_count) > mp->size)
1270 		common_count = mp->size - cache_count;
1271 	fprintf(f, "  common_pool_count=%u\n", common_count);
1272 
1273 	/* sum and dump statistics */
1274 #ifdef RTE_LIBRTE_MEMPOOL_STATS
1275 	rte_mempool_ops_get_info(mp, &info);
1276 	memset(&sum, 0, sizeof(sum));
1277 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE + 1; lcore_id++) {
1278 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
1279 		sum.put_objs += mp->stats[lcore_id].put_objs;
1280 		sum.put_common_pool_bulk += mp->stats[lcore_id].put_common_pool_bulk;
1281 		sum.put_common_pool_objs += mp->stats[lcore_id].put_common_pool_objs;
1282 		sum.get_common_pool_bulk += mp->stats[lcore_id].get_common_pool_bulk;
1283 		sum.get_common_pool_objs += mp->stats[lcore_id].get_common_pool_objs;
1284 		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
1285 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
1286 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
1287 		sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
1288 		sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
1289 		sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
1290 	}
1291 	if (mp->cache_size != 0) {
1292 		/* Add the statistics stored in the mempool caches. */
1293 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1294 			sum.put_bulk += mp->local_cache[lcore_id].stats.put_bulk;
1295 			sum.put_objs += mp->local_cache[lcore_id].stats.put_objs;
1296 			sum.get_success_bulk += mp->local_cache[lcore_id].stats.get_success_bulk;
1297 			sum.get_success_objs += mp->local_cache[lcore_id].stats.get_success_objs;
1298 		}
1299 	}
1300 	fprintf(f, "  stats:\n");
1301 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
1302 	fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
1303 	fprintf(f, "    put_common_pool_bulk=%"PRIu64"\n", sum.put_common_pool_bulk);
1304 	fprintf(f, "    put_common_pool_objs=%"PRIu64"\n", sum.put_common_pool_objs);
1305 	fprintf(f, "    get_common_pool_bulk=%"PRIu64"\n", sum.get_common_pool_bulk);
1306 	fprintf(f, "    get_common_pool_objs=%"PRIu64"\n", sum.get_common_pool_objs);
1307 	fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
1308 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
1309 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
1310 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
1311 	if (info.contig_block_size > 0) {
1312 		fprintf(f, "    get_success_blks=%"PRIu64"\n",
1313 			sum.get_success_blks);
1314 		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
1315 	}
1316 #else
1317 	fprintf(f, "  no statistics available\n");
1318 #endif
1319 
1320 	rte_mempool_audit(mp);
1321 }
1322 
1323 /* dump the status of all mempools on the console */
1324 void
1325 rte_mempool_list_dump(FILE *f)
1326 {
1327 	struct rte_mempool *mp = NULL;
1328 	struct rte_tailq_entry *te;
1329 	struct rte_mempool_list *mempool_list;
1330 
1331 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1332 
1333 	rte_mcfg_mempool_read_lock();
1334 
1335 	TAILQ_FOREACH(te, mempool_list, next) {
1336 		mp = (struct rte_mempool *) te->data;
1337 		rte_mempool_dump(f, mp);
1338 	}
1339 
1340 	rte_mcfg_mempool_read_unlock();
1341 }
1342 
1343 /* look up a mempool by name */
1344 struct rte_mempool *
1345 rte_mempool_lookup(const char *name)
1346 {
1347 	struct rte_mempool *mp = NULL;
1348 	struct rte_tailq_entry *te;
1349 	struct rte_mempool_list *mempool_list;
1350 
1351 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1352 
1353 	rte_mcfg_mempool_read_lock();
1354 
1355 	TAILQ_FOREACH(te, mempool_list, next) {
1356 		mp = (struct rte_mempool *) te->data;
1357 		if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
1358 			break;
1359 	}
1360 
1361 	rte_mcfg_mempool_read_unlock();
1362 
1363 	if (te == NULL) {
1364 		rte_errno = ENOENT;
1365 		return NULL;
1366 	}
1367 
1368 	return mp;
1369 }
1370 
1371 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
1372 		      void *arg)
1373 {
1374 	struct rte_tailq_entry *te = NULL;
1375 	struct rte_mempool_list *mempool_list;
1376 	void *tmp_te;
1377 
1378 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1379 
1380 	rte_mcfg_mempool_read_lock();
1381 
1382 	RTE_TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
1383 		(*func)((struct rte_mempool *) te->data, arg);
1384 	}
1385 
1386 	rte_mcfg_mempool_read_unlock();
1387 }
1388 
1389 struct mempool_callback_data {
1390 	TAILQ_ENTRY(mempool_callback_data) callbacks;
1391 	rte_mempool_event_callback *func;
1392 	void *user_data;
1393 };
1394 
1395 static void
1396 mempool_event_callback_invoke(enum rte_mempool_event event,
1397 			      struct rte_mempool *mp)
1398 {
1399 	struct mempool_callback_data *cb;
1400 	void *tmp_te;
1401 
1402 	rte_mcfg_tailq_read_lock();
1403 	RTE_TAILQ_FOREACH_SAFE(cb, &callback_tailq, callbacks, tmp_te) {
1404 		rte_mcfg_tailq_read_unlock();
1405 		cb->func(event, mp, cb->user_data);
1406 		rte_mcfg_tailq_read_lock();
1407 	}
1408 	rte_mcfg_tailq_read_unlock();
1409 }
1410 
1411 int
1412 rte_mempool_event_callback_register(rte_mempool_event_callback *func,
1413 				    void *user_data)
1414 {
1415 	struct mempool_callback_data *cb;
1416 	int ret;
1417 
1418 	if (func == NULL) {
1419 		rte_errno = EINVAL;
1420 		return -rte_errno;
1421 	}
1422 
1423 	rte_mcfg_tailq_write_lock();
1424 	TAILQ_FOREACH(cb, &callback_tailq, callbacks) {
1425 		if (cb->func == func && cb->user_data == user_data) {
1426 			ret = -EEXIST;
1427 			goto exit;
1428 		}
1429 	}
1430 
1431 	cb = calloc(1, sizeof(*cb));
1432 	if (cb == NULL) {
1433 		RTE_MEMPOOL_LOG(ERR, "Cannot allocate event callback!");
1434 		ret = -ENOMEM;
1435 		goto exit;
1436 	}
1437 
1438 	cb->func = func;
1439 	cb->user_data = user_data;
1440 	TAILQ_INSERT_TAIL(&callback_tailq, cb, callbacks);
1441 	ret = 0;
1442 
1443 exit:
1444 	rte_mcfg_tailq_write_unlock();
1445 	rte_errno = -ret;
1446 	return ret;
1447 }
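
/*
 * Usage sketch (illustrative, names are hypothetical): a driver-style
 * callback matching rte_mempool_event_callback that reacts to pools
 * becoming fully populated:
 *
 *	static void
 *	hypothetical_mp_event(enum rte_mempool_event event,
 *			      struct rte_mempool *mp, void *user_data)
 *	{
 *		if (event == RTE_MEMPOOL_EVENT_READY)
 *			printf("pool %s is fully populated\n", mp->name);
 *	}
 *
 *	rte_mempool_event_callback_register(hypothetical_mp_event, NULL);
 */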
1448 
1449 int
1450 rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
1451 				      void *user_data)
1452 {
1453 	struct mempool_callback_data *cb;
1454 	int ret = -ENOENT;
1455 
1456 	rte_mcfg_tailq_write_lock();
1457 	TAILQ_FOREACH(cb, &callback_tailq, callbacks) {
1458 		if (cb->func == func && cb->user_data == user_data) {
1459 			TAILQ_REMOVE(&callback_tailq, cb, callbacks);
1460 			ret = 0;
1461 			break;
1462 		}
1463 	}
1464 	rte_mcfg_tailq_write_unlock();
1465 
1466 	if (ret == 0)
1467 		free(cb);
1468 	rte_errno = -ret;
1469 	return ret;
1470 }
1471 
1472 static void
1473 mempool_list_cb(struct rte_mempool *mp, void *arg)
1474 {
1475 	struct rte_tel_data *d = (struct rte_tel_data *)arg;
1476 
1477 	rte_tel_data_add_array_string(d, mp->name);
1478 }
1479 
1480 static int
1481 mempool_handle_list(const char *cmd __rte_unused,
1482 		    const char *params __rte_unused, struct rte_tel_data *d)
1483 {
1484 	rte_tel_data_start_array(d, RTE_TEL_STRING_VAL);
1485 	rte_mempool_walk(mempool_list_cb, d);
1486 	return 0;
1487 }
1488 
1489 struct mempool_info_cb_arg {
1490 	char *pool_name;
1491 	struct rte_tel_data *d;
1492 };
1493 
1494 static void
1495 mempool_info_cb(struct rte_mempool *mp, void *arg)
1496 {
1497 	struct mempool_info_cb_arg *info = (struct mempool_info_cb_arg *)arg;
1498 	const struct rte_memzone *mz;
1499 	uint64_t cache_count, common_count;
1500 
1501 	if (strncmp(mp->name, info->pool_name, RTE_MEMZONE_NAMESIZE))
1502 		return;
1503 
1504 	rte_tel_data_add_dict_string(info->d, "name", mp->name);
1505 	rte_tel_data_add_dict_uint(info->d, "pool_id", mp->pool_id);
1506 	rte_tel_data_add_dict_uint(info->d, "flags", mp->flags);
1507 	rte_tel_data_add_dict_int(info->d, "socket_id", mp->socket_id);
1508 	rte_tel_data_add_dict_uint(info->d, "size", mp->size);
1509 	rte_tel_data_add_dict_uint(info->d, "cache_size", mp->cache_size);
1510 	rte_tel_data_add_dict_uint(info->d, "elt_size", mp->elt_size);
1511 	rte_tel_data_add_dict_uint(info->d, "header_size", mp->header_size);
1512 	rte_tel_data_add_dict_uint(info->d, "trailer_size", mp->trailer_size);
1513 	rte_tel_data_add_dict_uint(info->d, "private_data_size",
1514 				  mp->private_data_size);
1515 	rte_tel_data_add_dict_int(info->d, "ops_index", mp->ops_index);
1516 	rte_tel_data_add_dict_uint(info->d, "populated_size",
1517 				  mp->populated_size);
1518 
1519 	cache_count = 0;
1520 	if (mp->cache_size > 0) {
1521 		int lcore_id;
1522 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1523 			cache_count += mp->local_cache[lcore_id].len;
1524 	}
1525 	rte_tel_data_add_dict_uint(info->d, "total_cache_count", cache_count);
1526 	common_count = rte_mempool_ops_get_count(mp);
1527 	if ((cache_count + common_count) > mp->size)
1528 		common_count = mp->size - cache_count;
1529 	rte_tel_data_add_dict_uint(info->d, "common_pool_count", common_count);
1530 
1531 	mz = mp->mz;
1532 	rte_tel_data_add_dict_string(info->d, "mz_name", mz->name);
1533 	rte_tel_data_add_dict_uint(info->d, "mz_len", mz->len);
1534 	rte_tel_data_add_dict_uint(info->d, "mz_hugepage_sz",
1535 				  mz->hugepage_sz);
1536 	rte_tel_data_add_dict_int(info->d, "mz_socket_id", mz->socket_id);
1537 	rte_tel_data_add_dict_uint(info->d, "mz_flags", mz->flags);
1538 }
1539 
1540 static int
1541 mempool_handle_info(const char *cmd __rte_unused, const char *params,
1542 		    struct rte_tel_data *d)
1543 {
1544 	struct mempool_info_cb_arg mp_arg;
1545 	char name[RTE_MEMZONE_NAMESIZE];
1546 
1547 	if (!params || strlen(params) == 0)
1548 		return -EINVAL;
1549 
1550 	rte_strlcpy(name, params, RTE_MEMZONE_NAMESIZE);
1551 
1552 	rte_tel_data_start_dict(d);
1553 	mp_arg.pool_name = name;
1554 	mp_arg.d = d;
1555 	rte_mempool_walk(mempool_info_cb, &mp_arg);
1556 
1557 	return 0;
1558 }
1559 
1560 RTE_INIT(mempool_init_telemetry)
1561 {
1562 	rte_telemetry_register_cmd("/mempool/list", mempool_handle_list,
1563 		"Returns the list of available mempools. Takes no parameters");
1564 	rte_telemetry_register_cmd("/mempool/info", mempool_handle_info,
1565 		"Returns mempool info. Parameters: pool_name");
1566 }
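
/*
 * Illustrative telemetry queries (assuming the standard dpdk-telemetry.py
 * client from usertools/; the pool name is hypothetical):
 *
 *	--> /mempool/list
 *	--> /mempool/info,hypothetical_pool
 *
 * The second command returns the per-pool fields filled in by
 * mempool_info_cb() above.
 */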
1567