xref: /dpdk/lib/mempool/rte_mempool.h (revision 743bd29effd0bd829e68312db9e23f85f8857cd6)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2010-2014 Intel Corporation.
399a2dd95SBruce Richardson  * Copyright(c) 2016 6WIND S.A.
4203dcc9cSMorten Brørup  * Copyright(c) 2022 SmartShare Systems
599a2dd95SBruce Richardson  */
699a2dd95SBruce Richardson 
799a2dd95SBruce Richardson #ifndef _RTE_MEMPOOL_H_
899a2dd95SBruce Richardson #define _RTE_MEMPOOL_H_
999a2dd95SBruce Richardson 
1099a2dd95SBruce Richardson /**
1199a2dd95SBruce Richardson  * @file
1299a2dd95SBruce Richardson  * RTE Mempool.
1399a2dd95SBruce Richardson  *
1499a2dd95SBruce Richardson  * A memory pool is an allocator of fixed-size objects. It is
1599a2dd95SBruce Richardson  * identified by its name, and uses a ring to store free objects. It
1699a2dd95SBruce Richardson  * provides some other optional services, like a per-core object
1799a2dd95SBruce Richardson  * cache, and an alignment helper to ensure that objects are padded
1899a2dd95SBruce Richardson  * to spread them equally on all RAM channels, ranks, and so on.
1999a2dd95SBruce Richardson  *
2099a2dd95SBruce Richardson  * Objects owned by a mempool should never be added to another
2199a2dd95SBruce Richardson  * mempool. When an object is freed using rte_mempool_put() or
2299a2dd95SBruce Richardson  * equivalent, the object data is not modified; the user can save some
2399a2dd95SBruce Richardson  * meta-data in the object data and retrieve it when allocating a
2499a2dd95SBruce Richardson  * new object.
2599a2dd95SBruce Richardson  *
2699a2dd95SBruce Richardson  * Note: the mempool implementation is not preemptible. An lcore must not be
2799a2dd95SBruce Richardson  * interrupted by another task that uses the same mempool (because it uses a
2899a2dd95SBruce Richardson  * ring which is not preemptible). Also, usual mempool functions like
2999a2dd95SBruce Richardson  * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
3099a2dd95SBruce Richardson  * thread due to the internal per-lcore cache. Due to the lack of caching,
3199a2dd95SBruce Richardson  * rte_mempool_get() or rte_mempool_put() performance will suffer when called
3299a2dd95SBruce Richardson  * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
3399a2dd95SBruce Richardson  * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
3499a2dd95SBruce Richardson  * user cache created with rte_mempool_cache_create().
3599a2dd95SBruce Richardson  */
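/*
 * Example: a plausible usage sketch for an unregistered non-EAL thread,
 * following the note above. It assumes a mempool "mp" was created elsewhere;
 * rte_mempool_cache_create(), rte_mempool_generic_get()/put() and
 * rte_mempool_cache_free() are declared later in this file.
 *
 *	struct rte_mempool_cache *cache;
 *	void *objs[32];
 *
 *	cache = rte_mempool_cache_create(256, SOCKET_ID_ANY);
 *	if (cache != NULL && rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *		// ... use the 32 objects ...
 *		rte_mempool_generic_put(mp, objs, 32, cache);
 *	}
 *	rte_mempool_cache_free(cache);
 */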
3699a2dd95SBruce Richardson 
37e9fd1ebfSTyler Retzlaff #include <stdalign.h>
3899a2dd95SBruce Richardson #include <stdio.h>
3999a2dd95SBruce Richardson #include <stdint.h>
4099a2dd95SBruce Richardson #include <inttypes.h>
4199a2dd95SBruce Richardson 
421094dd94SDavid Marchand #include <rte_compat.h>
4399a2dd95SBruce Richardson #include <rte_config.h>
4499a2dd95SBruce Richardson #include <rte_spinlock.h>
4599a2dd95SBruce Richardson #include <rte_debug.h>
4699a2dd95SBruce Richardson #include <rte_lcore.h>
472d603bf6SStephen Hemminger #include <rte_log.h>
4899a2dd95SBruce Richardson #include <rte_branch_prediction.h>
4999a2dd95SBruce Richardson #include <rte_ring.h>
5099a2dd95SBruce Richardson #include <rte_memcpy.h>
5199a2dd95SBruce Richardson #include <rte_common.h>
5299a2dd95SBruce Richardson 
5399a2dd95SBruce Richardson #include "rte_mempool_trace_fp.h"
5499a2dd95SBruce Richardson 
5599a2dd95SBruce Richardson #ifdef __cplusplus
5699a2dd95SBruce Richardson extern "C" {
5799a2dd95SBruce Richardson #endif
5899a2dd95SBruce Richardson 
5999a2dd95SBruce Richardson #define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
6099a2dd95SBruce Richardson #define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
6199a2dd95SBruce Richardson #define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
6299a2dd95SBruce Richardson 
639d87e05dSMorten Brørup #ifdef RTE_LIBRTE_MEMPOOL_STATS
6499a2dd95SBruce Richardson /**
6599a2dd95SBruce Richardson  * A structure that stores the mempool statistics (per-lcore).
66cee151b4SJoyce Kong  * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not
67cee151b4SJoyce Kong  * captured since they can be calculated from other stats.
68cee151b4SJoyce Kong  * For example: put_cache_objs = put_objs - put_common_pool_objs.
6999a2dd95SBruce Richardson  */
70c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool_debug_stats {
7199a2dd95SBruce Richardson 	uint64_t put_bulk;             /**< Number of puts. */
7299a2dd95SBruce Richardson 	uint64_t put_objs;             /**< Number of objects successfully put. */
73cee151b4SJoyce Kong 	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
74cee151b4SJoyce Kong 	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
75cee151b4SJoyce Kong 	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
76cee151b4SJoyce Kong 	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
7799a2dd95SBruce Richardson 	uint64_t get_success_bulk;     /**< Successful allocation number. */
7899a2dd95SBruce Richardson 	uint64_t get_success_objs;     /**< Objects successfully allocated. */
7999a2dd95SBruce Richardson 	uint64_t get_fail_bulk;        /**< Failed allocation number. */
8099a2dd95SBruce Richardson 	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
81cee151b4SJoyce Kong 	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
82cee151b4SJoyce Kong 	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
83a1934215SMorten Brørup 	RTE_CACHE_GUARD;
84c6552d9aSTyler Retzlaff };
8599a2dd95SBruce Richardson #endif
8699a2dd95SBruce Richardson 
8799a2dd95SBruce Richardson /**
8899a2dd95SBruce Richardson  * A structure that stores a per-core object cache.
8999a2dd95SBruce Richardson  */
90c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool_cache {
9199a2dd95SBruce Richardson 	uint32_t size;	      /**< Size of the cache */
9299a2dd95SBruce Richardson 	uint32_t flushthresh; /**< Threshold before we flush excess elements */
9399a2dd95SBruce Richardson 	uint32_t len;	      /**< Current cache count */
94203dcc9cSMorten Brørup #ifdef RTE_LIBRTE_MEMPOOL_STATS
95203dcc9cSMorten Brørup 	uint32_t unused;
96203dcc9cSMorten Brørup 	/*
97203dcc9cSMorten Brørup 	 * Alternative location for the most frequently updated mempool statistics (per-lcore),
98203dcc9cSMorten Brørup 	 * providing faster update access when using a mempool cache.
99203dcc9cSMorten Brørup 	 */
100203dcc9cSMorten Brørup 	struct {
101203dcc9cSMorten Brørup 		uint64_t put_bulk;          /**< Number of puts. */
102203dcc9cSMorten Brørup 		uint64_t put_objs;          /**< Number of objects successfully put. */
103203dcc9cSMorten Brørup 		uint64_t get_success_bulk;  /**< Successful allocation number. */
104203dcc9cSMorten Brørup 		uint64_t get_success_objs;  /**< Objects successfully allocated. */
105203dcc9cSMorten Brørup 	} stats;                        /**< Statistics */
106203dcc9cSMorten Brørup #endif
107b77f5860SMorten Brørup 	/**
108b77f5860SMorten Brørup 	 * Cache objects
109b77f5860SMorten Brørup 	 *
11099a2dd95SBruce Richardson 	 * The cache is allocated to this size to allow it to overflow in
11199a2dd95SBruce Richardson 	 * certain cases, avoiding needless emptying of the cache.
11299a2dd95SBruce Richardson 	 */
113e9fd1ebfSTyler Retzlaff 	alignas(RTE_CACHE_LINE_SIZE) void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2];
114c6552d9aSTyler Retzlaff };
11599a2dd95SBruce Richardson 
11699a2dd95SBruce Richardson /**
11799a2dd95SBruce Richardson  * A structure that stores the size of mempool elements.
11899a2dd95SBruce Richardson  */
11999a2dd95SBruce Richardson struct rte_mempool_objsz {
12099a2dd95SBruce Richardson 	uint32_t elt_size;     /**< Size of an element. */
12199a2dd95SBruce Richardson 	uint32_t header_size;  /**< Size of header (before elt). */
12299a2dd95SBruce Richardson 	uint32_t trailer_size; /**< Size of trailer (after elt). */
12399a2dd95SBruce Richardson 	uint32_t total_size;
12499a2dd95SBruce Richardson 	/**< Total size of an object (header + elt + trailer). */
12599a2dd95SBruce Richardson };
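/*
 * Example: a minimal sketch of how the element layout can be inspected,
 * assuming rte_mempool_calc_obj_size() (declared later in this file) is used
 * to compute the sizes for a 2048-byte element with default flags.
 *
 *	struct rte_mempool_objsz sz;
 *	uint32_t total = rte_mempool_calc_obj_size(2048, 0, &sz);
 *
 *	// total == sz.total_size == sz.header_size + sz.elt_size + sz.trailer_size
 */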
12699a2dd95SBruce Richardson 
12799a2dd95SBruce Richardson /** Maximum length of a memory pool's name. */
12899a2dd95SBruce Richardson #define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
12999a2dd95SBruce Richardson 			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
13099a2dd95SBruce Richardson #define RTE_MEMPOOL_MZ_PREFIX "MP_"
13199a2dd95SBruce Richardson 
13299a2dd95SBruce Richardson /* "MP_<name>" */
13399a2dd95SBruce Richardson #define	RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
13499a2dd95SBruce Richardson 
13599a2dd95SBruce Richardson #ifndef RTE_MEMPOOL_ALIGN
13699a2dd95SBruce Richardson /**
13799a2dd95SBruce Richardson  * Alignment of elements inside mempool.
13899a2dd95SBruce Richardson  */
13999a2dd95SBruce Richardson #define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
14099a2dd95SBruce Richardson #endif
14199a2dd95SBruce Richardson 
14299a2dd95SBruce Richardson #define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
14399a2dd95SBruce Richardson 
14499a2dd95SBruce Richardson /**
14599a2dd95SBruce Richardson  * Mempool object header structure
14699a2dd95SBruce Richardson  *
14799a2dd95SBruce Richardson  * Each object stored in a mempool is prefixed by this header structure.
14899a2dd95SBruce Richardson  * It allows the mempool pointer to be retrieved from the object and all
14999a2dd95SBruce Richardson  * objects attached to a mempool to be iterated over. When debug is enabled,
15099a2dd95SBruce Richardson  * a cookie is also added to this structure to help detect corruptions and
15199a2dd95SBruce Richardson  * double-frees.
15299a2dd95SBruce Richardson  */
15399a2dd95SBruce Richardson struct rte_mempool_objhdr {
154f1f6ebc0SWilliam Tu 	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
15599a2dd95SBruce Richardson 	struct rte_mempool *mp;          /**< The mempool owning the object. */
15699a2dd95SBruce Richardson 	rte_iova_t iova;                 /**< IO address of the object. */
15799a2dd95SBruce Richardson #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
15899a2dd95SBruce Richardson 	uint64_t cookie;                 /**< Debug cookie. */
15999a2dd95SBruce Richardson #endif
16099a2dd95SBruce Richardson };
16199a2dd95SBruce Richardson 
16299a2dd95SBruce Richardson /**
16399a2dd95SBruce Richardson  * Type of a list of object headers.
16499a2dd95SBruce Richardson  */
165f1f6ebc0SWilliam Tu RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
16699a2dd95SBruce Richardson 
16799a2dd95SBruce Richardson #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
16899a2dd95SBruce Richardson 
16999a2dd95SBruce Richardson /**
17099a2dd95SBruce Richardson  * Mempool object trailer structure
17199a2dd95SBruce Richardson  *
17299a2dd95SBruce Richardson  * In debug mode, each object stored in a mempool is suffixed by this
17399a2dd95SBruce Richardson  * trailer structure, which contains a cookie used to detect memory corruptions.
17499a2dd95SBruce Richardson  */
17599a2dd95SBruce Richardson struct rte_mempool_objtlr {
17699a2dd95SBruce Richardson 	uint64_t cookie;                 /**< Debug cookie. */
17799a2dd95SBruce Richardson };
17899a2dd95SBruce Richardson 
17999a2dd95SBruce Richardson #endif
18099a2dd95SBruce Richardson 
18199a2dd95SBruce Richardson /**
1822d603bf6SStephen Hemminger  * @internal Logtype used for mempool related messages.
1832d603bf6SStephen Hemminger  */
1842d603bf6SStephen Hemminger extern int rte_mempool_logtype;
1852d603bf6SStephen Hemminger #define RTE_LOGTYPE_MEMPOOL	rte_mempool_logtype
18697433132SDavid Marchand #define RTE_MEMPOOL_LOG(level, ...) \
18797433132SDavid Marchand 	RTE_LOG_LINE(level, MEMPOOL, "" __VA_ARGS__)
1882d603bf6SStephen Hemminger 
1892d603bf6SStephen Hemminger /**
19099a2dd95SBruce Richardson  * A list of memory chunks where objects are stored.
19199a2dd95SBruce Richardson  */
192f1f6ebc0SWilliam Tu RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
19399a2dd95SBruce Richardson 
19499a2dd95SBruce Richardson /**
19599a2dd95SBruce Richardson  * Callback used to free a memory chunk
19699a2dd95SBruce Richardson  */
19799a2dd95SBruce Richardson typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
19899a2dd95SBruce Richardson 	void *opaque);
19999a2dd95SBruce Richardson 
20099a2dd95SBruce Richardson /**
20199a2dd95SBruce Richardson  * Mempool objects memory header structure
20299a2dd95SBruce Richardson  *
20399a2dd95SBruce Richardson  * Describes a memory chunk where objects are stored. Each chunk is
20499a2dd95SBruce Richardson  * virtually and physically contiguous.
20599a2dd95SBruce Richardson  */
20699a2dd95SBruce Richardson struct rte_mempool_memhdr {
207f1f6ebc0SWilliam Tu 	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
20899a2dd95SBruce Richardson 	struct rte_mempool *mp;  /**< The mempool owning the chunk */
20999a2dd95SBruce Richardson 	void *addr;              /**< Virtual address of the chunk */
21099a2dd95SBruce Richardson 	rte_iova_t iova;         /**< IO address of the chunk */
21199a2dd95SBruce Richardson 	size_t len;              /**< length of the chunk */
21299a2dd95SBruce Richardson 	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
21399a2dd95SBruce Richardson 	void *opaque;            /**< Argument passed to the free callback */
21499a2dd95SBruce Richardson };
21599a2dd95SBruce Richardson 
21699a2dd95SBruce Richardson /**
21799a2dd95SBruce Richardson  * Additional information about the mempool
21899a2dd95SBruce Richardson  *
21999a2dd95SBruce Richardson  * The structure is cache-line aligned to avoid ABI breakages in
22099a2dd95SBruce Richardson  * a number of cases when something small is added.
22199a2dd95SBruce Richardson  */
222c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool_info {
22399a2dd95SBruce Richardson 	/** Number of objects in the contiguous block */
22499a2dd95SBruce Richardson 	unsigned int contig_block_size;
225c6552d9aSTyler Retzlaff };
22699a2dd95SBruce Richardson 
22799a2dd95SBruce Richardson /**
22899a2dd95SBruce Richardson  * The RTE mempool structure.
22999a2dd95SBruce Richardson  */
230c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool {
231a87a0c0dSAndrew Rybchenko 	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
23299a2dd95SBruce Richardson 	union {
23399a2dd95SBruce Richardson 		void *pool_data;         /**< Ring or pool to store objects. */
23499a2dd95SBruce Richardson 		uint64_t pool_id;        /**< External mempool identifier. */
23599a2dd95SBruce Richardson 	};
23699a2dd95SBruce Richardson 	void *pool_config;               /**< optional args for ops alloc. */
23799a2dd95SBruce Richardson 	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
23899a2dd95SBruce Richardson 	unsigned int flags;              /**< Flags of the mempool. */
23999a2dd95SBruce Richardson 	int socket_id;                   /**< Socket id passed at create. */
24099a2dd95SBruce Richardson 	uint32_t size;                   /**< Max size of the mempool. */
24199a2dd95SBruce Richardson 	uint32_t cache_size;
24299a2dd95SBruce Richardson 	/**< Size of per-lcore default local cache. */
24399a2dd95SBruce Richardson 
24499a2dd95SBruce Richardson 	uint32_t elt_size;               /**< Size of an element. */
24599a2dd95SBruce Richardson 	uint32_t header_size;            /**< Size of header (before elt). */
24699a2dd95SBruce Richardson 	uint32_t trailer_size;           /**< Size of trailer (after elt). */
24799a2dd95SBruce Richardson 
24899a2dd95SBruce Richardson 	unsigned private_data_size;      /**< Size of private data. */
24999a2dd95SBruce Richardson 	/**
25099a2dd95SBruce Richardson 	 * Index into rte_mempool_ops_table array of mempool ops
25199a2dd95SBruce Richardson 	 * structs, which contain callback function pointers.
25299a2dd95SBruce Richardson 	 * We're using an index here rather than pointers to the callbacks
25399a2dd95SBruce Richardson 	 * to facilitate any secondary processes that may want to use
25499a2dd95SBruce Richardson 	 * this mempool.
25599a2dd95SBruce Richardson 	 */
25699a2dd95SBruce Richardson 	int32_t ops_index;
25799a2dd95SBruce Richardson 
25899a2dd95SBruce Richardson 	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */
25999a2dd95SBruce Richardson 
26099a2dd95SBruce Richardson 	uint32_t populated_size;         /**< Number of populated objects. */
26199a2dd95SBruce Richardson 	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
26299a2dd95SBruce Richardson 	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
26399a2dd95SBruce Richardson 	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */
26499a2dd95SBruce Richardson 
2659d87e05dSMorten Brørup #ifdef RTE_LIBRTE_MEMPOOL_STATS
26617749e4dSMorten Brørup 	/** Per-lcore statistics.
26717749e4dSMorten Brørup 	 *
26817749e4dSMorten Brørup 	 * Plus one, for unregistered non-EAL threads.
26917749e4dSMorten Brørup 	 */
27017749e4dSMorten Brørup 	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
27199a2dd95SBruce Richardson #endif
272c6552d9aSTyler Retzlaff };
27399a2dd95SBruce Richardson 
274925a83a5SAndrew Rybchenko /** Spreading among memory channels not required. */
275c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_NO_SPREAD		0x0001
276c47d7b90SAndrew Rybchenko /**
277c47d7b90SAndrew Rybchenko  * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
278c47d7b90SAndrew Rybchenko  * To be deprecated.
279c47d7b90SAndrew Rybchenko  */
280c47d7b90SAndrew Rybchenko #define MEMPOOL_F_NO_SPREAD		RTE_MEMPOOL_F_NO_SPREAD
281925a83a5SAndrew Rybchenko /** Do not align objects on cache lines. */
282c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_NO_CACHE_ALIGN	0x0002
283c47d7b90SAndrew Rybchenko /**
284c47d7b90SAndrew Rybchenko  * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
285c47d7b90SAndrew Rybchenko  * To be deprecated.
286c47d7b90SAndrew Rybchenko  */
287c47d7b90SAndrew Rybchenko #define MEMPOOL_F_NO_CACHE_ALIGN	RTE_MEMPOOL_F_NO_CACHE_ALIGN
288925a83a5SAndrew Rybchenko /** Default put is "single-producer". */
289c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_SP_PUT		0x0004
290c47d7b90SAndrew Rybchenko /**
291c47d7b90SAndrew Rybchenko  * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
292c47d7b90SAndrew Rybchenko  * To be deprecated.
293c47d7b90SAndrew Rybchenko  */
294c47d7b90SAndrew Rybchenko #define MEMPOOL_F_SP_PUT		RTE_MEMPOOL_F_SP_PUT
295925a83a5SAndrew Rybchenko /** Default get is "single-consumer". */
296c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_SC_GET		0x0008
297c47d7b90SAndrew Rybchenko /**
298c47d7b90SAndrew Rybchenko  * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
299c47d7b90SAndrew Rybchenko  * To be deprecated.
300c47d7b90SAndrew Rybchenko  */
301c47d7b90SAndrew Rybchenko #define MEMPOOL_F_SC_GET		RTE_MEMPOOL_F_SC_GET
302925a83a5SAndrew Rybchenko /** Internal: pool is created. */
303c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_POOL_CREATED	0x0010
304925a83a5SAndrew Rybchenko /** Don't need IOVA contiguous objects. */
305c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_NO_IOVA_CONTIG	0x0020
306c47d7b90SAndrew Rybchenko /**
307c47d7b90SAndrew Rybchenko  * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
308c47d7b90SAndrew Rybchenko  * To be deprecated.
309c47d7b90SAndrew Rybchenko  */
310c47d7b90SAndrew Rybchenko #define MEMPOOL_F_NO_IOVA_CONTIG	RTE_MEMPOOL_F_NO_IOVA_CONTIG
31111541c5cSDmitry Kozlyuk /** Internal: no object from the pool can be used for device IO (DMA). */
312c47d7b90SAndrew Rybchenko #define RTE_MEMPOOL_F_NON_IO		0x0040
31399a2dd95SBruce Richardson 
31499a2dd95SBruce Richardson /**
315afdaa607SDavid Marchand  * This macro lists all the mempool flags an application may request.
316afdaa607SDavid Marchand  */
317afdaa607SDavid Marchand #define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
318afdaa607SDavid Marchand 	| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
319afdaa607SDavid Marchand 	| RTE_MEMPOOL_F_SP_PUT \
320afdaa607SDavid Marchand 	| RTE_MEMPOOL_F_SC_GET \
321afdaa607SDavid Marchand 	| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
322afdaa607SDavid Marchand 	)
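/*
 * Example: an illustrative sketch of passing user flags at creation time,
 * assuming rte_mempool_create() (declared later in this file) and a single
 * producer / single consumer usage model.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("sp_sc_pool", 8192, 2048, 256, 0,
 *			NULL, NULL, NULL, NULL, SOCKET_ID_ANY,
 *			RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET);
 */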
3239d87e05dSMorten Brørup 
324afdaa607SDavid Marchand /**
3259d87e05dSMorten Brørup  * @internal When stats are enabled, update the given per-lcore statistics counter.
32699a2dd95SBruce Richardson  *
32799a2dd95SBruce Richardson  * @param mp
32899a2dd95SBruce Richardson  *   Pointer to the memory pool.
32999a2dd95SBruce Richardson  * @param name
33099a2dd95SBruce Richardson  *   Name of the statistics field to increment in the memory pool.
33199a2dd95SBruce Richardson  * @param n
3329d87e05dSMorten Brørup  *   Number to add to the statistics.
33399a2dd95SBruce Richardson  */
3349d87e05dSMorten Brørup #ifdef RTE_LIBRTE_MEMPOOL_STATS
335ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {                                  \
33617749e4dSMorten Brørup 		unsigned int __lcore_id = rte_lcore_id();                       \
33717749e4dSMorten Brørup 		if (likely(__lcore_id < RTE_MAX_LCORE))                         \
33817749e4dSMorten Brørup 			(mp)->stats[__lcore_id].name += (n);                    \
33917749e4dSMorten Brørup 		else                                                            \
34099966debSTyler Retzlaff 			rte_atomic_fetch_add_explicit(&((mp)->stats[RTE_MAX_LCORE].name),  \
34199966debSTyler Retzlaff 					   (n), rte_memory_order_relaxed);              \
34299a2dd95SBruce Richardson 	} while (0)
34399a2dd95SBruce Richardson #else
344ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
34599a2dd95SBruce Richardson #endif
34699a2dd95SBruce Richardson 
34799a2dd95SBruce Richardson /**
348203dcc9cSMorten Brørup  * @internal When stats are enabled, update the given per-cache statistics counter.
349203dcc9cSMorten Brørup  *
350203dcc9cSMorten Brørup  * @param cache
351203dcc9cSMorten Brørup  *   Pointer to the memory pool cache.
352203dcc9cSMorten Brørup  * @param name
353203dcc9cSMorten Brørup  *   Name of the statistics field to increment in the memory pool cache.
354203dcc9cSMorten Brørup  * @param n
355203dcc9cSMorten Brørup  *   Number to add to the statistics.
356203dcc9cSMorten Brørup  */
357203dcc9cSMorten Brørup #ifdef RTE_LIBRTE_MEMPOOL_STATS
358203dcc9cSMorten Brørup #define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
359203dcc9cSMorten Brørup #else
360203dcc9cSMorten Brørup #define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
361203dcc9cSMorten Brørup #endif
362203dcc9cSMorten Brørup 
363203dcc9cSMorten Brørup /**
364d7203661SAndrew Rybchenko  * @internal Calculate the size of the mempool header.
36599a2dd95SBruce Richardson  *
36699a2dd95SBruce Richardson  * @param mp
36799a2dd95SBruce Richardson  *   Pointer to the memory pool.
36899a2dd95SBruce Richardson  * @param cs
36999a2dd95SBruce Richardson  *   Size of the per-lcore cache.
37099a2dd95SBruce Richardson  */
371d7203661SAndrew Rybchenko #define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
37299a2dd95SBruce Richardson 	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
37399a2dd95SBruce Richardson 	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
37499a2dd95SBruce Richardson 
37599a2dd95SBruce Richardson /* return the header of a mempool object (internal) */
376ad276d5cSAndrew Rybchenko static inline struct rte_mempool_objhdr *
377ad276d5cSAndrew Rybchenko rte_mempool_get_header(void *obj)
37899a2dd95SBruce Richardson {
37999a2dd95SBruce Richardson 	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
38099a2dd95SBruce Richardson 		sizeof(struct rte_mempool_objhdr));
38199a2dd95SBruce Richardson }
38299a2dd95SBruce Richardson 
38399a2dd95SBruce Richardson /**
38499a2dd95SBruce Richardson  * Return a pointer to the mempool owning this object.
38599a2dd95SBruce Richardson  *
38699a2dd95SBruce Richardson  * @param obj
38799a2dd95SBruce Richardson  *   An object that is owned by a pool. If this is not the case,
38899a2dd95SBruce Richardson  *   the behavior is undefined.
38999a2dd95SBruce Richardson  * @return
39099a2dd95SBruce Richardson  *   A pointer to the mempool structure.
39199a2dd95SBruce Richardson  */
39299a2dd95SBruce Richardson static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
39399a2dd95SBruce Richardson {
394ad276d5cSAndrew Rybchenko 	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
39599a2dd95SBruce Richardson 	return hdr->mp;
39699a2dd95SBruce Richardson }
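/*
 * Example: returning an object to whichever mempool owns it, e.g. when the
 * pool pointer was not carried alongside the object. rte_mempool_put() is
 * declared later in this file.
 *
 *	rte_mempool_put(rte_mempool_from_obj(obj), obj);
 */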
39799a2dd95SBruce Richardson 
39899a2dd95SBruce Richardson /* return the trailer of a mempool object (internal) */
399ad276d5cSAndrew Rybchenko static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
40099a2dd95SBruce Richardson {
40199a2dd95SBruce Richardson 	struct rte_mempool *mp = rte_mempool_from_obj(obj);
40299a2dd95SBruce Richardson 	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
40399a2dd95SBruce Richardson }
40499a2dd95SBruce Richardson 
40599a2dd95SBruce Richardson /**
40699a2dd95SBruce Richardson  * @internal Check and update cookies or panic.
40799a2dd95SBruce Richardson  *
40899a2dd95SBruce Richardson  * @param mp
40999a2dd95SBruce Richardson  *   Pointer to the memory pool.
41099a2dd95SBruce Richardson  * @param obj_table_const
41199a2dd95SBruce Richardson  *   Pointer to a table of void * pointers (objects).
41299a2dd95SBruce Richardson  * @param n
41399a2dd95SBruce Richardson  *   Index of object in object table.
41499a2dd95SBruce Richardson  * @param free
41599a2dd95SBruce Richardson  *   - 0: object is supposed to be allocated, mark it as free
41699a2dd95SBruce Richardson  *   - 1: object is supposed to be free, mark it as allocated
41799a2dd95SBruce Richardson  *   - 2: just check that cookie is valid (free or allocated)
41899a2dd95SBruce Richardson  */
41999a2dd95SBruce Richardson void rte_mempool_check_cookies(const struct rte_mempool *mp,
42099a2dd95SBruce Richardson 	void * const *obj_table_const, unsigned n, int free);
42199a2dd95SBruce Richardson 
42299a2dd95SBruce Richardson #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
423ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
42499a2dd95SBruce Richardson 	rte_mempool_check_cookies(mp, obj_table_const, n, free)
42599a2dd95SBruce Richardson #else
426ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
42799a2dd95SBruce Richardson #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
42899a2dd95SBruce Richardson 
42999a2dd95SBruce Richardson /**
43099a2dd95SBruce Richardson  * @internal Check contiguous object blocks and update cookies or panic.
43199a2dd95SBruce Richardson  *
43299a2dd95SBruce Richardson  * @param mp
43399a2dd95SBruce Richardson  *   Pointer to the memory pool.
43499a2dd95SBruce Richardson  * @param first_obj_table_const
43599a2dd95SBruce Richardson  *   Pointer to a table of void * pointers (first object of the contiguous
43699a2dd95SBruce Richardson  *   object blocks).
43799a2dd95SBruce Richardson  * @param n
43899a2dd95SBruce Richardson  *   Number of contiguous object blocks.
43999a2dd95SBruce Richardson  * @param free
44099a2dd95SBruce Richardson  *   - 0: object is supposed to be allocated, mark it as free
44199a2dd95SBruce Richardson  *   - 1: object is supposed to be free, mark it as allocated
44299a2dd95SBruce Richardson  *   - 2: just check that cookie is valid (free or allocated)
44399a2dd95SBruce Richardson  */
44499a2dd95SBruce Richardson void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
44599a2dd95SBruce Richardson 	void * const *first_obj_table_const, unsigned int n, int free);
44699a2dd95SBruce Richardson 
44799a2dd95SBruce Richardson #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
448ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
44999a2dd95SBruce Richardson 						free) \
45099a2dd95SBruce Richardson 	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
45199a2dd95SBruce Richardson 						free)
45299a2dd95SBruce Richardson #else
453ad276d5cSAndrew Rybchenko #define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
45499a2dd95SBruce Richardson 						free) \
45599a2dd95SBruce Richardson 	do {} while (0)
45699a2dd95SBruce Richardson #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
45799a2dd95SBruce Richardson 
45899a2dd95SBruce Richardson #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
45999a2dd95SBruce Richardson 
46099a2dd95SBruce Richardson /**
46199a2dd95SBruce Richardson  * Prototype for an implementation-specific data provisioning function.
46299a2dd95SBruce Richardson  *
46399a2dd95SBruce Richardson  * The function should provide the implementation-specific memory for
46499a2dd95SBruce Richardson  * use by the other mempool ops functions in a given mempool ops struct.
46599a2dd95SBruce Richardson  * E.g. the default ops provides an instance of the rte_ring for this purpose.
46699a2dd95SBruce Richardson  * Other implementations will most likely point to a different type of data
46799a2dd95SBruce Richardson  * structure, which remains transparent to the application programmer.
46899a2dd95SBruce Richardson  * This function should set mp->pool_data.
46999a2dd95SBruce Richardson  */
47099a2dd95SBruce Richardson typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
47199a2dd95SBruce Richardson 
47299a2dd95SBruce Richardson /**
47399a2dd95SBruce Richardson  * Free the opaque private data pointed to by mp->pool_data pointer.
47499a2dd95SBruce Richardson  */
47599a2dd95SBruce Richardson typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
47699a2dd95SBruce Richardson 
47799a2dd95SBruce Richardson /**
47837fc1db9SFerruh Yigit  * Enqueue 'n' objects into the external pool.
47937fc1db9SFerruh Yigit  * @return
48037fc1db9SFerruh Yigit  *   - 0: Success
48137fc1db9SFerruh Yigit  *   - <0: Error
48299a2dd95SBruce Richardson  */
48399a2dd95SBruce Richardson typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
48499a2dd95SBruce Richardson 		void * const *obj_table, unsigned int n);
48599a2dd95SBruce Richardson 
48699a2dd95SBruce Richardson /**
48737fc1db9SFerruh Yigit  * Dequeue 'n' objects from the external pool.
48837fc1db9SFerruh Yigit  * @return
48937fc1db9SFerruh Yigit  *   - 0: Success
49037fc1db9SFerruh Yigit  *   - <0: Error
49199a2dd95SBruce Richardson  */
49299a2dd95SBruce Richardson typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
49399a2dd95SBruce Richardson 		void **obj_table, unsigned int n);
49499a2dd95SBruce Richardson 
49599a2dd95SBruce Richardson /**
49699a2dd95SBruce Richardson  * Dequeue a number of contiguous object blocks from the external pool.
49799a2dd95SBruce Richardson  */
49899a2dd95SBruce Richardson typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
49999a2dd95SBruce Richardson 		 void **first_obj_table, unsigned int n);
50099a2dd95SBruce Richardson 
50199a2dd95SBruce Richardson /**
50299a2dd95SBruce Richardson  * Return the number of available objects in the external pool.
50399a2dd95SBruce Richardson  */
50499a2dd95SBruce Richardson typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
50599a2dd95SBruce Richardson 
50699a2dd95SBruce Richardson /**
50799a2dd95SBruce Richardson  * Calculate the memory size required to store a given number of objects.
50899a2dd95SBruce Richardson  *
50999a2dd95SBruce Richardson  * If mempool objects are not required to be IOVA-contiguous
510c47d7b90SAndrew Rybchenko  * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
51199a2dd95SBruce Richardson  * virtually contiguous chunk size. Otherwise, if mempool objects must
512c47d7b90SAndrew Rybchenko  * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear),
51399a2dd95SBruce Richardson  * min_chunk_size defines IOVA-contiguous chunk size.
51499a2dd95SBruce Richardson  *
51599a2dd95SBruce Richardson  * @param[in] mp
51699a2dd95SBruce Richardson  *   Pointer to the memory pool.
51799a2dd95SBruce Richardson  * @param[in] obj_num
51899a2dd95SBruce Richardson  *   Number of objects.
51999a2dd95SBruce Richardson  * @param[in] pg_shift
52099a2dd95SBruce Richardson  *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
52199a2dd95SBruce Richardson  * @param[out] min_chunk_size
52299a2dd95SBruce Richardson  *   Location for minimum size of the memory chunk which may be used to
52399a2dd95SBruce Richardson  *   store memory pool objects.
52499a2dd95SBruce Richardson  * @param[out] align
52599a2dd95SBruce Richardson  *   Location for required memory chunk alignment.
52699a2dd95SBruce Richardson  * @return
52799a2dd95SBruce Richardson  *   Required memory size.
52899a2dd95SBruce Richardson  */
52999a2dd95SBruce Richardson typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
53099a2dd95SBruce Richardson 		uint32_t obj_num,  uint32_t pg_shift,
53199a2dd95SBruce Richardson 		size_t *min_chunk_size, size_t *align);
53299a2dd95SBruce Richardson 
53399a2dd95SBruce Richardson /**
53499a2dd95SBruce Richardson  * @internal Helper to calculate memory size required to store given
53599a2dd95SBruce Richardson  * number of objects.
53699a2dd95SBruce Richardson  *
53799a2dd95SBruce Richardson  * This function is internal to mempool library and mempool drivers.
53899a2dd95SBruce Richardson  *
53999a2dd95SBruce Richardson  * If page boundaries may be ignored, it is simply the product of the total
54099a2dd95SBruce Richardson  * object size (including header and trailer) and the number of objects.
54199a2dd95SBruce Richardson  * Otherwise, it is the number of pages required to store the given number
54299a2dd95SBruce Richardson  * of objects without crossing a page boundary.
54399a2dd95SBruce Richardson  *
54499a2dd95SBruce Richardson  * Note that if the object size is bigger than the page size, it is assumed
54599a2dd95SBruce Richardson  * that pages are grouped in subsets of physically contiguous pages big
54699a2dd95SBruce Richardson  * enough to store at least one object.
54799a2dd95SBruce Richardson  *
54899a2dd95SBruce Richardson  * Minimum size of memory chunk is the total element size.
54999a2dd95SBruce Richardson  * Required memory chunk alignment is the cache line size.
55099a2dd95SBruce Richardson  *
55199a2dd95SBruce Richardson  * @param[in] mp
55299a2dd95SBruce Richardson  *   A pointer to the mempool structure.
55399a2dd95SBruce Richardson  * @param[in] obj_num
55499a2dd95SBruce Richardson  *   Number of objects to be added in mempool.
55599a2dd95SBruce Richardson  * @param[in] pg_shift
55699a2dd95SBruce Richardson  *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
55799a2dd95SBruce Richardson  * @param[in] chunk_reserve
55899a2dd95SBruce Richardson  *   Amount of memory that must be reserved at the beginning of each page,
55999a2dd95SBruce Richardson  *   or at the beginning of the memory area if pg_shift is 0.
56099a2dd95SBruce Richardson  * @param[out] min_chunk_size
56199a2dd95SBruce Richardson  *   Location for minimum size of the memory chunk which may be used to
56299a2dd95SBruce Richardson  *   store memory pool objects.
56399a2dd95SBruce Richardson  * @param[out] align
56499a2dd95SBruce Richardson  *   Location for required memory chunk alignment.
56599a2dd95SBruce Richardson  * @return
56699a2dd95SBruce Richardson  *   Required memory size.
56799a2dd95SBruce Richardson  */
56899a2dd95SBruce Richardson ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
56999a2dd95SBruce Richardson 		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
57099a2dd95SBruce Richardson 		size_t *min_chunk_size, size_t *align);
57199a2dd95SBruce Richardson 
57299a2dd95SBruce Richardson /**
57399a2dd95SBruce Richardson  * Default way to calculate memory size required to store given number of
57499a2dd95SBruce Richardson  * objects.
57599a2dd95SBruce Richardson  *
57699a2dd95SBruce Richardson  * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
57799a2dd95SBruce Richardson  * 0, min_chunk_size, align).
57899a2dd95SBruce Richardson  */
57999a2dd95SBruce Richardson ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
58099a2dd95SBruce Richardson 		uint32_t obj_num, uint32_t pg_shift,
58199a2dd95SBruce Richardson 		size_t *min_chunk_size, size_t *align);
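/*
 * Example: a sketch of a driver-specific calc_mem_size callback that simply
 * delegates to the helper above while reserving space at the start of each
 * page. The function name and the 64-byte reserve are hypothetical.
 *
 *	static ssize_t
 *	my_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 *			uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
 *	{
 *		return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
 *				64, min_chunk_size, align);
 *	}
 */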
58299a2dd95SBruce Richardson 
58399a2dd95SBruce Richardson /**
58499a2dd95SBruce Richardson  * Function to be called for each populated object.
58599a2dd95SBruce Richardson  *
58699a2dd95SBruce Richardson  * @param[in] mp
58799a2dd95SBruce Richardson  *   A pointer to the mempool structure.
58899a2dd95SBruce Richardson  * @param[in] opaque
58999a2dd95SBruce Richardson  *   An opaque pointer passed to iterator.
59099a2dd95SBruce Richardson  * @param[in] vaddr
59199a2dd95SBruce Richardson  *   Object virtual address.
59299a2dd95SBruce Richardson  * @param[in] iova
59399a2dd95SBruce Richardson  *   I/O virtual address (IOVA) of the object, or RTE_BAD_IOVA.
59499a2dd95SBruce Richardson  */
59599a2dd95SBruce Richardson typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
59699a2dd95SBruce Richardson 		void *opaque, void *vaddr, rte_iova_t iova);
59799a2dd95SBruce Richardson 
59899a2dd95SBruce Richardson /**
59999a2dd95SBruce Richardson  * Populate memory pool objects using provided memory chunk.
60099a2dd95SBruce Richardson  *
60199a2dd95SBruce Richardson  * Populated objects should be enqueued to the pool, e.g. using
60299a2dd95SBruce Richardson  * rte_mempool_ops_enqueue_bulk().
60399a2dd95SBruce Richardson  *
60499a2dd95SBruce Richardson  * If the given IO address is unknown (iova = RTE_BAD_IOVA),
60599a2dd95SBruce Richardson  * the chunk doesn't need to be physically contiguous (only virtually),
60699a2dd95SBruce Richardson  * and allocated objects may span two pages.
60799a2dd95SBruce Richardson  *
60899a2dd95SBruce Richardson  * @param[in] mp
60999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
61099a2dd95SBruce Richardson  * @param[in] max_objs
61199a2dd95SBruce Richardson  *   Maximum number of objects to be populated.
61299a2dd95SBruce Richardson  * @param[in] vaddr
61399a2dd95SBruce Richardson  *   The virtual address of memory that should be used to store objects.
61499a2dd95SBruce Richardson  * @param[in] iova
61599a2dd95SBruce Richardson  *   The IO address
61699a2dd95SBruce Richardson  * @param[in] len
61799a2dd95SBruce Richardson  *   The length of memory in bytes.
61899a2dd95SBruce Richardson  * @param[in] obj_cb
61999a2dd95SBruce Richardson  *   Callback function to be executed for each populated object.
62099a2dd95SBruce Richardson  * @param[in] obj_cb_arg
62199a2dd95SBruce Richardson  *   An opaque pointer passed to the callback function.
62299a2dd95SBruce Richardson  * @return
62399a2dd95SBruce Richardson  *   The number of objects added on success.
62499a2dd95SBruce Richardson  *   On error, no objects are populated and a negative errno is returned.
62599a2dd95SBruce Richardson  */
62699a2dd95SBruce Richardson typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
62799a2dd95SBruce Richardson 		unsigned int max_objs,
62899a2dd95SBruce Richardson 		void *vaddr, rte_iova_t iova, size_t len,
62999a2dd95SBruce Richardson 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
63099a2dd95SBruce Richardson 
63199a2dd95SBruce Richardson /**
63299a2dd95SBruce Richardson  * Align objects on addresses multiple of total_elt_sz.
63399a2dd95SBruce Richardson  */
63499a2dd95SBruce Richardson #define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
63599a2dd95SBruce Richardson 
63699a2dd95SBruce Richardson /**
63799a2dd95SBruce Richardson  * @internal Helper to populate memory pool object using provided memory
63899a2dd95SBruce Richardson  * chunk: just slice objects one by one, taking care of not
63999a2dd95SBruce Richardson  * crossing page boundaries.
64099a2dd95SBruce Richardson  *
64199a2dd95SBruce Richardson  * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
64299a2dd95SBruce Richardson  * of object headers will be aligned on a multiple of total_elt_sz.
64399a2dd95SBruce Richardson  * This feature is used by octeontx hardware.
64499a2dd95SBruce Richardson  *
64599a2dd95SBruce Richardson  * This function is internal to mempool library and mempool drivers.
64699a2dd95SBruce Richardson  *
64799a2dd95SBruce Richardson  * @param[in] mp
64899a2dd95SBruce Richardson  *   A pointer to the mempool structure.
64999a2dd95SBruce Richardson  * @param[in] flags
65099a2dd95SBruce Richardson  *   Logical OR of following flags:
65199a2dd95SBruce Richardson  *   - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
65299a2dd95SBruce Richardson  *     multiple of total_elt_sz.
65399a2dd95SBruce Richardson  * @param[in] max_objs
65499a2dd95SBruce Richardson  *   Maximum number of objects to be added in mempool.
65599a2dd95SBruce Richardson  * @param[in] vaddr
65699a2dd95SBruce Richardson  *   The virtual address of memory that should be used to store objects.
65799a2dd95SBruce Richardson  * @param[in] iova
65899a2dd95SBruce Richardson  *   The IO address corresponding to vaddr, or RTE_BAD_IOVA.
65999a2dd95SBruce Richardson  * @param[in] len
66099a2dd95SBruce Richardson  *   The length of memory in bytes.
66199a2dd95SBruce Richardson  * @param[in] obj_cb
66299a2dd95SBruce Richardson  *   Callback function to be executed for each populated object.
66399a2dd95SBruce Richardson  * @param[in] obj_cb_arg
66499a2dd95SBruce Richardson  *   An opaque pointer passed to the callback function.
66599a2dd95SBruce Richardson  * @return
66699a2dd95SBruce Richardson  *   The number of objects added in mempool.
66799a2dd95SBruce Richardson  */
66899a2dd95SBruce Richardson int rte_mempool_op_populate_helper(struct rte_mempool *mp,
66999a2dd95SBruce Richardson 		unsigned int flags, unsigned int max_objs,
67099a2dd95SBruce Richardson 		void *vaddr, rte_iova_t iova, size_t len,
67199a2dd95SBruce Richardson 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
67299a2dd95SBruce Richardson 
67399a2dd95SBruce Richardson /**
67499a2dd95SBruce Richardson  * Default way to populate memory pool object using provided memory chunk.
67599a2dd95SBruce Richardson  *
67699a2dd95SBruce Richardson  * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
67799a2dd95SBruce Richardson  * len, obj_cb, obj_cb_arg).
67899a2dd95SBruce Richardson  */
67999a2dd95SBruce Richardson int rte_mempool_op_populate_default(struct rte_mempool *mp,
68099a2dd95SBruce Richardson 		unsigned int max_objs,
68199a2dd95SBruce Richardson 		void *vaddr, rte_iova_t iova, size_t len,
68299a2dd95SBruce Richardson 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
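/*
 * Example: a sketch of a driver-specific populate callback that delegates to
 * the helper above, requesting object alignment on total_elt_sz as some
 * hardware (e.g. octeontx) needs. The function name is hypothetical.
 *
 *	static int
 *	my_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
 *			rte_iova_t iova, size_t len,
 *			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 *	{
 *		return rte_mempool_op_populate_helper(mp,
 *				RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ, max_objs,
 *				vaddr, iova, len, obj_cb, obj_cb_arg);
 *	}
 */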
68399a2dd95SBruce Richardson 
68499a2dd95SBruce Richardson /**
68599a2dd95SBruce Richardson  * Get some additional information about a mempool.
68699a2dd95SBruce Richardson  */
68799a2dd95SBruce Richardson typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
68899a2dd95SBruce Richardson 		struct rte_mempool_info *info);
68999a2dd95SBruce Richardson 
69099a2dd95SBruce Richardson 
69199a2dd95SBruce Richardson /** Structure defining mempool operations structure */
692c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool_ops {
69399a2dd95SBruce Richardson 	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
69499a2dd95SBruce Richardson 	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
69599a2dd95SBruce Richardson 	rte_mempool_free_t free;         /**< Free the external pool. */
69699a2dd95SBruce Richardson 	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
69799a2dd95SBruce Richardson 	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
69899a2dd95SBruce Richardson 	rte_mempool_get_count get_count; /**< Get qty of available objs. */
69999a2dd95SBruce Richardson 	/**
70099a2dd95SBruce Richardson 	 * Optional callback to calculate memory size required to
70199a2dd95SBruce Richardson 	 * store specified number of objects.
70299a2dd95SBruce Richardson 	 */
70399a2dd95SBruce Richardson 	rte_mempool_calc_mem_size_t calc_mem_size;
70499a2dd95SBruce Richardson 	/**
70599a2dd95SBruce Richardson 	 * Optional callback to populate mempool objects using
70699a2dd95SBruce Richardson 	 * provided memory chunk.
70799a2dd95SBruce Richardson 	 */
70899a2dd95SBruce Richardson 	rte_mempool_populate_t populate;
70999a2dd95SBruce Richardson 	/**
71099a2dd95SBruce Richardson 	 * Get mempool info
71199a2dd95SBruce Richardson 	 */
71299a2dd95SBruce Richardson 	rte_mempool_get_info_t get_info;
71399a2dd95SBruce Richardson 	/**
71499a2dd95SBruce Richardson 	 * Dequeue a number of contiguous object blocks.
71599a2dd95SBruce Richardson 	 */
71699a2dd95SBruce Richardson 	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
717c6552d9aSTyler Retzlaff };
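/*
 * Example: a sketch of a minimal custom ops structure. The my_* callbacks are
 * hypothetical placeholders matching the typedefs above; such a structure
 * would typically be registered with rte_mempool_register_ops() (see the end
 * of this section) and then selected per pool via rte_mempool_set_ops_byname().
 *
 *	static const struct rte_mempool_ops my_mempool_ops = {
 *		.name = "my_ops",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *		// calc_mem_size, populate, get_info and dequeue_contig_blocks
 *		// are optional and may be left NULL.
 *	};
 */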
71899a2dd95SBruce Richardson 
71999a2dd95SBruce Richardson #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
72099a2dd95SBruce Richardson 
72199a2dd95SBruce Richardson /**
72299a2dd95SBruce Richardson  * Structure storing the table of registered ops structs, each of which contain
72299a2dd95SBruce Richardson  * Structure storing the table of registered ops structs, each of which contains
72499a2dd95SBruce Richardson  * Each process has its own storage for this ops struct array so that
72599a2dd95SBruce Richardson  * the mempools can be shared across primary and secondary processes.
72699a2dd95SBruce Richardson  * The indices used to access the array are valid across processes, whereas
72799a2dd95SBruce Richardson  * any function pointers stored directly in the mempool struct would not be.
72899a2dd95SBruce Richardson  * This results in us simply having "ops_index" in the mempool struct.
72999a2dd95SBruce Richardson  */
730c6552d9aSTyler Retzlaff struct __rte_cache_aligned rte_mempool_ops_table {
73199a2dd95SBruce Richardson 	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
73299a2dd95SBruce Richardson 	uint32_t num_ops;      /**< Number of used ops structs in the table. */
73399a2dd95SBruce Richardson 	/**
73499a2dd95SBruce Richardson 	 * Storage for all possible ops structs.
73599a2dd95SBruce Richardson 	 */
73699a2dd95SBruce Richardson 	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
737c6552d9aSTyler Retzlaff };
73899a2dd95SBruce Richardson 
73999a2dd95SBruce Richardson /** Array of registered ops structs. */
74099a2dd95SBruce Richardson extern struct rte_mempool_ops_table rte_mempool_ops_table;
74199a2dd95SBruce Richardson 
74299a2dd95SBruce Richardson /**
74399a2dd95SBruce Richardson  * @internal Get the mempool ops struct from its index.
74499a2dd95SBruce Richardson  *
74599a2dd95SBruce Richardson  * @param ops_index
74699a2dd95SBruce Richardson  *   The index of the ops struct in the ops struct table. It must be a valid
74799a2dd95SBruce Richardson  *   index: (0 <= idx < num_ops).
74899a2dd95SBruce Richardson  * @return
74999a2dd95SBruce Richardson  *   The pointer to the ops struct in the table.
75099a2dd95SBruce Richardson  */
75199a2dd95SBruce Richardson static inline struct rte_mempool_ops *
75299a2dd95SBruce Richardson rte_mempool_get_ops(int ops_index)
75399a2dd95SBruce Richardson {
75499a2dd95SBruce Richardson 	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
75599a2dd95SBruce Richardson 
75699a2dd95SBruce Richardson 	return &rte_mempool_ops_table.ops[ops_index];
75799a2dd95SBruce Richardson }
75899a2dd95SBruce Richardson 
75999a2dd95SBruce Richardson /**
76099a2dd95SBruce Richardson  * @internal Wrapper for mempool_ops alloc callback.
76199a2dd95SBruce Richardson  *
76299a2dd95SBruce Richardson  * @param mp
76399a2dd95SBruce Richardson  *   Pointer to the memory pool.
76499a2dd95SBruce Richardson  * @return
76599a2dd95SBruce Richardson  *   - 0: Success; successfully allocated mempool pool_data.
76699a2dd95SBruce Richardson  *   - <0: Error; code of alloc function.
76799a2dd95SBruce Richardson  */
76899a2dd95SBruce Richardson int
76999a2dd95SBruce Richardson rte_mempool_ops_alloc(struct rte_mempool *mp);
77099a2dd95SBruce Richardson 
77199a2dd95SBruce Richardson /**
77299a2dd95SBruce Richardson  * @internal Wrapper for mempool_ops dequeue callback.
77399a2dd95SBruce Richardson  *
77499a2dd95SBruce Richardson  * @param mp
77599a2dd95SBruce Richardson  *   Pointer to the memory pool.
77699a2dd95SBruce Richardson  * @param obj_table
77799a2dd95SBruce Richardson  *   Pointer to a table of void * pointers (objects).
77899a2dd95SBruce Richardson  * @param n
77999a2dd95SBruce Richardson  *   Number of objects to get.
78099a2dd95SBruce Richardson  * @return
78199a2dd95SBruce Richardson  *   - 0: Success; got n objects.
78299a2dd95SBruce Richardson  *   - <0: Error; code of dequeue function.
78399a2dd95SBruce Richardson  */
78499a2dd95SBruce Richardson static inline int
78599a2dd95SBruce Richardson rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
78699a2dd95SBruce Richardson 		void **obj_table, unsigned n)
78799a2dd95SBruce Richardson {
78899a2dd95SBruce Richardson 	struct rte_mempool_ops *ops;
789cee151b4SJoyce Kong 	int ret;
79099a2dd95SBruce Richardson 
79199a2dd95SBruce Richardson 	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
79299a2dd95SBruce Richardson 	ops = rte_mempool_get_ops(mp->ops_index);
793cee151b4SJoyce Kong 	ret = ops->dequeue(mp, obj_table, n);
794cee151b4SJoyce Kong 	if (ret == 0) {
795ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
796ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
797cee151b4SJoyce Kong 	}
798cee151b4SJoyce Kong 	return ret;
79999a2dd95SBruce Richardson }
80099a2dd95SBruce Richardson 
80199a2dd95SBruce Richardson /**
80299a2dd95SBruce Richardson  * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
80399a2dd95SBruce Richardson  *
80499a2dd95SBruce Richardson  * @param[in] mp
80599a2dd95SBruce Richardson  *   Pointer to the memory pool.
80699a2dd95SBruce Richardson  * @param[out] first_obj_table
80799a2dd95SBruce Richardson  *   Pointer to a table of void * pointers (first objects).
80899a2dd95SBruce Richardson  * @param[in] n
80999a2dd95SBruce Richardson  *   Number of blocks to get.
81099a2dd95SBruce Richardson  * @return
81199a2dd95SBruce Richardson  *   - 0: Success; got n objects.
81299a2dd95SBruce Richardson  *   - <0: Error; code of dequeue function.
81399a2dd95SBruce Richardson  */
81499a2dd95SBruce Richardson static inline int
81599a2dd95SBruce Richardson rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
81699a2dd95SBruce Richardson 		void **first_obj_table, unsigned int n)
81799a2dd95SBruce Richardson {
81899a2dd95SBruce Richardson 	struct rte_mempool_ops *ops;
81999a2dd95SBruce Richardson 
82099a2dd95SBruce Richardson 	ops = rte_mempool_get_ops(mp->ops_index);
82199a2dd95SBruce Richardson 	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
82299a2dd95SBruce Richardson 	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
82399a2dd95SBruce Richardson 	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
82499a2dd95SBruce Richardson }
82599a2dd95SBruce Richardson 
82699a2dd95SBruce Richardson /**
82799a2dd95SBruce Richardson  * @internal wrapper for mempool_ops enqueue callback.
82899a2dd95SBruce Richardson  *
82999a2dd95SBruce Richardson  * @param mp
83099a2dd95SBruce Richardson  *   Pointer to the memory pool.
83199a2dd95SBruce Richardson  * @param obj_table
83299a2dd95SBruce Richardson  *   Pointer to a table of void * pointers (objects).
83399a2dd95SBruce Richardson  * @param n
83499a2dd95SBruce Richardson  *   Number of objects to put.
83599a2dd95SBruce Richardson  * @return
83699a2dd95SBruce Richardson  *   - 0: Success; n objects supplied.
83799a2dd95SBruce Richardson  *   - <0: Error; code of enqueue function.
83899a2dd95SBruce Richardson  */
83999a2dd95SBruce Richardson static inline int
84099a2dd95SBruce Richardson rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
84199a2dd95SBruce Richardson 		unsigned n)
84299a2dd95SBruce Richardson {
84399a2dd95SBruce Richardson 	struct rte_mempool_ops *ops;
844e3f138aaSAndrew Rybchenko 	int ret;
84599a2dd95SBruce Richardson 
846ad276d5cSAndrew Rybchenko 	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
847ad276d5cSAndrew Rybchenko 	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
84899a2dd95SBruce Richardson 	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
84999a2dd95SBruce Richardson 	ops = rte_mempool_get_ops(mp->ops_index);
850e3f138aaSAndrew Rybchenko 	ret = ops->enqueue(mp, obj_table, n);
851e3f138aaSAndrew Rybchenko #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
852e3f138aaSAndrew Rybchenko 	if (unlikely(ret < 0))
853ae67895bSDavid Marchand 		RTE_MEMPOOL_LOG(CRIT, "cannot enqueue %u objects to mempool %s",
854e3f138aaSAndrew Rybchenko 			n, mp->name);
855e3f138aaSAndrew Rybchenko #endif
856e3f138aaSAndrew Rybchenko 	return ret;
85799a2dd95SBruce Richardson }
85899a2dd95SBruce Richardson 
85999a2dd95SBruce Richardson /**
86099a2dd95SBruce Richardson  * @internal wrapper for mempool_ops get_count callback.
86199a2dd95SBruce Richardson  *
86299a2dd95SBruce Richardson  * @param mp
86399a2dd95SBruce Richardson  *   Pointer to the memory pool.
86499a2dd95SBruce Richardson  * @return
86599a2dd95SBruce Richardson  *   The number of available objects in the external pool.
86699a2dd95SBruce Richardson  */
86799a2dd95SBruce Richardson unsigned
86899a2dd95SBruce Richardson rte_mempool_ops_get_count(const struct rte_mempool *mp);
86999a2dd95SBruce Richardson 
87099a2dd95SBruce Richardson /**
87199a2dd95SBruce Richardson  * @internal wrapper for mempool_ops calc_mem_size callback.
87299a2dd95SBruce Richardson  * API to calculate the size of memory required to store a specified number
87399a2dd95SBruce Richardson  * of objects.
87499a2dd95SBruce Richardson  *
87599a2dd95SBruce Richardson  * @param[in] mp
87699a2dd95SBruce Richardson  *   Pointer to the memory pool.
87799a2dd95SBruce Richardson  * @param[in] obj_num
87899a2dd95SBruce Richardson  *   Number of objects.
87999a2dd95SBruce Richardson  * @param[in] pg_shift
88099a2dd95SBruce Richardson  *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
88199a2dd95SBruce Richardson  * @param[out] min_chunk_size
88299a2dd95SBruce Richardson  *   Location for minimum size of the memory chunk which may be used to
88399a2dd95SBruce Richardson  *   store memory pool objects.
88499a2dd95SBruce Richardson  * @param[out] align
88599a2dd95SBruce Richardson  *   Location for required memory chunk alignment.
88699a2dd95SBruce Richardson  * @return
88799a2dd95SBruce Richardson  *   Required memory size aligned at page boundary.
88899a2dd95SBruce Richardson  */
88999a2dd95SBruce Richardson ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
89099a2dd95SBruce Richardson 				      uint32_t obj_num, uint32_t pg_shift,
89199a2dd95SBruce Richardson 				      size_t *min_chunk_size, size_t *align);
89299a2dd95SBruce Richardson 
89399a2dd95SBruce Richardson /**
89499a2dd95SBruce Richardson  * @internal wrapper for mempool_ops populate callback.
89599a2dd95SBruce Richardson  *
89699a2dd95SBruce Richardson  * Populate memory pool objects using provided memory chunk.
89799a2dd95SBruce Richardson  *
89899a2dd95SBruce Richardson  * @param[in] mp
89999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
90099a2dd95SBruce Richardson  * @param[in] max_objs
90199a2dd95SBruce Richardson  *   Maximum number of objects to be populated.
90299a2dd95SBruce Richardson  * @param[in] vaddr
90399a2dd95SBruce Richardson  *   The virtual address of memory that should be used to store objects.
90499a2dd95SBruce Richardson  * @param[in] iova
90599a2dd95SBruce Richardson  *   The IO address
90699a2dd95SBruce Richardson  * @param[in] len
90799a2dd95SBruce Richardson  *   The length of memory in bytes.
90899a2dd95SBruce Richardson  * @param[in] obj_cb
90999a2dd95SBruce Richardson  *   Callback function to be executed for each populated object.
91099a2dd95SBruce Richardson  * @param[in] obj_cb_arg
91199a2dd95SBruce Richardson  *   An opaque pointer passed to the callback function.
91299a2dd95SBruce Richardson  * @return
91399a2dd95SBruce Richardson  *   The number of objects added on success.
91499a2dd95SBruce Richardson  *   On error, no objects are populated and a negative errno is returned.
91599a2dd95SBruce Richardson  */
91699a2dd95SBruce Richardson int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
91799a2dd95SBruce Richardson 			     void *vaddr, rte_iova_t iova, size_t len,
91899a2dd95SBruce Richardson 			     rte_mempool_populate_obj_cb_t *obj_cb,
91999a2dd95SBruce Richardson 			     void *obj_cb_arg);
92099a2dd95SBruce Richardson 
92199a2dd95SBruce Richardson /**
92299a2dd95SBruce Richardson  * Wrapper for mempool_ops get_info callback.
92399a2dd95SBruce Richardson  *
92499a2dd95SBruce Richardson  * @param[in] mp
92599a2dd95SBruce Richardson  *   Pointer to the memory pool.
92699a2dd95SBruce Richardson  * @param[out] info
92799a2dd95SBruce Richardson  *   Pointer to the rte_mempool_info structure
92899a2dd95SBruce Richardson  * @return
92999a2dd95SBruce Richardson  *   - 0: Success; the mempool driver supports retrieving supplementary
93099a2dd95SBruce Richardson  *        mempool information.
93199a2dd95SBruce Richardson  *   - -ENOTSUP - the driver does not support the get_info ops (valid case).
93299a2dd95SBruce Richardson  */
93399a2dd95SBruce Richardson int rte_mempool_ops_get_info(const struct rte_mempool *mp,
93499a2dd95SBruce Richardson 			 struct rte_mempool_info *info);
93599a2dd95SBruce Richardson 
93699a2dd95SBruce Richardson /**
93799a2dd95SBruce Richardson  * @internal wrapper for mempool_ops free callback.
93899a2dd95SBruce Richardson  *
93999a2dd95SBruce Richardson  * @param mp
94099a2dd95SBruce Richardson  *   Pointer to the memory pool.
94199a2dd95SBruce Richardson  */
94299a2dd95SBruce Richardson void
94399a2dd95SBruce Richardson rte_mempool_ops_free(struct rte_mempool *mp);
94499a2dd95SBruce Richardson 
94599a2dd95SBruce Richardson /**
94699a2dd95SBruce Richardson  * Set the ops of a mempool.
94799a2dd95SBruce Richardson  *
94899a2dd95SBruce Richardson  * This can only be done on a mempool that is not populated, i.e. just after
94999a2dd95SBruce Richardson  * a call to rte_mempool_create_empty().
95099a2dd95SBruce Richardson  *
95199a2dd95SBruce Richardson  * @param mp
95299a2dd95SBruce Richardson  *   Pointer to the memory pool.
95399a2dd95SBruce Richardson  * @param name
95499a2dd95SBruce Richardson  *   Name of the ops structure to use for this mempool.
95599a2dd95SBruce Richardson  * @param pool_config
95699a2dd95SBruce Richardson  *   Opaque data that can be passed by the application to the ops functions.
95799a2dd95SBruce Richardson  * @return
95899a2dd95SBruce Richardson  *   - 0: Success; the mempool is now using the requested ops functions.
95999a2dd95SBruce Richardson  *   - -EINVAL - Invalid ops struct name provided.
96099a2dd95SBruce Richardson  *   - -EEXIST - mempool already has an ops struct assigned.
96199a2dd95SBruce Richardson  */
96299a2dd95SBruce Richardson int
96399a2dd95SBruce Richardson rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
96499a2dd95SBruce Richardson 		void *pool_config);
96599a2dd95SBruce Richardson 
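/*
 * Illustrative sketch of the required ordering: select the ops right after
 * rte_mempool_create_empty() and before any rte_mempool_populate_*() call.
 * The pool name, sizes and the helper name below are placeholders;
 * "ring_mp_mc" is the multi-producer/multi-consumer ring handler.
 *
 *   #include <rte_mempool.h>
 *
 *   static struct rte_mempool *
 *   sketch_pool_with_ops(void)
 *   {
 *       struct rte_mempool *mp;
 *
 *       mp = rte_mempool_create_empty("sketch_pool", 8191, 2048,
 *                                     256, 0, SOCKET_ID_ANY, 0);
 *       if (mp == NULL)
 *           return NULL;
 *
 *       // The ops must be set while the pool is still unpopulated.
 *       if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
 *           rte_mempool_populate_default(mp) < 0) {
 *           rte_mempool_free(mp);
 *           return NULL;
 *       }
 *       return mp;
 *   }
 */
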
96699a2dd95SBruce Richardson /**
96799a2dd95SBruce Richardson  * Register mempool operations.
96899a2dd95SBruce Richardson  *
96999a2dd95SBruce Richardson  * @param ops
97099a2dd95SBruce Richardson  *   Pointer to an ops structure to register.
97199a2dd95SBruce Richardson  * @return
97299a2dd95SBruce Richardson  *   - >=0: Success; return the index of the ops struct in the table.
97399a2dd95SBruce Richardson  *   - -EINVAL - some missing callbacks while registering ops struct.
97499a2dd95SBruce Richardson  *   - -ENOSPC - the maximum number of ops structs has been reached.
97599a2dd95SBruce Richardson  */
97699a2dd95SBruce Richardson int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
97799a2dd95SBruce Richardson 
97899a2dd95SBruce Richardson /**
97999a2dd95SBruce Richardson  * Macro to statically register the ops of a mempool handler.
98099a2dd95SBruce Richardson  * Note that rte_mempool_register_ops() fails silently here when more
98199a2dd95SBruce Richardson  * than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
98299a2dd95SBruce Richardson  */
983cb77b060SAndrew Rybchenko #define RTE_MEMPOOL_REGISTER_OPS(ops)				\
98499a2dd95SBruce Richardson 	RTE_INIT(mp_hdlr_init_##ops)				\
98599a2dd95SBruce Richardson 	{							\
98699a2dd95SBruce Richardson 		rte_mempool_register_ops(&ops);			\
98799a2dd95SBruce Richardson 	}
98899a2dd95SBruce Richardson 
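/*
 * Illustrative sketch of a statically registered handler. The handler name
 * "sketch_handler", the callback names and their do-nothing bodies are
 * placeholders; a real driver provides backing storage in these callbacks,
 * which must match the callback typedefs declared earlier in this file.
 *
 *   #include <errno.h>
 *   #include <rte_mempool.h>
 *
 *   static int
 *   sketch_alloc(struct rte_mempool *mp)
 *   {
 *       RTE_SET_USED(mp);      // a real driver allocates its ring/stack here
 *       return 0;
 *   }
 *
 *   static void
 *   sketch_free(struct rte_mempool *mp)
 *   {
 *       RTE_SET_USED(mp);
 *   }
 *
 *   static int
 *   sketch_enqueue(struct rte_mempool *mp, void * const *obj_table,
 *                  unsigned int n)
 *   {
 *       RTE_SET_USED(mp); RTE_SET_USED(obj_table); RTE_SET_USED(n);
 *       return -ENOBUFS;       // placeholder: no storage behind this sketch
 *   }
 *
 *   static int
 *   sketch_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
 *   {
 *       RTE_SET_USED(mp); RTE_SET_USED(obj_table); RTE_SET_USED(n);
 *       return -ENOBUFS;
 *   }
 *
 *   static unsigned int
 *   sketch_get_count(const struct rte_mempool *mp)
 *   {
 *       RTE_SET_USED(mp);
 *       return 0;
 *   }
 *
 *   static const struct rte_mempool_ops sketch_ops = {
 *       .name = "sketch_handler",
 *       .alloc = sketch_alloc,
 *       .free = sketch_free,
 *       .enqueue = sketch_enqueue,
 *       .dequeue = sketch_dequeue,
 *       .get_count = sketch_get_count,
 *   };
 *
 *   RTE_MEMPOOL_REGISTER_OPS(sketch_ops);
 */
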
98999a2dd95SBruce Richardson /**
99099a2dd95SBruce Richardson  * An object callback function for mempool.
99199a2dd95SBruce Richardson  *
99299a2dd95SBruce Richardson  * Used by rte_mempool_create() and rte_mempool_obj_iter().
99399a2dd95SBruce Richardson  */
99499a2dd95SBruce Richardson typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
99599a2dd95SBruce Richardson 		void *opaque, void *obj, unsigned obj_idx);
99699a2dd95SBruce Richardson typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
99799a2dd95SBruce Richardson 
99899a2dd95SBruce Richardson /**
99999a2dd95SBruce Richardson  * A memory callback function for mempool.
100099a2dd95SBruce Richardson  *
100199a2dd95SBruce Richardson  * Used by rte_mempool_mem_iter().
100299a2dd95SBruce Richardson  */
100399a2dd95SBruce Richardson typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
100499a2dd95SBruce Richardson 		void *opaque, struct rte_mempool_memhdr *memhdr,
100599a2dd95SBruce Richardson 		unsigned mem_idx);
100699a2dd95SBruce Richardson 
100799a2dd95SBruce Richardson /**
100899a2dd95SBruce Richardson  * A mempool constructor callback function.
100999a2dd95SBruce Richardson  *
101099a2dd95SBruce Richardson  * Arguments are the mempool and the opaque pointer given by the user in
101199a2dd95SBruce Richardson  * rte_mempool_create().
101299a2dd95SBruce Richardson  */
101399a2dd95SBruce Richardson typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
101499a2dd95SBruce Richardson 
101599a2dd95SBruce Richardson /**
101699a2dd95SBruce Richardson  * Create a new mempool named *name* in memory.
101799a2dd95SBruce Richardson  *
101899a2dd95SBruce Richardson  * This function uses ``rte_memzone_reserve()`` to allocate memory. The
101999a2dd95SBruce Richardson  * pool contains n elements of elt_size. Its size is set to n.
102099a2dd95SBruce Richardson  *
102199a2dd95SBruce Richardson  * @param name
102299a2dd95SBruce Richardson  *   The name of the mempool.
102399a2dd95SBruce Richardson  * @param n
102499a2dd95SBruce Richardson  *   The number of elements in the mempool. The optimum size (in terms of
102599a2dd95SBruce Richardson  *   memory usage) for a mempool is when n is a power of two minus one:
102699a2dd95SBruce Richardson  *   n = (2^q - 1).
102799a2dd95SBruce Richardson  * @param elt_size
102899a2dd95SBruce Richardson  *   The size of each element.
102999a2dd95SBruce Richardson  * @param cache_size
103099a2dd95SBruce Richardson  *   If cache_size is non-zero, the rte_mempool library will try to
103199a2dd95SBruce Richardson  *   limit the accesses to the common lockless pool, by maintaining a
103299a2dd95SBruce Richardson  *   per-lcore object cache. This argument must be lower than or equal
103399a2dd95SBruce Richardson  *   to RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
103499a2dd95SBruce Richardson  *   cache_size so that "n modulo cache_size == 0": if this is
103599a2dd95SBruce Richardson  *   not the case, some elements will always stay in the pool and will
103699a2dd95SBruce Richardson  *   never be used. Access to the per-lcore cache is of course faster
103799a2dd95SBruce Richardson  *   than access to the multi-producer/consumer pool. The cache can be
103899a2dd95SBruce Richardson  *   disabled by setting the cache_size argument to 0; this can be useful
103999a2dd95SBruce Richardson  *   to avoid keeping objects idle in the per-lcore caches.
104099a2dd95SBruce Richardson  * @param private_data_size
104199a2dd95SBruce Richardson  *   The size of the private data appended after the mempool
104299a2dd95SBruce Richardson  *   structure. This is useful for storing some private data after the
104399a2dd95SBruce Richardson  *   mempool structure, as is done for rte_mbuf_pool for example.
104499a2dd95SBruce Richardson  * @param mp_init
104599a2dd95SBruce Richardson  *   A function pointer that is called for initialization of the pool,
104699a2dd95SBruce Richardson  *   before object initialization. The user can initialize the private
104799a2dd95SBruce Richardson  *   data in this function if needed. This parameter can be NULL if
104899a2dd95SBruce Richardson  *   not needed.
104999a2dd95SBruce Richardson  * @param mp_init_arg
105099a2dd95SBruce Richardson  *   An opaque pointer to data that can be used in the mempool
105199a2dd95SBruce Richardson  *   constructor function.
105299a2dd95SBruce Richardson  * @param obj_init
105399a2dd95SBruce Richardson  *   A function pointer that is called for each object at
105499a2dd95SBruce Richardson  *   initialization of the pool. The user can set some meta data in
105599a2dd95SBruce Richardson  *   objects if needed. This parameter can be NULL if not needed.
105699a2dd95SBruce Richardson  *   The obj_init() function takes the mempool pointer, the init_arg,
105799a2dd95SBruce Richardson  *   the object pointer and the object number as parameters.
105899a2dd95SBruce Richardson  * @param obj_init_arg
105999a2dd95SBruce Richardson  *   An opaque pointer to data that can be used as an argument for
106099a2dd95SBruce Richardson  *   each call to the object constructor function.
106199a2dd95SBruce Richardson  * @param socket_id
106299a2dd95SBruce Richardson  *   The *socket_id* argument is the socket identifier in the case of
106399a2dd95SBruce Richardson  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
106499a2dd95SBruce Richardson  *   constraint for the reserved zone.
106599a2dd95SBruce Richardson  * @param flags
106699a2dd95SBruce Richardson  *   The *flags* argument is an OR of the following flags:
1067c47d7b90SAndrew Rybchenko  *   - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
106899a2dd95SBruce Richardson  *     between channels in RAM: the pool allocator will add padding
106999a2dd95SBruce Richardson  *     between objects depending on the hardware configuration. See
107099a2dd95SBruce Richardson  *     Memory alignment constraints for details. If this flag is set,
107199a2dd95SBruce Richardson  *     the allocator will just align them to a cache line.
1072c47d7b90SAndrew Rybchenko  *   - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
107399a2dd95SBruce Richardson  *     cache-aligned. This flag removes this constraint, and no
107499a2dd95SBruce Richardson  *     padding will be present between objects. This flag implies
1075c47d7b90SAndrew Rybchenko  *     RTE_MEMPOOL_F_NO_SPREAD.
1076c47d7b90SAndrew Rybchenko  *   - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
107799a2dd95SBruce Richardson  *     when using rte_mempool_put() or rte_mempool_put_bulk() is
107899a2dd95SBruce Richardson  *     "single-producer". Otherwise, it is "multi-producers".
1079c47d7b90SAndrew Rybchenko  *   - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior
108099a2dd95SBruce Richardson  *     when using rte_mempool_get() or rte_mempool_get_bulk() is
108199a2dd95SBruce Richardson  *     "single-consumer". Otherwise, it is "multi-consumers".
1082c47d7b90SAndrew Rybchenko  *   - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
108399a2dd95SBruce Richardson  *     necessarily be contiguous in IO memory.
108499a2dd95SBruce Richardson  * @return
108599a2dd95SBruce Richardson  *   The pointer to the new allocated mempool, on success. NULL on error
108699a2dd95SBruce Richardson  *   with rte_errno set appropriately. Possible rte_errno values include:
108799a2dd95SBruce Richardson  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
1088b240af8bSDavid Marchand  *    - EINVAL - cache size provided is too large or an unknown flag was passed
108999a2dd95SBruce Richardson  *    - ENOSPC - the maximum number of memzones has already been allocated
109099a2dd95SBruce Richardson  *    - EEXIST - a memzone with the same name already exists
109199a2dd95SBruce Richardson  *    - ENOMEM - no appropriate memory area found in which to create memzone
109299a2dd95SBruce Richardson  */
109399a2dd95SBruce Richardson struct rte_mempool *
109499a2dd95SBruce Richardson rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
109599a2dd95SBruce Richardson 		   unsigned cache_size, unsigned private_data_size,
109699a2dd95SBruce Richardson 		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
109799a2dd95SBruce Richardson 		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
109899a2dd95SBruce Richardson 		   int socket_id, unsigned flags);
109999a2dd95SBruce Richardson 
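/*
 * Illustrative sketch of a basic creation call. The pool name, element
 * count (2^13 - 1), element size and cache size below are placeholders
 * chosen to respect the constraints documented above.
 *
 *   #include <stdio.h>
 *   #include <rte_errno.h>
 *   #include <rte_mempool.h>
 *
 *   static struct rte_mempool *
 *   sketch_create_pool(void)
 *   {
 *       struct rte_mempool *mp;
 *
 *       mp = rte_mempool_create("sketch_elt_pool", 8191, 2048, 256, 0,
 *                               NULL, NULL,  // no pool constructor
 *                               NULL, NULL,  // no per-object constructor
 *                               SOCKET_ID_ANY, 0);
 *       if (mp == NULL)
 *           printf("mempool creation failed: %s\n",
 *                  rte_strerror(rte_errno));
 *       return mp;
 *   }
 */
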
110099a2dd95SBruce Richardson /**
110199a2dd95SBruce Richardson  * Create an empty mempool
110299a2dd95SBruce Richardson  *
110399a2dd95SBruce Richardson  * The mempool is allocated and initialized, but it is not populated: no
110499a2dd95SBruce Richardson  * memory is allocated for the mempool elements. The user has to call
110599a2dd95SBruce Richardson  * rte_mempool_populate_*() to add memory chunks to the pool. Once
110699a2dd95SBruce Richardson  * populated, the user may also want to initialize each object with
110799a2dd95SBruce Richardson  * rte_mempool_obj_iter().
110899a2dd95SBruce Richardson  *
110999a2dd95SBruce Richardson  * @param name
111099a2dd95SBruce Richardson  *   The name of the mempool.
111199a2dd95SBruce Richardson  * @param n
111299a2dd95SBruce Richardson  *   The maximum number of elements that can be added in the mempool.
111399a2dd95SBruce Richardson  *   The optimum size (in terms of memory usage) for a mempool is when n
111499a2dd95SBruce Richardson  *   is a power of two minus one: n = (2^q - 1).
111599a2dd95SBruce Richardson  * @param elt_size
111699a2dd95SBruce Richardson  *   The size of each element.
111799a2dd95SBruce Richardson  * @param cache_size
111899a2dd95SBruce Richardson  *   Size of the cache. See rte_mempool_create() for details.
111999a2dd95SBruce Richardson  * @param private_data_size
112099a2dd95SBruce Richardson  *   The size of the private data appended after the mempool
112199a2dd95SBruce Richardson  *   structure. This is useful for storing some private data after the
112299a2dd95SBruce Richardson  *   mempool structure, as is done for rte_mbuf_pool for example.
112399a2dd95SBruce Richardson  * @param socket_id
112499a2dd95SBruce Richardson  *   The *socket_id* argument is the socket identifier in the case of
112599a2dd95SBruce Richardson  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
112699a2dd95SBruce Richardson  *   constraint for the reserved zone.
112799a2dd95SBruce Richardson  * @param flags
112899a2dd95SBruce Richardson  *   Flags controlling the behavior of the mempool. See
112999a2dd95SBruce Richardson  *   rte_mempool_create() for details.
113099a2dd95SBruce Richardson  * @return
113199a2dd95SBruce Richardson  *   The pointer to the new allocated mempool, on success. NULL on error
113299a2dd95SBruce Richardson  *   with rte_errno set appropriately. See rte_mempool_create() for details.
113399a2dd95SBruce Richardson  */
113499a2dd95SBruce Richardson struct rte_mempool *
113599a2dd95SBruce Richardson rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
113699a2dd95SBruce Richardson 	unsigned cache_size, unsigned private_data_size,
113799a2dd95SBruce Richardson 	int socket_id, unsigned flags);
113899a2dd95SBruce Richardson /**
113999a2dd95SBruce Richardson  * Free a mempool
114099a2dd95SBruce Richardson  *
114199a2dd95SBruce Richardson  * Unlink the mempool from global list, free the memory chunks, and all
114299a2dd95SBruce Richardson  * memory referenced by the mempool. The objects must not be used by
114399a2dd95SBruce Richardson  * other cores as they will be freed.
114499a2dd95SBruce Richardson  *
114599a2dd95SBruce Richardson  * @param mp
114699a2dd95SBruce Richardson  *   A pointer to the mempool structure.
1147e7b1c466SStephen Hemminger  *   If NULL, the function does nothing.
114899a2dd95SBruce Richardson  */
114999a2dd95SBruce Richardson void
115099a2dd95SBruce Richardson rte_mempool_free(struct rte_mempool *mp);
115199a2dd95SBruce Richardson 
115299a2dd95SBruce Richardson /**
115399a2dd95SBruce Richardson  * Add physically contiguous memory for objects in the pool at init
115499a2dd95SBruce Richardson  *
115599a2dd95SBruce Richardson  * Add a virtually and physically contiguous memory chunk in the pool
115699a2dd95SBruce Richardson  * where objects can be instantiated.
115799a2dd95SBruce Richardson  *
115899a2dd95SBruce Richardson  * If the given IO address is unknown (iova = RTE_BAD_IOVA),
115999a2dd95SBruce Richardson  * the chunk doesn't need to be physically contiguous (only virtually),
116099a2dd95SBruce Richardson  * and allocated objects may span two pages.
116199a2dd95SBruce Richardson  *
116299a2dd95SBruce Richardson  * @param mp
116399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
116499a2dd95SBruce Richardson  * @param vaddr
116599a2dd95SBruce Richardson  *   The virtual address of memory that should be used to store objects.
116699a2dd95SBruce Richardson  * @param iova
116799a2dd95SBruce Richardson  *   The IO address
116899a2dd95SBruce Richardson  * @param len
116999a2dd95SBruce Richardson  *   The length of memory in bytes.
117099a2dd95SBruce Richardson  * @param free_cb
117199a2dd95SBruce Richardson  *   The callback used to free this chunk when destroying the mempool.
117299a2dd95SBruce Richardson  * @param opaque
117399a2dd95SBruce Richardson  *   An opaque argument passed to free_cb.
117499a2dd95SBruce Richardson  * @return
117599a2dd95SBruce Richardson  *   The number of objects added on success (strictly positive).
117699a2dd95SBruce Richardson  *   On error, the chunk is not added to the memory list of the
117799a2dd95SBruce Richardson  *   mempool and one of the following codes is returned:
117899a2dd95SBruce Richardson  *     (0): not enough room in the chunk for one object.
117999a2dd95SBruce Richardson  *     (-ENOSPC): mempool is already populated.
118099a2dd95SBruce Richardson  *     (-ENOMEM): allocation failure.
118199a2dd95SBruce Richardson  */
118299a2dd95SBruce Richardson int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
118399a2dd95SBruce Richardson 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
118499a2dd95SBruce Richardson 	void *opaque);
118599a2dd95SBruce Richardson 
118699a2dd95SBruce Richardson /**
118799a2dd95SBruce Richardson  * Add virtually contiguous memory for objects in the pool at init
118899a2dd95SBruce Richardson  *
118999a2dd95SBruce Richardson  * Add a virtually contiguous memory chunk in the pool where objects can
119099a2dd95SBruce Richardson  * be instantiated.
119199a2dd95SBruce Richardson  *
119299a2dd95SBruce Richardson  * @param mp
119399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
119499a2dd95SBruce Richardson  * @param addr
119599a2dd95SBruce Richardson  *   The virtual address of memory that should be used to store objects.
119699a2dd95SBruce Richardson  * @param len
119799a2dd95SBruce Richardson  *   The length of memory in bytes.
119899a2dd95SBruce Richardson  * @param pg_sz
119999a2dd95SBruce Richardson  *   The size of memory pages in this virtual area.
120099a2dd95SBruce Richardson  * @param free_cb
120199a2dd95SBruce Richardson  *   The callback used to free this chunk when destroying the mempool.
120299a2dd95SBruce Richardson  * @param opaque
120399a2dd95SBruce Richardson  *   An opaque argument passed to free_cb.
120499a2dd95SBruce Richardson  * @return
120599a2dd95SBruce Richardson  *   The number of objects added on success (strictly positive).
120699a2dd95SBruce Richardson  *   On error, the chunk is not added to the memory list of the
120799a2dd95SBruce Richardson  *   mempool and one of the following codes is returned:
120899a2dd95SBruce Richardson  *     (0): not enough room in the chunk for one object.
120999a2dd95SBruce Richardson  *     (-ENOSPC): mempool is already populated.
121099a2dd95SBruce Richardson  *     (-ENOMEM): allocation failure.
121199a2dd95SBruce Richardson  */
121299a2dd95SBruce Richardson int
121399a2dd95SBruce Richardson rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
121499a2dd95SBruce Richardson 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
121599a2dd95SBruce Richardson 	void *opaque);
121699a2dd95SBruce Richardson 
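/*
 * Illustrative sketch: populating an empty pool from one heap-allocated,
 * page-aligned chunk. The helper names are placeholders and error handling
 * is kept minimal.
 *
 *   #include <errno.h>
 *   #include <stdlib.h>
 *   #include <unistd.h>
 *   #include <rte_mempool.h>
 *
 *   // Free callback invoked for the chunk when the mempool is destroyed.
 *   static void
 *   sketch_chunk_free(struct rte_mempool_memhdr *memhdr, void *opaque)
 *   {
 *       RTE_SET_USED(opaque);
 *       free(memhdr->addr);
 *   }
 *
 *   static int
 *   sketch_populate_from_heap(struct rte_mempool *mp, size_t len)
 *   {
 *       size_t pg_sz = (size_t)sysconf(_SC_PAGESIZE);
 *       void *addr;
 *       int ret;
 *
 *       if (posix_memalign(&addr, pg_sz, len) != 0)
 *           return -ENOMEM;
 *
 *       ret = rte_mempool_populate_virt(mp, addr, len, pg_sz,
 *                                       sketch_chunk_free, NULL);
 *       if (ret <= 0)
 *           free(addr);  // chunk was not added to the pool
 *       return ret;
 *   }
 */
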
121799a2dd95SBruce Richardson /**
121899a2dd95SBruce Richardson  * Add memory for objects in the pool at init
121999a2dd95SBruce Richardson  *
122099a2dd95SBruce Richardson  * This is the default function used by rte_mempool_create() to populate
122199a2dd95SBruce Richardson  * the mempool. It adds memory allocated using rte_memzone_reserve().
122299a2dd95SBruce Richardson  *
122399a2dd95SBruce Richardson  * @param mp
122499a2dd95SBruce Richardson  *   A pointer to the mempool structure.
122599a2dd95SBruce Richardson  * @return
122699a2dd95SBruce Richardson  *   The number of objects added on success.
122799a2dd95SBruce Richardson  *   On error, the chunk is not added to the memory list of the
122899a2dd95SBruce Richardson  *   mempool and a negative errno is returned.
122999a2dd95SBruce Richardson  */
123099a2dd95SBruce Richardson int rte_mempool_populate_default(struct rte_mempool *mp);
123199a2dd95SBruce Richardson 
123299a2dd95SBruce Richardson /**
123399a2dd95SBruce Richardson  * Add memory from anonymous mapping for objects in the pool at init
123499a2dd95SBruce Richardson  *
123599a2dd95SBruce Richardson  * This function mmaps an anonymous memory zone that is locked in
123699a2dd95SBruce Richardson  * memory to store the objects of the mempool.
123799a2dd95SBruce Richardson  *
123899a2dd95SBruce Richardson  * @param mp
123999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
124099a2dd95SBruce Richardson  * @return
124199a2dd95SBruce Richardson  *   The number of objects added on success.
124299a2dd95SBruce Richardson  *   On error, 0 is returned, rte_errno is set, and the chunk is not added to
124399a2dd95SBruce Richardson  *   the memory list of the mempool.
124499a2dd95SBruce Richardson  */
124599a2dd95SBruce Richardson int rte_mempool_populate_anon(struct rte_mempool *mp);
124699a2dd95SBruce Richardson 
124799a2dd95SBruce Richardson /**
124899a2dd95SBruce Richardson  * Call a function for each mempool element
124999a2dd95SBruce Richardson  *
125099a2dd95SBruce Richardson  * Iterate across all objects attached to a rte_mempool and call the
125199a2dd95SBruce Richardson  * callback function on each of them.
125299a2dd95SBruce Richardson  *
125399a2dd95SBruce Richardson  * @param mp
125499a2dd95SBruce Richardson  *   A pointer to an initialized mempool.
125599a2dd95SBruce Richardson  * @param obj_cb
125699a2dd95SBruce Richardson  *   A function pointer that is called for each object.
125799a2dd95SBruce Richardson  * @param obj_cb_arg
125899a2dd95SBruce Richardson  *   An opaque pointer passed to the callback function.
125999a2dd95SBruce Richardson  * @return
126099a2dd95SBruce Richardson  *   Number of objects iterated.
126199a2dd95SBruce Richardson  */
126299a2dd95SBruce Richardson uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
126399a2dd95SBruce Richardson 	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
126499a2dd95SBruce Richardson 
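/*
 * Illustrative sketch of a callback matching rte_mempool_obj_cb_t; the
 * callback name is a placeholder. It zeroes every element of a freshly
 * populated pool.
 *
 *   #include <string.h>
 *   #include <rte_mempool.h>
 *
 *   static void
 *   sketch_obj_init(struct rte_mempool *mp, void *opaque, void *obj,
 *                   unsigned obj_idx)
 *   {
 *       RTE_SET_USED(opaque);
 *       RTE_SET_USED(obj_idx);
 *       memset(obj, 0, mp->elt_size);  // clear the element payload
 *   }
 *
 *   // Typical call, once the pool has been populated:
 *   //   rte_mempool_obj_iter(mp, sketch_obj_init, NULL);
 */
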
126599a2dd95SBruce Richardson /**
126699a2dd95SBruce Richardson  * Call a function for each mempool memory chunk
126799a2dd95SBruce Richardson  *
126899a2dd95SBruce Richardson  * Iterate across all memory chunks attached to a rte_mempool and call
126999a2dd95SBruce Richardson  * the callback function on each of them.
127099a2dd95SBruce Richardson  *
127199a2dd95SBruce Richardson  * @param mp
127299a2dd95SBruce Richardson  *   A pointer to an initialized mempool.
127399a2dd95SBruce Richardson  * @param mem_cb
127499a2dd95SBruce Richardson  *   A function pointer that is called for each memory chunk.
127599a2dd95SBruce Richardson  * @param mem_cb_arg
127699a2dd95SBruce Richardson  *   An opaque pointer passed to the callback function.
127799a2dd95SBruce Richardson  * @return
127899a2dd95SBruce Richardson  *   Number of memory chunks iterated.
127999a2dd95SBruce Richardson  */
128099a2dd95SBruce Richardson uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
128199a2dd95SBruce Richardson 	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
128299a2dd95SBruce Richardson 
128399a2dd95SBruce Richardson /**
128499a2dd95SBruce Richardson  * Dump the status of the mempool to a file.
128599a2dd95SBruce Richardson  *
128699a2dd95SBruce Richardson  * @param f
128799a2dd95SBruce Richardson  *   A pointer to a file for output
128899a2dd95SBruce Richardson  * @param mp
128999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
129099a2dd95SBruce Richardson  */
129199a2dd95SBruce Richardson void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
129299a2dd95SBruce Richardson 
129399a2dd95SBruce Richardson /**
129499a2dd95SBruce Richardson  * Create a user-owned mempool cache.
129599a2dd95SBruce Richardson  *
129699a2dd95SBruce Richardson  * This can be used by unregistered non-EAL threads to enable caching when they
129799a2dd95SBruce Richardson  * interact with a mempool.
129899a2dd95SBruce Richardson  *
129999a2dd95SBruce Richardson  * @param size
130099a2dd95SBruce Richardson  *   The size of the mempool cache. See rte_mempool_create()'s cache_size
130199a2dd95SBruce Richardson  *   parameter description for more information. The same limits and
130299a2dd95SBruce Richardson  *   considerations apply here too.
130399a2dd95SBruce Richardson  * @param socket_id
130499a2dd95SBruce Richardson  *   The socket identifier in the case of NUMA. The value can be
130599a2dd95SBruce Richardson  *   SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
130699a2dd95SBruce Richardson  */
130799a2dd95SBruce Richardson struct rte_mempool_cache *
130899a2dd95SBruce Richardson rte_mempool_cache_create(uint32_t size, int socket_id);
130999a2dd95SBruce Richardson 
131099a2dd95SBruce Richardson /**
131199a2dd95SBruce Richardson  * Free a user-owned mempool cache.
131299a2dd95SBruce Richardson  *
131399a2dd95SBruce Richardson  * @param cache
131499a2dd95SBruce Richardson  *   A pointer to the mempool cache.
131599a2dd95SBruce Richardson  */
131699a2dd95SBruce Richardson void
131799a2dd95SBruce Richardson rte_mempool_cache_free(struct rte_mempool_cache *cache);
131899a2dd95SBruce Richardson 
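/*
 * Illustrative sketch of an unregistered non-EAL thread using a user-owned
 * cache with the generic get/put functions. The cache size and burst size
 * are placeholders.
 *
 *   #include <rte_mempool.h>
 *
 *   static int
 *   sketch_non_eal_worker(struct rte_mempool *mp)
 *   {
 *       struct rte_mempool_cache *cache;
 *       void *objs[32];
 *
 *       cache = rte_mempool_cache_create(256, SOCKET_ID_ANY);
 *       if (cache == NULL)
 *           return -1;
 *
 *       if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *           // ... use the objects ...
 *           rte_mempool_generic_put(mp, objs, 32, cache);
 *       }
 *
 *       // Return cached objects to the pool before freeing the cache.
 *       rte_mempool_cache_flush(cache, mp);
 *       rte_mempool_cache_free(cache);
 *       return 0;
 *   }
 */
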
131999a2dd95SBruce Richardson /**
132099a2dd95SBruce Richardson  * Get a pointer to the per-lcore default mempool cache.
132199a2dd95SBruce Richardson  *
132299a2dd95SBruce Richardson  * @param mp
132399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
132499a2dd95SBruce Richardson  * @param lcore_id
132599a2dd95SBruce Richardson  *   The logical core id.
132699a2dd95SBruce Richardson  * @return
132799a2dd95SBruce Richardson  *   A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
132899a2dd95SBruce Richardson  *   thread.
132999a2dd95SBruce Richardson  */
133099a2dd95SBruce Richardson static __rte_always_inline struct rte_mempool_cache *
133199a2dd95SBruce Richardson rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
133299a2dd95SBruce Richardson {
133399a2dd95SBruce Richardson 	if (mp->cache_size == 0)
133499a2dd95SBruce Richardson 		return NULL;
133599a2dd95SBruce Richardson 
133699a2dd95SBruce Richardson 	if (lcore_id >= RTE_MAX_LCORE)
133799a2dd95SBruce Richardson 		return NULL;
133899a2dd95SBruce Richardson 
133999a2dd95SBruce Richardson 	rte_mempool_trace_default_cache(mp, lcore_id,
134099a2dd95SBruce Richardson 		&mp->local_cache[lcore_id]);
134199a2dd95SBruce Richardson 	return &mp->local_cache[lcore_id];
134299a2dd95SBruce Richardson }
134399a2dd95SBruce Richardson 
134499a2dd95SBruce Richardson /**
134599a2dd95SBruce Richardson  * Flush a user-owned mempool cache to the specified mempool.
134699a2dd95SBruce Richardson  *
134799a2dd95SBruce Richardson  * @param cache
134899a2dd95SBruce Richardson  *   A pointer to the mempool cache.
134999a2dd95SBruce Richardson  * @param mp
135099a2dd95SBruce Richardson  *   A pointer to the mempool.
135199a2dd95SBruce Richardson  */
135299a2dd95SBruce Richardson static __rte_always_inline void
135399a2dd95SBruce Richardson rte_mempool_cache_flush(struct rte_mempool_cache *cache,
135499a2dd95SBruce Richardson 			struct rte_mempool *mp)
135599a2dd95SBruce Richardson {
135699a2dd95SBruce Richardson 	if (cache == NULL)
135799a2dd95SBruce Richardson 		cache = rte_mempool_default_cache(mp, rte_lcore_id());
135899a2dd95SBruce Richardson 	if (cache == NULL || cache->len == 0)
135999a2dd95SBruce Richardson 		return;
136099a2dd95SBruce Richardson 	rte_mempool_trace_cache_flush(cache, mp);
136199a2dd95SBruce Richardson 	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
136299a2dd95SBruce Richardson 	cache->len = 0;
136399a2dd95SBruce Richardson }
136499a2dd95SBruce Richardson 
136599a2dd95SBruce Richardson /**
136699a2dd95SBruce Richardson  * @internal Put several objects back in the mempool; used internally.
136799a2dd95SBruce Richardson  * @param mp
136899a2dd95SBruce Richardson  *   A pointer to the mempool structure.
136999a2dd95SBruce Richardson  * @param obj_table
137099a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects).
137199a2dd95SBruce Richardson  * @param n
137299a2dd95SBruce Richardson  *   The number of objects to store back in the mempool, must be strictly
137399a2dd95SBruce Richardson  *   positive.
137499a2dd95SBruce Richardson  * @param cache
137599a2dd95SBruce Richardson  *   A pointer to a mempool cache structure. May be NULL if not needed.
137699a2dd95SBruce Richardson  */
137799a2dd95SBruce Richardson static __rte_always_inline void
1378ad276d5cSAndrew Rybchenko rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
137999a2dd95SBruce Richardson 			   unsigned int n, struct rte_mempool_cache *cache)
138099a2dd95SBruce Richardson {
138199a2dd95SBruce Richardson 	void **cache_objs;
138299a2dd95SBruce Richardson 
1383203dcc9cSMorten Brørup 	/* No cache provided */
1384203dcc9cSMorten Brørup 	if (unlikely(cache == NULL))
138590cf759aSAndrew Rybchenko 		goto driver_enqueue;
138699a2dd95SBruce Richardson 
1387203dcc9cSMorten Brørup 	/* Increment the stats now; adding to the mempool always succeeds. */
1388203dcc9cSMorten Brørup 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
1389203dcc9cSMorten Brørup 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
1390203dcc9cSMorten Brørup 
1391203dcc9cSMorten Brørup 	/* The request itself is too big for the cache */
1392203dcc9cSMorten Brørup 	if (unlikely(n > cache->flushthresh))
1393203dcc9cSMorten Brørup 		goto driver_enqueue_stats_incremented;
1394203dcc9cSMorten Brørup 
139599a2dd95SBruce Richardson 	/*
1396459531c9SMorten Brørup 	 * The cache follows the following algorithm:
1397459531c9SMorten Brørup 	 *   1. If the objects cannot be added to the cache without crossing
1398459531c9SMorten Brørup 	 *      the flush threshold, flush the cache to the backend.
1399459531c9SMorten Brørup 	 *   2. Add the objects to the cache.
140099a2dd95SBruce Richardson 	 */
140199a2dd95SBruce Richardson 
1402459531c9SMorten Brørup 	if (cache->len + n <= cache->flushthresh) {
1403459531c9SMorten Brørup 		cache_objs = &cache->objs[cache->len];
140499a2dd95SBruce Richardson 		cache->len += n;
1405459531c9SMorten Brørup 	} else {
1406e6e62f6fSAndrew Rybchenko 		cache_objs = &cache->objs[0];
1407e6e62f6fSAndrew Rybchenko 		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
1408e6e62f6fSAndrew Rybchenko 		cache->len = n;
140999a2dd95SBruce Richardson 	}
141099a2dd95SBruce Richardson 
1411459531c9SMorten Brørup 	/* Add the objects to the cache. */
1412459531c9SMorten Brørup 	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);
1413459531c9SMorten Brørup 
141499a2dd95SBruce Richardson 	return;
141599a2dd95SBruce Richardson 
141690cf759aSAndrew Rybchenko driver_enqueue:
141799a2dd95SBruce Richardson 
1418203dcc9cSMorten Brørup 	/* Increment the stats now; adding to the mempool always succeeds. */
1419203dcc9cSMorten Brørup 	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
1420203dcc9cSMorten Brørup 	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
1421203dcc9cSMorten Brørup 
1422203dcc9cSMorten Brørup driver_enqueue_stats_incremented:
1423203dcc9cSMorten Brørup 
142490cf759aSAndrew Rybchenko 	/* push objects to the backend */
142599a2dd95SBruce Richardson 	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
142699a2dd95SBruce Richardson }
142799a2dd95SBruce Richardson 
142899a2dd95SBruce Richardson 
142999a2dd95SBruce Richardson /**
143099a2dd95SBruce Richardson  * Put several objects back in the mempool.
143199a2dd95SBruce Richardson  *
143299a2dd95SBruce Richardson  * @param mp
143399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
143499a2dd95SBruce Richardson  * @param obj_table
143599a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects).
143699a2dd95SBruce Richardson  * @param n
143799a2dd95SBruce Richardson  *   The number of objects to add in the mempool from the obj_table.
143899a2dd95SBruce Richardson  * @param cache
143999a2dd95SBruce Richardson  *   A pointer to a mempool cache structure. May be NULL if not needed.
144099a2dd95SBruce Richardson  */
144199a2dd95SBruce Richardson static __rte_always_inline void
144299a2dd95SBruce Richardson rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
144399a2dd95SBruce Richardson 			unsigned int n, struct rte_mempool_cache *cache)
144499a2dd95SBruce Richardson {
144599a2dd95SBruce Richardson 	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
1446ad276d5cSAndrew Rybchenko 	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
1447ad276d5cSAndrew Rybchenko 	rte_mempool_do_generic_put(mp, obj_table, n, cache);
144899a2dd95SBruce Richardson }
144999a2dd95SBruce Richardson 
145099a2dd95SBruce Richardson /**
145199a2dd95SBruce Richardson  * Put several objects back in the mempool.
145299a2dd95SBruce Richardson  *
145399a2dd95SBruce Richardson  * This function calls the multi-producer or the single-producer
145499a2dd95SBruce Richardson  * version depending on the default behavior that was specified at
145599a2dd95SBruce Richardson  * mempool creation time (see flags).
145699a2dd95SBruce Richardson  *
145799a2dd95SBruce Richardson  * @param mp
145899a2dd95SBruce Richardson  *   A pointer to the mempool structure.
145999a2dd95SBruce Richardson  * @param obj_table
146099a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects).
146199a2dd95SBruce Richardson  * @param n
146299a2dd95SBruce Richardson  *   The number of objects to add in the mempool from obj_table.
146399a2dd95SBruce Richardson  */
146499a2dd95SBruce Richardson static __rte_always_inline void
146599a2dd95SBruce Richardson rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
146699a2dd95SBruce Richardson 		     unsigned int n)
146799a2dd95SBruce Richardson {
146899a2dd95SBruce Richardson 	struct rte_mempool_cache *cache;
146999a2dd95SBruce Richardson 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
147099a2dd95SBruce Richardson 	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
147199a2dd95SBruce Richardson 	rte_mempool_generic_put(mp, obj_table, n, cache);
147299a2dd95SBruce Richardson }
147399a2dd95SBruce Richardson 
147499a2dd95SBruce Richardson /**
147599a2dd95SBruce Richardson  * Put one object back in the mempool.
147699a2dd95SBruce Richardson  *
147799a2dd95SBruce Richardson  * This function calls the multi-producer or the single-producer
147899a2dd95SBruce Richardson  * version depending on the default behavior that was specified at
147999a2dd95SBruce Richardson  * mempool creation time (see flags).
148099a2dd95SBruce Richardson  *
148199a2dd95SBruce Richardson  * @param mp
148299a2dd95SBruce Richardson  *   A pointer to the mempool structure.
148399a2dd95SBruce Richardson  * @param obj
148499a2dd95SBruce Richardson  *   A pointer to the object to be added.
148599a2dd95SBruce Richardson  */
148699a2dd95SBruce Richardson static __rte_always_inline void
148799a2dd95SBruce Richardson rte_mempool_put(struct rte_mempool *mp, void *obj)
148899a2dd95SBruce Richardson {
148999a2dd95SBruce Richardson 	rte_mempool_put_bulk(mp, &obj, 1);
149099a2dd95SBruce Richardson }
149199a2dd95SBruce Richardson 
149299a2dd95SBruce Richardson /**
149399a2dd95SBruce Richardson  * @internal Get several objects from the mempool; used internally.
149499a2dd95SBruce Richardson  * @param mp
149599a2dd95SBruce Richardson  *   A pointer to the mempool structure.
149699a2dd95SBruce Richardson  * @param obj_table
149799a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects).
149899a2dd95SBruce Richardson  * @param n
149999a2dd95SBruce Richardson  *   The number of objects to get, must be strictly positive.
150099a2dd95SBruce Richardson  * @param cache
150199a2dd95SBruce Richardson  *   A pointer to a mempool cache structure. May be NULL if not needed.
150299a2dd95SBruce Richardson  * @return
15039b3233abSFerruh Yigit  *   - 0: Success.
1504a2833eccSMorten Brørup  *   - <0: Error; code of driver dequeue function.
150599a2dd95SBruce Richardson  */
150699a2dd95SBruce Richardson static __rte_always_inline int
1507ad276d5cSAndrew Rybchenko rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
150899a2dd95SBruce Richardson 			   unsigned int n, struct rte_mempool_cache *cache)
150999a2dd95SBruce Richardson {
151099a2dd95SBruce Richardson 	int ret;
151105a00c75SMorten Brørup 	unsigned int remaining;
151299a2dd95SBruce Richardson 	uint32_t index, len;
151399a2dd95SBruce Richardson 	void **cache_objs;
151499a2dd95SBruce Richardson 
1515a2833eccSMorten Brørup 	/* No cache provided */
151605a00c75SMorten Brørup 	if (unlikely(cache == NULL)) {
151705a00c75SMorten Brørup 		remaining = n;
1518a2833eccSMorten Brørup 		goto driver_dequeue;
151905a00c75SMorten Brørup 	}
152099a2dd95SBruce Richardson 
152105a00c75SMorten Brørup 	/* The cache is a stack, so copy will be in reverse order. */
1522a2833eccSMorten Brørup 	cache_objs = &cache->objs[cache->len];
152305a00c75SMorten Brørup 
1524*743bd29eSTyler Retzlaff 	if (__rte_constant(n) && n <= cache->len) {
152505a00c75SMorten Brørup 		/*
152605a00c75SMorten Brørup 		 * The request size is known at build time, and
152705a00c75SMorten Brørup 		 * the entire request can be satisfied from the cache,
152805a00c75SMorten Brørup 		 * so let the compiler unroll the fixed length copy loop.
152905a00c75SMorten Brørup 		 */
153005a00c75SMorten Brørup 		cache->len -= n;
153105a00c75SMorten Brørup 		for (index = 0; index < n; index++)
153205a00c75SMorten Brørup 			*obj_table++ = *--cache_objs;
153305a00c75SMorten Brørup 
153405a00c75SMorten Brørup 		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
153505a00c75SMorten Brørup 		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
153605a00c75SMorten Brørup 
153705a00c75SMorten Brørup 		return 0;
153805a00c75SMorten Brørup 	}
153905a00c75SMorten Brørup 
154005a00c75SMorten Brørup 	/*
154105a00c75SMorten Brørup 	 * Use the cache as much as we have to return hot objects first.
154205a00c75SMorten Brørup 	 * If the request size 'n' is known at build time, the above comparison
154305a00c75SMorten Brørup 	 * ensures that n > cache->len here, so omit RTE_MIN().
154405a00c75SMorten Brørup 	 */
1545*743bd29eSTyler Retzlaff 	len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
1546a2833eccSMorten Brørup 	cache->len -= len;
154705a00c75SMorten Brørup 	remaining = n - len;
1548a2833eccSMorten Brørup 	for (index = 0; index < len; index++)
1549a2833eccSMorten Brørup 		*obj_table++ = *--cache_objs;
155099a2dd95SBruce Richardson 
155105a00c75SMorten Brørup 	/*
155205a00c75SMorten Brørup 	 * If the request size 'n' is known at build time, the case
155305a00c75SMorten Brørup 	 * where the entire request can be satisfied from the cache
155405a00c75SMorten Brørup 	 * has already been handled above, so omit handling it here.
155505a00c75SMorten Brørup 	 */
1556*743bd29eSTyler Retzlaff 	if (!__rte_constant(n) && remaining == 0) {
1557a2833eccSMorten Brørup 		/* The entire request is satisfied from the cache. */
155899a2dd95SBruce Richardson 
1559203dcc9cSMorten Brørup 		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1560203dcc9cSMorten Brørup 		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1561a2833eccSMorten Brørup 
1562a2833eccSMorten Brørup 		return 0;
1563a2833eccSMorten Brørup 	}
1564a2833eccSMorten Brørup 
1565a2833eccSMorten Brørup 	/* if dequeue below would overflow mem allocated for cache */
1566a2833eccSMorten Brørup 	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
1567a2833eccSMorten Brørup 		goto driver_dequeue;
1568a2833eccSMorten Brørup 
1569a2833eccSMorten Brørup 	/* Fill the cache from the backend; fetch size + remaining objects. */
1570a2833eccSMorten Brørup 	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
1571a2833eccSMorten Brørup 			cache->size + remaining);
157299a2dd95SBruce Richardson 	if (unlikely(ret < 0)) {
157399a2dd95SBruce Richardson 		/*
1574a2833eccSMorten Brørup 		 * We are buffer constrained, and not able to allocate
1575a2833eccSMorten Brørup 		 * cache + remaining.
1576a2833eccSMorten Brørup 		 * Do not fill the cache, just satisfy the remaining part of
1577a2833eccSMorten Brørup 		 * the request directly from the backend.
157899a2dd95SBruce Richardson 		 */
1579a2833eccSMorten Brørup 		goto driver_dequeue;
158099a2dd95SBruce Richardson 	}
158199a2dd95SBruce Richardson 
1582a2833eccSMorten Brørup 	/* Satisfy the remaining part of the request from the filled cache. */
1583a2833eccSMorten Brørup 	cache_objs = &cache->objs[cache->size + remaining];
1584a2833eccSMorten Brørup 	for (index = 0; index < remaining; index++)
1585a2833eccSMorten Brørup 		*obj_table++ = *--cache_objs;
158699a2dd95SBruce Richardson 
1587a2833eccSMorten Brørup 	cache->len = cache->size;
158899a2dd95SBruce Richardson 
1589203dcc9cSMorten Brørup 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1590203dcc9cSMorten Brørup 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
159199a2dd95SBruce Richardson 
159299a2dd95SBruce Richardson 	return 0;
159399a2dd95SBruce Richardson 
1594a2833eccSMorten Brørup driver_dequeue:
159599a2dd95SBruce Richardson 
1596a2833eccSMorten Brørup 	/* Get remaining objects directly from the backend. */
1597a2833eccSMorten Brørup 	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
159899a2dd95SBruce Richardson 
159956487040SDharmik Thakkar 	if (ret < 0) {
1600a2833eccSMorten Brørup 		if (likely(cache != NULL)) {
1601a2833eccSMorten Brørup 			cache->len = n - remaining;
1602a2833eccSMorten Brørup 			/*
1603a2833eccSMorten Brørup 			 * No further action is required to roll the first part
1604a2833eccSMorten Brørup 			 * of the request back into the cache, as objects in
1605a2833eccSMorten Brørup 			 * the cache are intact.
1606a2833eccSMorten Brørup 			 */
1607a2833eccSMorten Brørup 		}
1608a2833eccSMorten Brørup 
1609ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1610ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
161156487040SDharmik Thakkar 	} else {
1612203dcc9cSMorten Brørup 		if (likely(cache != NULL)) {
1613203dcc9cSMorten Brørup 			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1614203dcc9cSMorten Brørup 			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1615203dcc9cSMorten Brørup 		} else {
1616ad276d5cSAndrew Rybchenko 			RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1617ad276d5cSAndrew Rybchenko 			RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
161856487040SDharmik Thakkar 		}
1619203dcc9cSMorten Brørup 	}
162099a2dd95SBruce Richardson 
162199a2dd95SBruce Richardson 	return ret;
162299a2dd95SBruce Richardson }
162399a2dd95SBruce Richardson 
162499a2dd95SBruce Richardson /**
162599a2dd95SBruce Richardson  * Get several objects from the mempool.
162699a2dd95SBruce Richardson  *
162799a2dd95SBruce Richardson  * If cache is enabled, objects will be retrieved first from cache,
162899a2dd95SBruce Richardson  * subsequently from the common pool. Note that it can return -ENOENT when
162999a2dd95SBruce Richardson  * the local cache and common pool are empty, even if the caches of other
163099a2dd95SBruce Richardson  * lcores are full.
163199a2dd95SBruce Richardson  *
163299a2dd95SBruce Richardson  * @param mp
163399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
163499a2dd95SBruce Richardson  * @param obj_table
163599a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects) that will be filled.
163699a2dd95SBruce Richardson  * @param n
163799a2dd95SBruce Richardson  *   The number of objects to get from mempool to obj_table.
163899a2dd95SBruce Richardson  * @param cache
163999a2dd95SBruce Richardson  *   A pointer to a mempool cache structure. May be NULL if not needed.
164099a2dd95SBruce Richardson  * @return
164199a2dd95SBruce Richardson  *   - 0: Success; objects taken.
164299a2dd95SBruce Richardson  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
164399a2dd95SBruce Richardson  */
164499a2dd95SBruce Richardson static __rte_always_inline int
164599a2dd95SBruce Richardson rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
164699a2dd95SBruce Richardson 			unsigned int n, struct rte_mempool_cache *cache)
164799a2dd95SBruce Richardson {
164899a2dd95SBruce Richardson 	int ret;
1649ad276d5cSAndrew Rybchenko 	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
165099a2dd95SBruce Richardson 	if (ret == 0)
1651ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
165299a2dd95SBruce Richardson 	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
165399a2dd95SBruce Richardson 	return ret;
165499a2dd95SBruce Richardson }
165599a2dd95SBruce Richardson 
165699a2dd95SBruce Richardson /**
165799a2dd95SBruce Richardson  * Get several objects from the mempool.
165899a2dd95SBruce Richardson  *
165999a2dd95SBruce Richardson  * This function calls the multi-consumers or the single-consumer
166099a2dd95SBruce Richardson  * version, depending on the default behavior that was specified at
166199a2dd95SBruce Richardson  * mempool creation time (see flags).
166299a2dd95SBruce Richardson  *
166399a2dd95SBruce Richardson  * If cache is enabled, objects will be retrieved first from cache,
166499a2dd95SBruce Richardson  * subsequently from the common pool. Note that it can return -ENOENT when
166599a2dd95SBruce Richardson  * the local cache and common pool are empty, even if the caches of other
166699a2dd95SBruce Richardson  * lcores are full.
166799a2dd95SBruce Richardson  *
166899a2dd95SBruce Richardson  * @param mp
166999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
167099a2dd95SBruce Richardson  * @param obj_table
167199a2dd95SBruce Richardson  *   A pointer to a table of void * pointers (objects) that will be filled.
167299a2dd95SBruce Richardson  * @param n
167399a2dd95SBruce Richardson  *   The number of objects to get from the mempool to obj_table.
167499a2dd95SBruce Richardson  * @return
167599a2dd95SBruce Richardson  *   - 0: Success; objects taken
167699a2dd95SBruce Richardson  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
167799a2dd95SBruce Richardson  */
167899a2dd95SBruce Richardson static __rte_always_inline int
167999a2dd95SBruce Richardson rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
168099a2dd95SBruce Richardson {
168199a2dd95SBruce Richardson 	struct rte_mempool_cache *cache;
168299a2dd95SBruce Richardson 	cache = rte_mempool_default_cache(mp, rte_lcore_id());
168399a2dd95SBruce Richardson 	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
168499a2dd95SBruce Richardson 	return rte_mempool_generic_get(mp, obj_table, n, cache);
168599a2dd95SBruce Richardson }
168699a2dd95SBruce Richardson 
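/*
 * Illustrative sketch of a bulk get/put cycle; the burst size of 32 is a
 * placeholder. Either all requested objects are obtained or none.
 *
 *   #include <rte_mempool.h>
 *
 *   static void
 *   sketch_burst_once(struct rte_mempool *mp)
 *   {
 *       void *objs[32];
 *
 *       if (rte_mempool_get_bulk(mp, objs, 32) != 0)
 *           return;  // -ENOENT: not enough objects available
 *
 *       // ... work on the 32 objects ...
 *
 *       rte_mempool_put_bulk(mp, objs, 32);
 *   }
 */
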
168799a2dd95SBruce Richardson /**
168899a2dd95SBruce Richardson  * Get one object from the mempool.
168999a2dd95SBruce Richardson  *
169099a2dd95SBruce Richardson  * This function calls the multi-consumers or the single-consumer
169199a2dd95SBruce Richardson  * version, depending on the default behavior that was specified at
169299a2dd95SBruce Richardson  * mempool creation (see flags).
169399a2dd95SBruce Richardson  *
169499a2dd95SBruce Richardson  * If cache is enabled, objects will be retrieved first from cache,
169599a2dd95SBruce Richardson  * subsequently from the common pool. Note that it can return -ENOENT when
169699a2dd95SBruce Richardson  * the local cache and common pool are empty, even if cache from other
169799a2dd95SBruce Richardson  * the local cache and common pool are empty, even if the caches of other
169899a2dd95SBruce Richardson  * lcores are full.
169999a2dd95SBruce Richardson  * @param mp
170099a2dd95SBruce Richardson  *   A pointer to the mempool structure.
170199a2dd95SBruce Richardson  * @param obj_p
170299a2dd95SBruce Richardson  *   A pointer to a void * pointer (object) that will be filled.
170399a2dd95SBruce Richardson  * @return
170499a2dd95SBruce Richardson  *   - 0: Success; objects taken.
170599a2dd95SBruce Richardson  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
170699a2dd95SBruce Richardson  */
170799a2dd95SBruce Richardson static __rte_always_inline int
170899a2dd95SBruce Richardson rte_mempool_get(struct rte_mempool *mp, void **obj_p)
170999a2dd95SBruce Richardson {
171099a2dd95SBruce Richardson 	return rte_mempool_get_bulk(mp, obj_p, 1);
171199a2dd95SBruce Richardson }
171299a2dd95SBruce Richardson 
171399a2dd95SBruce Richardson /**
171499a2dd95SBruce Richardson  * Get contiguous blocks of objects from the mempool.
171599a2dd95SBruce Richardson  *
171699a2dd95SBruce Richardson  * If cache is enabled, consider flushing it first, so that objects are
171799a2dd95SBruce Richardson  * reused as soon as possible.
171899a2dd95SBruce Richardson  *
171999a2dd95SBruce Richardson  * The application should check that the driver supports the operation
172099a2dd95SBruce Richardson  * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
172199a2dd95SBruce Richardson  * is not zero.
172299a2dd95SBruce Richardson  *
172399a2dd95SBruce Richardson  * @param mp
172499a2dd95SBruce Richardson  *   A pointer to the mempool structure.
172599a2dd95SBruce Richardson  * @param first_obj_table
172699a2dd95SBruce Richardson  *   A pointer to a pointer to the first object in each block.
172799a2dd95SBruce Richardson  * @param n
172899a2dd95SBruce Richardson  *   The number of blocks to get from mempool.
172999a2dd95SBruce Richardson  * @return
173099a2dd95SBruce Richardson  *   - 0: Success; blocks taken.
173199a2dd95SBruce Richardson  *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
173299a2dd95SBruce Richardson  *   - -EOPNOTSUPP: The mempool driver does not support block dequeue
173399a2dd95SBruce Richardson  */
173499a2dd95SBruce Richardson static __rte_always_inline int
173599a2dd95SBruce Richardson rte_mempool_get_contig_blocks(struct rte_mempool *mp,
173699a2dd95SBruce Richardson 			      void **first_obj_table, unsigned int n)
173799a2dd95SBruce Richardson {
173899a2dd95SBruce Richardson 	int ret;
173999a2dd95SBruce Richardson 
174099a2dd95SBruce Richardson 	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
174199a2dd95SBruce Richardson 	if (ret == 0) {
1742ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1743ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
1744ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
174599a2dd95SBruce Richardson 							1);
174699a2dd95SBruce Richardson 	} else {
1747ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1748ad276d5cSAndrew Rybchenko 		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
174999a2dd95SBruce Richardson 	}
175099a2dd95SBruce Richardson 
175199a2dd95SBruce Richardson 	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
175299a2dd95SBruce Richardson 	return ret;
175399a2dd95SBruce Richardson }
175499a2dd95SBruce Richardson 
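/*
 * Illustrative sketch of the support check described above; the helper
 * name is a placeholder.
 *
 *   #include <errno.h>
 *   #include <rte_mempool.h>
 *
 *   static int
 *   sketch_get_blocks(struct rte_mempool *mp, void **first_objs,
 *                     unsigned int n_blocks)
 *   {
 *       struct rte_mempool_info info;
 *
 *       if (rte_mempool_ops_get_info(mp, &info) != 0 ||
 *           info.contig_block_size == 0)
 *           return -EOPNOTSUPP;  // driver has no block dequeue support
 *
 *       return rte_mempool_get_contig_blocks(mp, first_objs, n_blocks);
 *   }
 */
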
175599a2dd95SBruce Richardson /**
175699a2dd95SBruce Richardson  * Return the number of entries in the mempool.
175799a2dd95SBruce Richardson  *
175899a2dd95SBruce Richardson  * When cache is enabled, this function has to browse the length of
175999a2dd95SBruce Richardson  * all lcores, so it should not be used in a data path, but only for
176099a2dd95SBruce Richardson  * debug purposes. User-owned mempool caches are not accounted for.
176199a2dd95SBruce Richardson  *
176299a2dd95SBruce Richardson  * @param mp
176399a2dd95SBruce Richardson  *   A pointer to the mempool structure.
176499a2dd95SBruce Richardson  * @return
176599a2dd95SBruce Richardson  *   The number of entries in the mempool.
176699a2dd95SBruce Richardson  */
176799a2dd95SBruce Richardson unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
176899a2dd95SBruce Richardson 
176999a2dd95SBruce Richardson /**
177099a2dd95SBruce Richardson  * Return the number of elements which have been allocated from the mempool
177199a2dd95SBruce Richardson  *
177299a2dd95SBruce Richardson  * When cache is enabled, this function has to browse the length of
177399a2dd95SBruce Richardson  * all lcores, so it should not be used in a data path, but only for
177499a2dd95SBruce Richardson  * debug purposes.
177599a2dd95SBruce Richardson  *
177699a2dd95SBruce Richardson  * @param mp
177799a2dd95SBruce Richardson  *   A pointer to the mempool structure.
177899a2dd95SBruce Richardson  * @return
177999a2dd95SBruce Richardson  *   The number of entries in use (allocated from the mempool).
178099a2dd95SBruce Richardson  */
178199a2dd95SBruce Richardson unsigned int
178299a2dd95SBruce Richardson rte_mempool_in_use_count(const struct rte_mempool *mp);
178399a2dd95SBruce Richardson 
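/*
 * Illustrative sketch of a debug-time consistency check: available plus
 * in-use entries account for every element, provided no other lcore is
 * getting or putting objects concurrently. The helper name is a placeholder.
 *
 *   #include <rte_debug.h>
 *   #include <rte_mempool.h>
 *
 *   static void
 *   sketch_check_counts(const struct rte_mempool *mp)
 *   {
 *       unsigned int avail = rte_mempool_avail_count(mp);
 *       unsigned int used = rte_mempool_in_use_count(mp);
 *
 *       RTE_VERIFY(avail + used == mp->size);
 *   }
 */
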
178499a2dd95SBruce Richardson /**
178599a2dd95SBruce Richardson  * Test if the mempool is full.
178699a2dd95SBruce Richardson  *
178799a2dd95SBruce Richardson  * When cache is enabled, this function has to browse the length of all
178899a2dd95SBruce Richardson  * lcores, so it should not be used in a data path, but only for debug
178999a2dd95SBruce Richardson  * purposes. User-owned mempool caches are not accounted for.
179099a2dd95SBruce Richardson  *
179199a2dd95SBruce Richardson  * @param mp
179299a2dd95SBruce Richardson  *   A pointer to the mempool structure.
179399a2dd95SBruce Richardson  * @return
179499a2dd95SBruce Richardson  *   - 1: The mempool is full.
179599a2dd95SBruce Richardson  *   - 0: The mempool is not full.
179699a2dd95SBruce Richardson  */
179799a2dd95SBruce Richardson static inline int
179899a2dd95SBruce Richardson rte_mempool_full(const struct rte_mempool *mp)
179999a2dd95SBruce Richardson {
180099a2dd95SBruce Richardson 	return rte_mempool_avail_count(mp) == mp->size;
180199a2dd95SBruce Richardson }
180299a2dd95SBruce Richardson 
180399a2dd95SBruce Richardson /**
180499a2dd95SBruce Richardson  * Test if the mempool is empty.
180599a2dd95SBruce Richardson  *
180699a2dd95SBruce Richardson  * When cache is enabled, this function has to browse the length of all
180799a2dd95SBruce Richardson  * lcores, so it should not be used in a data path, but only for debug
180899a2dd95SBruce Richardson  * purposes. User-owned mempool caches are not accounted for.
180999a2dd95SBruce Richardson  *
181099a2dd95SBruce Richardson  * @param mp
181199a2dd95SBruce Richardson  *   A pointer to the mempool structure.
181299a2dd95SBruce Richardson  * @return
181399a2dd95SBruce Richardson  *   - 1: The mempool is empty.
181499a2dd95SBruce Richardson  *   - 0: The mempool is not empty.
181599a2dd95SBruce Richardson  */
181699a2dd95SBruce Richardson static inline int
181799a2dd95SBruce Richardson rte_mempool_empty(const struct rte_mempool *mp)
181899a2dd95SBruce Richardson {
181999a2dd95SBruce Richardson 	return rte_mempool_avail_count(mp) == 0;
182099a2dd95SBruce Richardson }
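
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * checking at teardown time that every object has been returned before the
 * pool is freed. The helper name is an assumption.
 */
static inline int
example_mempool_safe_to_free(const struct rte_mempool *mp)
{
	/* A pool should only be destroyed once all objects are back in it. */
	if (!rte_mempool_full(mp)) {
		printf("pool %s still has %u object(s) in use\n",
			mp->name, rte_mempool_in_use_count(mp));
		return 0;
	}
	return 1;
}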
182199a2dd95SBruce Richardson 
182299a2dd95SBruce Richardson /**
182399a2dd95SBruce Richardson  * Return the IO address of elt, which is an element of a mempool.
182499a2dd95SBruce Richardson  *
182599a2dd95SBruce Richardson  * @param elt
182699a2dd95SBruce Richardson  *   A pointer (virtual address) to the element of the pool.
182799a2dd95SBruce Richardson  * @return
182899a2dd95SBruce Richardson  *   The IO address of the elt element.
1829c47d7b90SAndrew Rybchenko  *   If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
183099a2dd95SBruce Richardson  *   returned value is RTE_BAD_IOVA.
183199a2dd95SBruce Richardson  */
183299a2dd95SBruce Richardson static inline rte_iova_t
183399a2dd95SBruce Richardson rte_mempool_virt2iova(const void *elt)
183499a2dd95SBruce Richardson {
183599a2dd95SBruce Richardson 	const struct rte_mempool_objhdr *hdr;
183699a2dd95SBruce Richardson 	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
183799a2dd95SBruce Richardson 		sizeof(*hdr));
183899a2dd95SBruce Richardson 	return hdr->iova;
183999a2dd95SBruce Richardson }
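
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * allocate one object and resolve its IO address, e.g. to fill a DMA
 * descriptor. The helper name is an assumption; the caller must eventually
 * return the object with rte_mempool_put().
 */
static inline void *
example_mempool_get_with_iova(struct rte_mempool *mp, rte_iova_t *iova)
{
	void *obj;

	/* rte_mempool_get() returns 0 on success, negative if the pool is empty. */
	if (rte_mempool_get(mp, &obj) < 0)
		return NULL;

	/* Yields RTE_BAD_IOVA for pools created with RTE_MEMPOOL_F_NO_IOVA_CONTIG. */
	*iova = rte_mempool_virt2iova(obj);
	return obj;
}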
184099a2dd95SBruce Richardson 
184199a2dd95SBruce Richardson /**
184299a2dd95SBruce Richardson  * Check the consistency of mempool objects.
184399a2dd95SBruce Richardson  *
184499a2dd95SBruce Richardson  * Verify the coherency of fields in the mempool structure. Also check
184599a2dd95SBruce Richardson  * that the cookies of mempool objects (even the ones that are not
184699a2dd95SBruce Richardson  * present in pool) have a correct value. If not, a panic will occur.
184799a2dd95SBruce Richardson  * present in the pool) have a correct value. If not, a panic will occur.
184899a2dd95SBruce Richardson  * @param mp
184999a2dd95SBruce Richardson  *   A pointer to the mempool structure.
185099a2dd95SBruce Richardson  */
185199a2dd95SBruce Richardson void rte_mempool_audit(struct rte_mempool *mp);
185299a2dd95SBruce Richardson 
185399a2dd95SBruce Richardson /**
185499a2dd95SBruce Richardson  * Return a pointer to the private data in a mempool structure.
185599a2dd95SBruce Richardson  *
185699a2dd95SBruce Richardson  * @param mp
185799a2dd95SBruce Richardson  *   A pointer to the mempool structure.
185899a2dd95SBruce Richardson  * @return
185999a2dd95SBruce Richardson  *   A pointer to the private data.
186099a2dd95SBruce Richardson  */
186199a2dd95SBruce Richardson static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
186299a2dd95SBruce Richardson {
186399a2dd95SBruce Richardson 	return (char *)mp +
1864d7203661SAndrew Rybchenko 		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
186599a2dd95SBruce Richardson }
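
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * a pool created with a non-zero private_data_size can carry application
 * context next to the pool. The struct and helper names are assumptions.
 */
struct example_pool_ctx {
	uint32_t queue_id; /* application-defined context */
};

static inline void
example_mempool_set_queue(struct rte_mempool *mp, uint32_t queue_id)
{
	/* Requires private_data_size >= sizeof(struct example_pool_ctx)
	 * at rte_mempool_create() time. */
	struct example_pool_ctx *ctx = rte_mempool_get_priv(mp);

	ctx->queue_id = queue_id;
}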
186699a2dd95SBruce Richardson 
186799a2dd95SBruce Richardson /**
186899a2dd95SBruce Richardson  * Dump the status of all mempools to a file.
186999a2dd95SBruce Richardson  *
187099a2dd95SBruce Richardson  * @param f
187199a2dd95SBruce Richardson  *   A pointer to a file for output
187299a2dd95SBruce Richardson  */
187399a2dd95SBruce Richardson void rte_mempool_list_dump(FILE *f);
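
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * a debug hook that audits one pool and then dumps the state of every pool
 * to stdout. The helper name is an assumption.
 */
static inline void
example_mempool_debug_dump(struct rte_mempool *mp)
{
	/* Panics if any object cookie or mempool field is inconsistent. */
	rte_mempool_audit(mp);

	/* Print the status of all registered mempools. */
	rte_mempool_list_dump(stdout);
}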
187499a2dd95SBruce Richardson 
187599a2dd95SBruce Richardson /**
187699a2dd95SBruce Richardson  * Search for a mempool by its name.
187799a2dd95SBruce Richardson  *
187899a2dd95SBruce Richardson  * @param name
187999a2dd95SBruce Richardson  *   The name of the mempool.
188099a2dd95SBruce Richardson  * @return
188199a2dd95SBruce Richardson  *   A pointer to the mempool matching the name, or NULL if not found,
188299a2dd95SBruce Richardson  *   with rte_errno set appropriately. Possible rte_errno values include:
188399a2dd95SBruce Richardson  *    - ENOENT - the required entry is not available.
188599a2dd95SBruce Richardson  */
188699a2dd95SBruce Richardson struct rte_mempool *rte_mempool_lookup(const char *name);
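
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * attaching to a pool created elsewhere (e.g. by a primary process) by name.
 * The pool name "example_pool" and the helper name are assumptions.
 */
static inline struct rte_mempool *
example_mempool_attach(void)
{
	struct rte_mempool *mp = rte_mempool_lookup("example_pool");

	if (mp == NULL) {
		/* Not found: rte_errno is set to ENOENT. */
		printf("mempool example_pool not found\n");
		return NULL;
	}
	return mp;
}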
188799a2dd95SBruce Richardson 
188899a2dd95SBruce Richardson /**
188999a2dd95SBruce Richardson  * Get the header, trailer and total size of a mempool element.
189099a2dd95SBruce Richardson  *
189199a2dd95SBruce Richardson  * Given a desired size of the mempool element and mempool flags,
189299a2dd95SBruce Richardson  * calculates header, trailer, body and total sizes of the mempool object.
189399a2dd95SBruce Richardson  *
189499a2dd95SBruce Richardson  * @param elt_size
189599a2dd95SBruce Richardson  *   The size of each element, without header and trailer.
189699a2dd95SBruce Richardson  * @param flags
189799a2dd95SBruce Richardson  *   The flags used for the mempool creation.
189899a2dd95SBruce Richardson  *   Consult rte_mempool_create() for more information about possible values.
190099a2dd95SBruce Richardson  * @param sz
190199a2dd95SBruce Richardson  *   The calculated detailed size of the mempool object. May be NULL.
190299a2dd95SBruce Richardson  * @return
190399a2dd95SBruce Richardson  *   Total size of the mempool object.
190499a2dd95SBruce Richardson  */
190599a2dd95SBruce Richardson uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
190699a2dd95SBruce Richardson 	struct rte_mempool_objsz *sz);
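
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * computing how much room a 2048-byte element really needs once the object
 * header and trailer are added. The element size, the zero flags value and
 * the helper name are assumptions.
 */
static inline uint32_t
example_mempool_obj_footprint(void)
{
	struct rte_mempool_objsz sz;
	uint32_t total;

	/* Same flags that would be passed to rte_mempool_create(). */
	total = rte_mempool_calc_obj_size(2048, 0, &sz);

	printf("header %" PRIu32 ", body %" PRIu32 ", trailer %" PRIu32
		", total %" PRIu32 "\n",
		sz.header_size, sz.elt_size, sz.trailer_size, sz.total_size);
	return total;
}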
190799a2dd95SBruce Richardson 
190899a2dd95SBruce Richardson /**
190999a2dd95SBruce Richardson  * Walk the list of all memory pools.
191099a2dd95SBruce Richardson  *
191199a2dd95SBruce Richardson  * @param func
191299a2dd95SBruce Richardson  *   Iterator function
191399a2dd95SBruce Richardson  * @param arg
191499a2dd95SBruce Richardson  *   Argument passed to iterator
191599a2dd95SBruce Richardson  */
191699a2dd95SBruce Richardson void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
191799a2dd95SBruce Richardson 		      void *arg);
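
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * a walk callback that prints every pool and counts them. The callback and
 * helper names are assumptions.
 */
static void
example_mempool_walk_cb(struct rte_mempool *mp, void *arg)
{
	unsigned int *count = arg;

	printf("found mempool %s (%u entries available)\n",
		mp->name, rte_mempool_avail_count(mp));
	(*count)++;
}

static inline unsigned int
example_mempool_count_pools(void)
{
	unsigned int count = 0;

	rte_mempool_walk(example_mempool_walk_cb, &count);
	return count;
}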
191899a2dd95SBruce Richardson 
191999a2dd95SBruce Richardson /**
19202f1015d8SPaul Szczepanek  * A structure used to retrieve information about the memory range
19212f1015d8SPaul Szczepanek  * of the mempool.
19222f1015d8SPaul Szczepanek  */
19232f1015d8SPaul Szczepanek struct rte_mempool_mem_range_info {
19242f1015d8SPaul Szczepanek 	/** Start of the memory range used by mempool objects */
19252f1015d8SPaul Szczepanek 	void *start;
19262f1015d8SPaul Szczepanek 	/** Length of the memory range used by mempool objects */
19272f1015d8SPaul Szczepanek 	size_t length;
19282f1015d8SPaul Szczepanek 	/** Are all memory addresses used by mempool objects contiguous */
19292f1015d8SPaul Szczepanek 	bool is_contiguous;
19302f1015d8SPaul Szczepanek };
19312f1015d8SPaul Szczepanek 
19322f1015d8SPaul Szczepanek /**
19332f1015d8SPaul Szczepanek  * @warning
19342f1015d8SPaul Szczepanek  * @b EXPERIMENTAL: this API may change without prior notice.
19352f1015d8SPaul Szczepanek  *
19362f1015d8SPaul Szczepanek  * Get information about the memory range used to store objects in the mempool.
19372f1015d8SPaul Szczepanek  *
19382f1015d8SPaul Szczepanek  * @param[in] mp
19392f1015d8SPaul Szczepanek  *   Pointer to an initialized mempool.
19402f1015d8SPaul Szczepanek  * @param[out] mem_range
19412f1015d8SPaul Szczepanek  *   Pointer to a struct used to return the lowest address, the length of
19422f1015d8SPaul Szczepanek  *   the memory range containing all the addresses, and whether these
19432f1015d8SPaul Szczepanek  *   addresses are contiguous.
19442f1015d8SPaul Szczepanek  * @return
19452f1015d8SPaul Szczepanek  *   0 on success, -EINVAL if the mempool is not valid or mem_range is NULL.
19462f1015d8SPaul Szczepanek  **/
19472f1015d8SPaul Szczepanek __rte_experimental
19482f1015d8SPaul Szczepanek int
19492f1015d8SPaul Szczepanek rte_mempool_get_mem_range(const struct rte_mempool *mp,
19502f1015d8SPaul Szczepanek 	struct rte_mempool_mem_range_info *mem_range);
19512f1015d8SPaul Szczepanek 
19522f1015d8SPaul Szczepanek /**
19532f1015d8SPaul Szczepanek  * @warning
19542f1015d8SPaul Szczepanek  * @b EXPERIMENTAL: this API may change without prior notice.
19552f1015d8SPaul Szczepanek  *
19562f1015d8SPaul Szczepanek  * Return alignment of objects stored in the mempool.
19572f1015d8SPaul Szczepanek  *
19582f1015d8SPaul Szczepanek  * @param[in] mp
19592f1015d8SPaul Szczepanek  *   Pointer to a mempool.
19602f1015d8SPaul Szczepanek  * @return
19612f1015d8SPaul Szczepanek  *   Object alignment if mp is valid. 0 if mp is NULL.
19622f1015d8SPaul Szczepanek  *
19632f1015d8SPaul Szczepanek  **/
19642f1015d8SPaul Szczepanek __rte_experimental
19652f1015d8SPaul Szczepanek size_t
19662f1015d8SPaul Szczepanek rte_mempool_get_obj_alignment(const struct rte_mempool *mp);
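
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * using the two experimental introspection helpers above, e.g. to decide
 * whether a pool can be registered for DMA as a single region. The helper
 * name is an assumption, and building against the experimental API must be
 * enabled for these calls.
 */
static inline int
example_mempool_describe_memory(const struct rte_mempool *mp)
{
	struct rte_mempool_mem_range_info range;
	size_t align;

	if (rte_mempool_get_mem_range(mp, &range) != 0)
		return -1;

	align = rte_mempool_get_obj_alignment(mp);

	printf("pool %s: %zu bytes starting at %p, %scontiguous, objects aligned to %zu bytes\n",
		mp->name, range.length, range.start,
		range.is_contiguous ? "" : "non-", align);
	return 0;
}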
19672f1015d8SPaul Szczepanek 
19682f1015d8SPaul Szczepanek /**
196999a2dd95SBruce Richardson  * @internal Get page size used for mempool object allocation.
197099a2dd95SBruce Richardson  * This function is internal to mempool library and mempool drivers.
197199a2dd95SBruce Richardson  */
197299a2dd95SBruce Richardson int
197399a2dd95SBruce Richardson rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
197499a2dd95SBruce Richardson 
1975da2b9cb2SDmitry Kozlyuk /**
1976da2b9cb2SDmitry Kozlyuk  * Mempool event type.
1977da2b9cb2SDmitry Kozlyuk  * @internal
1978da2b9cb2SDmitry Kozlyuk  */
1979da2b9cb2SDmitry Kozlyuk enum rte_mempool_event {
1980da2b9cb2SDmitry Kozlyuk 	/** Occurs after a mempool is fully populated. */
1981da2b9cb2SDmitry Kozlyuk 	RTE_MEMPOOL_EVENT_READY = 0,
1982da2b9cb2SDmitry Kozlyuk 	/** Occurs before the destruction of a mempool begins. */
1983da2b9cb2SDmitry Kozlyuk 	RTE_MEMPOOL_EVENT_DESTROY = 1,
1984da2b9cb2SDmitry Kozlyuk };
1985da2b9cb2SDmitry Kozlyuk 
1986da2b9cb2SDmitry Kozlyuk /**
1987da2b9cb2SDmitry Kozlyuk  * @internal
1988da2b9cb2SDmitry Kozlyuk  * Mempool event callback.
1989da2b9cb2SDmitry Kozlyuk  *
1990da2b9cb2SDmitry Kozlyuk  * rte_mempool_event_callback_register() may be called from within the callback,
1991da2b9cb2SDmitry Kozlyuk  * but the callbacks registered this way will not be invoked for the same event.
1992da2b9cb2SDmitry Kozlyuk  * rte_mempool_event_callback_unregister() may only be safely called
1993da2b9cb2SDmitry Kozlyuk  * to remove the running callback.
1994da2b9cb2SDmitry Kozlyuk  */
1995da2b9cb2SDmitry Kozlyuk typedef void (rte_mempool_event_callback)(
1996da2b9cb2SDmitry Kozlyuk 		enum rte_mempool_event event,
1997da2b9cb2SDmitry Kozlyuk 		struct rte_mempool *mp,
1998da2b9cb2SDmitry Kozlyuk 		void *user_data);
1999da2b9cb2SDmitry Kozlyuk 
2000da2b9cb2SDmitry Kozlyuk /**
2001da2b9cb2SDmitry Kozlyuk  * @internal
2002da2b9cb2SDmitry Kozlyuk  * Register a callback function invoked on mempool life cycle event.
2003da2b9cb2SDmitry Kozlyuk  * The function will be invoked in the process
2004da2b9cb2SDmitry Kozlyuk  * that performs an action which triggers the callback.
200503b3cdf9SDmitry Kozlyuk  * Registration is process-private,
200603b3cdf9SDmitry Kozlyuk  * i.e. each process must manage callbacks on its own if needed.
2007da2b9cb2SDmitry Kozlyuk  *
2008da2b9cb2SDmitry Kozlyuk  * @param func
2009da2b9cb2SDmitry Kozlyuk  *   Callback function.
2010da2b9cb2SDmitry Kozlyuk  * @param user_data
2011da2b9cb2SDmitry Kozlyuk  *   User data.
2012da2b9cb2SDmitry Kozlyuk  *
2013da2b9cb2SDmitry Kozlyuk  * @return
2014da2b9cb2SDmitry Kozlyuk  *   0 on success, negative on failure and rte_errno is set.
2015da2b9cb2SDmitry Kozlyuk  */
2016da2b9cb2SDmitry Kozlyuk __rte_internal
2017da2b9cb2SDmitry Kozlyuk int
2018da2b9cb2SDmitry Kozlyuk rte_mempool_event_callback_register(rte_mempool_event_callback *func,
2019da2b9cb2SDmitry Kozlyuk 				    void *user_data);
2020da2b9cb2SDmitry Kozlyuk 
2021da2b9cb2SDmitry Kozlyuk /**
2022da2b9cb2SDmitry Kozlyuk  * @internal
2023da2b9cb2SDmitry Kozlyuk  * Unregister a callback added with rte_mempool_event_callback_register().
2024da2b9cb2SDmitry Kozlyuk  * @p func and @p user_data must exactly match registration parameters.
2025da2b9cb2SDmitry Kozlyuk  *
2026da2b9cb2SDmitry Kozlyuk  * @param func
2027da2b9cb2SDmitry Kozlyuk  *   Callback function.
2028da2b9cb2SDmitry Kozlyuk  * @param user_data
2029da2b9cb2SDmitry Kozlyuk  *   User data.
2030da2b9cb2SDmitry Kozlyuk  *
2031da2b9cb2SDmitry Kozlyuk  * @return
2032da2b9cb2SDmitry Kozlyuk  *   0 on success, negative on failure and rte_errno is set.
2033da2b9cb2SDmitry Kozlyuk  */
2034da2b9cb2SDmitry Kozlyuk __rte_internal
2035da2b9cb2SDmitry Kozlyuk int
2036da2b9cb2SDmitry Kozlyuk rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
2037da2b9cb2SDmitry Kozlyuk 				      void *user_data);
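
/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the shape of an event callback as mempool-aware code inside DPDK (this API
 * is internal) might register it. The callback name is an assumption.
 */
static void
example_mempool_event_cb(enum rte_mempool_event event,
		struct rte_mempool *mp, void *user_data)
{
	RTE_SET_USED(user_data);

	if (event == RTE_MEMPOOL_EVENT_READY)
		printf("mempool %s is fully populated\n", mp->name);
	else if (event == RTE_MEMPOOL_EVENT_DESTROY)
		printf("mempool %s is about to be destroyed\n", mp->name);
}

/*
 * Registration and removal (internal API) would look like:
 *   rte_mempool_event_callback_register(example_mempool_event_cb, NULL);
 *   ...
 *   rte_mempool_event_callback_unregister(example_mempool_event_cb, NULL);
 */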
2038da2b9cb2SDmitry Kozlyuk 
203999a2dd95SBruce Richardson #ifdef __cplusplus
204099a2dd95SBruce Richardson }
204199a2dd95SBruce Richardson #endif
204299a2dd95SBruce Richardson 
204399a2dd95SBruce Richardson #endif /* _RTE_MEMPOOL_H_ */
2044