xref: /dpdk/drivers/mempool/ring/rte_mempool_ring.c (revision cb77b060ebecb9a14c5f7c7622535a1f6e71e2fa)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>

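/*
 * Note: the ring bulk enqueue/dequeue calls used below are all-or-nothing,
 * returning the number of objects processed, i.e. either n or 0. The
 * mempool ops API instead expects 0 on success and a negative errno on
 * failure, hence the translation of 0 into -ENOBUFS.
 */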
static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_mp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_sp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
	unsigned int n)
{
	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
	unsigned int n)
{
	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_mc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_sc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
common_ring_get_count(const struct rte_mempool *mp)
{
	return rte_ring_count(mp->pool_data);
}

static int
ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
{
	int ret;
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_ring *r;

	ret = snprintf(rg_name, sizeof(rg_name),
		RTE_MEMPOOL_MZ_FORMAT, mp->name);
	if (ret < 0 || ret >= (int)sizeof(rg_name)) {
		rte_errno = ENAMETOOLONG;
		return -rte_errno;
	}

	/*
	 * Allocate the ring that will be used to store objects.
	 * Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks are made
	 * in this function for that condition.
	 */
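	/*
	 * Note: a ring's count must be a power of two and it can hold at
	 * most count - 1 objects, so mp->size + 1 is rounded up to the
	 * next power of two to guarantee room for every object in the pool.
	 */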
	r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
		mp->socket_id, rg_flags);
	if (r == NULL)
		return -rte_errno;

	mp->pool_data = r;

	return 0;
}

static int
common_ring_alloc(struct rte_mempool *mp)
{
	uint32_t rg_flags = 0;

	if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (mp->flags & RTE_MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	return ring_alloc(mp, rg_flags);
}

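/*
 * "Relaxed tail sync" (RTS) ring mode: multiple producers/consumers may
 * operate concurrently and the ring tail is only advanced by the last
 * thread to finish, which behaves better when threads can be preempted
 * in the middle of an enqueue/dequeue.
 */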
static int
rts_ring_alloc(struct rte_mempool *mp)
{
	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
}

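/*
 * "Head/tail sync" (HTS) ring mode: enqueue and dequeue are fully
 * serialized, i.e. only one enqueue (and one dequeue) operation can be
 * in progress on the ring at a time.
 */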
static int
hts_ring_alloc(struct rte_mempool *mp)
{
	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
}

static void
common_ring_free(struct rte_mempool *mp)
{
	rte_ring_free(mp->pool_data);
}

/*
 * The following four declarations of mempool ops structs provide the
 * backward-compatible mempool handlers for single/multi producers and
 * single/multi consumers, as dictated by the flags passed to the
 * rte_mempool_create function.
 */
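/*
 * For example, rte_mempool_create() selects "ring_sp_sc" when both
 * RTE_MEMPOOL_F_SP_PUT and RTE_MEMPOOL_F_SC_GET are given, and the
 * default "ring_mp_mc" when neither flag is set.
 */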
static const struct rte_mempool_ops ops_mp_mc = {
	.name = "ring_mp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_sc = {
	.name = "ring_sp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_mp_sc = {
	.name = "ring_mp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_mc = {
	.name = "ring_sp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

/* ops for mempool with ring in MT_RTS sync mode */
static const struct rte_mempool_ops ops_mt_rts = {
	.name = "ring_mt_rts",
	.alloc = rts_ring_alloc,
	.free = common_ring_free,
	.enqueue = rts_ring_mp_enqueue,
	.dequeue = rts_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

/* ops for mempool with ring in MT_HTS sync mode */
static const struct rte_mempool_ops ops_mt_hts = {
	.name = "ring_mt_hts",
	.alloc = hts_ring_alloc,
	.free = common_ring_free,
	.enqueue = hts_ring_mp_enqueue,
	.dequeue = hts_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

RTE_MEMPOOL_REGISTER_OPS(ops_mp_mc);
RTE_MEMPOOL_REGISTER_OPS(ops_sp_sc);
RTE_MEMPOOL_REGISTER_OPS(ops_mp_sc);
RTE_MEMPOOL_REGISTER_OPS(ops_sp_mc);
RTE_MEMPOOL_REGISTER_OPS(ops_mt_rts);
RTE_MEMPOOL_REGISTER_OPS(ops_mt_hts);
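
/*
 * Illustrative sketch only, not part of this driver: how an application
 * might bind a mempool to one of the handlers registered above by name.
 * The function name, the pool name "example_pool" and the sizing
 * parameters are hypothetical.
 */
static __rte_unused struct rte_mempool *
example_create_hts_pool(unsigned int nb_objs, unsigned int obj_size)
{
	struct rte_mempool *mp;

	/* Create an empty pool: objects are added only after the ops
	 * (and hence the backing ring) have been selected.
	 */
	mp = rte_mempool_create_empty("example_pool", nb_objs, obj_size,
			0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* Pick the head/tail-sync ring handler, then populate the pool,
	 * which triggers hts_ring_alloc() above.
	 */
	if (rte_mempool_set_ops_byname(mp, "ring_mt_hts", NULL) != 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}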