/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>

static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_mp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_sp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n)
{
	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n)
{
	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_mc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_sc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
common_ring_get_count(const struct rte_mempool *mp)
{
	return rte_ring_count(mp->pool_data);
}

static int
ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
{
	int ret;
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_ring *r;

	ret = snprintf(rg_name, sizeof(rg_name),
		RTE_MEMPOOL_MZ_FORMAT, mp->name);
	if (ret < 0 || ret >= (int)sizeof(rg_name)) {
		rte_errno = ENAMETOOLONG;
		return -rte_errno;
	}

	/*
	 * Allocate the ring that will be used to store objects.
	 * Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks made
	 * in this function for that condition.
	 */
	r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
		mp->socket_id, rg_flags);
	if (r == NULL)
		return -rte_errno;

	mp->pool_data = r;

	return 0;
}

static int
common_ring_alloc(struct rte_mempool *mp)
{
	uint32_t rg_flags = 0;

	if (mp->flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (mp->flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	return ring_alloc(mp, rg_flags);
}

static int
rts_ring_alloc(struct rte_mempool *mp)
{
	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
}

static int
hts_ring_alloc(struct rte_mempool *mp)
{
	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
}

static void
common_ring_free(struct rte_mempool *mp)
{
	rte_ring_free(mp->pool_data);
}

/*
 * The following 4 declarations of mempool ops structs address
 * the need for the backward compatible mempool handlers for
 * single/multi producers and single/multi consumers as dictated by the
 * flags provided to the rte_mempool_create function
 */
static const struct rte_mempool_ops ops_mp_mc = {
	.name = "ring_mp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_sc = {
	.name = "ring_sp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_mp_sc = {
	.name = "ring_mp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_mc = {
	.name = "ring_sp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

/* ops for mempool with ring in MT_RTS sync mode */
static const struct rte_mempool_ops ops_mt_rts = {
	.name = "ring_mt_rts",
	.alloc = rts_ring_alloc,
	.free = common_ring_free,
	.enqueue = rts_ring_mp_enqueue,
	.dequeue = rts_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

/* ops for mempool with ring in MT_HTS sync mode */
static const struct rte_mempool_ops ops_mt_hts = {
	.name = "ring_mt_hts",
	.alloc = hts_ring_alloc,
	.free = common_ring_free,
	.enqueue = hts_ring_mp_enqueue,
	.dequeue = hts_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

MEMPOOL_REGISTER_OPS(ops_mp_mc);
MEMPOOL_REGISTER_OPS(ops_sp_sc);
MEMPOOL_REGISTER_OPS(ops_mp_sc);
MEMPOOL_REGISTER_OPS(ops_sp_mc);
MEMPOOL_REGISTER_OPS(ops_mt_rts);
MEMPOOL_REGISTER_OPS(ops_mt_hts);
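
/*
 * Illustrative usage sketch (not part of the upstream driver): how an
 * application might end up on one of the handlers registered above. The
 * classic "ring_mp_mc"/"ring_sp_sc"/"ring_mp_sc"/"ring_sp_mc" handlers are
 * picked implicitly by rte_mempool_create() from the MEMPOOL_F_SP_PUT and
 * MEMPOOL_F_SC_GET flags, while the RTS/HTS handlers are selected by name
 * on an empty pool. The function name and the sizing constants below are
 * hypothetical; error reporting is kept minimal.
 */
static struct rte_mempool *
example_create_rts_pool(void)
{
	struct rte_mempool *mp;

	/* Empty pool: 4096 objects of 2048 bytes, 256-object per-lcore cache. */
	mp = rte_mempool_create_empty("example_rts_pool", 4096, 2048,
			256, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* Bind the pool to the MT_RTS ring handler ("ring_mt_rts" above),
	 * then populate it with objects.
	 */
	if (rte_mempool_set_ops_byname(mp, "ring_mt_rts", NULL) != 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}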