/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium Networks
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "zlib_pmd_private.h"

static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
	{ /* Deflate */
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
					RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC),
		.window_size = {
			.min = 8,
			.max = 15,
			.increment = 1
		},
	},

	RTE_COMP_END_OF_CAPABILITIES_LIST()

};

/** Configure device */
static int
zlib_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct rte_mempool *mp;
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct zlib_private *internals = dev->data->dev_private;

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"stream_mp_%u", dev->data->dev_id);
	mp = internals->mp;
	if (mp == NULL) {
		/* Single pool backing both private xforms and streams */
		mp = rte_mempool_create(mp_name,
				config->max_nb_priv_xforms +
				config->max_nb_streams,
				sizeof(struct zlib_priv_xform),
				0, 0, NULL, NULL, NULL,
				NULL, config->socket_id,
				0);
		if (mp == NULL) {
			ZLIB_PMD_ERR("Cannot create private xform pool on "
					"socket %d", config->socket_id);
			return -ENOMEM;
		}
		internals->mp = mp;
	}
	return 0;
}

/** Start device */
static int
zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}

/** Close device */
static int
zlib_pmd_close(struct rte_compressdev *dev)
{
	struct zlib_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->mp);
	internals->mp = NULL;
	return 0;
}

/** Get device statistics */
static void
zlib_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zlib_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zlib_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zlib_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zlib_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = zlib_pmd_capabilities;
	}
}

/** Release queue pair */
static int
zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zlib_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		rte_ring_free(qp->processed_pkts);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
		struct zlib_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
				"zlib_pmd_%u_qp_%u",
				dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r = qp->processed_pkts;

	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZLIB_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
				RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zlib_qp *qp = NULL;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		zlib_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (zlib_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
			max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp) {
		rte_free(qp);
		qp = NULL;
	}
	/* Don't leave a dangling pointer behind on failure */
	dev->data->queue_pairs[qp_id] = NULL;
	return -1;
}

/** Configure stream */
static int
zlib_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform,
		void **zstream)
{
	int ret = 0;
	struct zlib_stream *stream;
	struct zlib_private *internals = dev->data->dev_private;

	if (xform == NULL) {
		ZLIB_PMD_ERR("invalid xform struct");
		return -EINVAL;
	}

	if (rte_mempool_get(internals->mp, zstream)) {
		ZLIB_PMD_ERR("Couldn't get object from stream mempool");
		return -ENOMEM;
	}
	stream = *((struct zlib_stream **)zstream);

	ret = zlib_set_stream_parameters(xform, stream);

	if (ret < 0) {
		ZLIB_PMD_ERR("failed to configure stream parameters");

		memset(stream, 0, sizeof(struct zlib_stream));
		/* Return stream object to mempool */
		rte_mempool_put(internals->mp, stream);
		return ret;
	}

	return 0;
}

/** Configure private xform */
static int
zlib_pmd_private_xform_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform,
		void **private_xform)
{
	return zlib_pmd_stream_create(dev, xform, private_xform);
}

/** Clear the stream memory so no state is left behind and return it
 * to its mempool
 */
static int
zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev,
		void *zstream)
{
	struct zlib_stream *stream = (struct zlib_stream *)zstream;

	if (!stream)
		return -EINVAL;

	stream->free(&stream->strm);
	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zlib_stream));
	struct rte_mempool *mp = rte_mempool_from_obj(stream);
	rte_mempool_put(mp, stream);

	return 0;
}

/** Clear the memory of the private xform and return it to its mempool */
static int
zlib_pmd_private_xform_free(struct rte_compressdev *dev,
		void *private_xform)
{
	return zlib_pmd_stream_free(dev, private_xform);
}

struct rte_compressdev_ops zlib_pmd_ops = {
		.dev_configure		= zlib_pmd_config,
		.dev_start		= zlib_pmd_start,
		.dev_stop		= zlib_pmd_stop,
		.dev_close		= zlib_pmd_close,

		.stats_get		= zlib_pmd_stats_get,
		.stats_reset		= zlib_pmd_stats_reset,

		.dev_infos_get		= zlib_pmd_info_get,

		.queue_pair_setup	= zlib_pmd_qp_setup,
		.queue_pair_release	= zlib_pmd_qp_release,

		.private_xform_create	= zlib_pmd_private_xform_create,
		.private_xform_free	= zlib_pmd_private_xform_free,

		.stream_create		= NULL,
		.stream_free		= NULL
};

struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
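
/*
 * Illustrative sketch only (excluded from the build): roughly how an
 * application reaches the ops above through the public compressdev API,
 * i.e. dev_configure -> queue_pair_setup -> start -> private_xform_create.
 * The helper name and the numeric values (private xform count, inflight-op
 * count, queue pair id) are hypothetical examples, not part of this driver;
 * see the rte_compressdev documentation for authoritative usage.
 */
#if 0
#include <rte_compressdev.h>
#include <rte_lcore.h>

static int
setup_zlib_dev(uint8_t dev_id, void **priv_xform)
{
	struct rte_compressdev_config cfg = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = 1,
		/* Together these size the "stream_mp_<dev_id>" mempool */
		.max_nb_priv_xforms = 16,
		.max_nb_streams = 0,
	};
	struct rte_comp_xform xform = {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
			.window_size = 15,
		},
	};

	if (rte_compressdev_configure(dev_id, &cfg) < 0)
		return -1;
	/* max_inflight_ops sizes the per-qp processed-packets ring */
	if (rte_compressdev_queue_pair_setup(dev_id, 0, 512,
			rte_socket_id()) < 0)
		return -1;
	if (rte_compressdev_start(dev_id) < 0)
		return -1;

	return rte_compressdev_private_xform_create(dev_id, &xform,
			priv_xform);
}
#endif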