/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "ipsec_mb_private.h"

#define IMB_MP_REQ_VER_STR "1.1.0"

/** Configure device */
int
ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
                __rte_unused struct rte_cryptodev_config *config)
{
        return 0;
}

/** Start device */
int
ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

/** Stop device */
void
ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
int
ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_stats *stats)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

                if (qp == NULL) {
                        IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
                        continue;
                }

                stats->enqueued_count += qp->stats.enqueued_count;
                stats->dequeued_count += qp->stats.dequeued_count;

                stats->enqueue_err_count += qp->stats.enqueue_err_count;
                stats->dequeue_err_count += qp->stats.dequeue_err_count;
        }
}

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

                /* Skip uninitialised queue pairs, as stats_get() does */
                if (qp == NULL)
                        continue;

                memset(&qp->stats, 0, sizeof(qp->stats));
        }
}

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_info *dev_info)
{
        struct ipsec_mb_dev_private *internals = dev->data->dev_private;
        struct ipsec_mb_internals *pmd_info =
                &ipsec_mb_pmds[internals->pmd_type];

        if (dev_info != NULL) {
                dev_info->driver_id = dev->driver_id;
                dev_info->feature_flags = dev->feature_flags;
                dev_info->capabilities = pmd_info->caps;
                dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
                /* No limit on the number of sessions */
                dev_info->sym.max_nb_sessions = 0;
        }
}

/** Release queue pair */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
        struct rte_ring *r = NULL;

        if (qp != NULL && rte_eal_process_type() == RTE_PROC_PRIMARY) {
                r = rte_ring_lookup(qp->name);
                if (r)
                        rte_ring_free(r);

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
                if (qp->mb_mgr)
                        free_mb_mgr(qp->mb_mgr);
#else
                if (qp->mb_mgr_mz) {
                        rte_memzone_free(qp->mb_mgr_mz);
                        qp->mb_mgr = NULL;
                }
#endif
                rte_free(qp);
                dev->data->queue_pairs[qp_id] = NULL;
        }
        return 0;
}

/** Set a unique name for the queue pair */
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
                struct ipsec_mb_qp *qp)
{
        uint32_t n =
                snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);

        if (n >= sizeof(qp->name))
                return -1;

        return 0;
}
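
/*
 * Example (illustrative): with the format string above, device 0 /
 * queue pair 1 is named "ipsec_mb_pmd_0_qp_1". The same name is reused
 * verbatim for the processed-ops ring created below, so it must remain
 * unique per (device, queue pair) and fit in RTE_CRYPTODEV_NAME_MAX_LEN.
 */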

/** Create a ring to place processed operations on */
static struct rte_ring
*ipsec_mb_qp_create_processed_ops_ring(
        struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
{
        struct rte_ring *r;
        char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

        unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));

        if (n >= sizeof(ring_name))
                return NULL;

        r = rte_ring_lookup(ring_name);
        if (r) {
                if (rte_ring_get_size(r) >= ring_size) {
                        IPSEC_MB_LOG(
                            INFO, "Reusing existing ring %s for processed ops",
                            ring_name);
                        return r;
                }
                IPSEC_MB_LOG(
                    ERR, "Unable to reuse existing ring %s for processed ops",
                    ring_name);
                return NULL;
        }

        return rte_ring_create(ring_name, ring_size, socket_id,
                        RING_F_SP_ENQ | RING_F_SC_DEQ);
}

#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
                const char *mb_mgr_mz_name)
{
        IMB_MGR *mb_mgr;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* Primary: reserve the memzone (or reuse one left over
                 * from a previous run) and fully initialise the IMB_MGR
                 * laid out inside it.
                 */
                *mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
                if (*mb_mgr_mz == NULL) {
                        *mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
                                        imb_get_mb_mgr_size(),
                                        rte_socket_id(), 0);
                }
                if (*mb_mgr_mz == NULL) {
                        IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
                                        mb_mgr_mz_name);
                        return NULL;
                }
                mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
                init_mb_mgr_auto(mb_mgr, NULL);
        } else {
                /* Secondary: attach to the primary's memzone and rebuild
                 * only this process's function pointers.
                 */
                *mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
                if (*mb_mgr_mz == NULL) {
                        IPSEC_MB_LOG(ERR,
                                "Secondary can't find %s mz, did primary create it?",
                                mb_mgr_mz_name);
                        return NULL;
                }
                mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
        }
        return mb_mgr;
}
#endif
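
/*
 * Multiprocess sketch (illustrative; the memzone name follows the mz_name
 * format used in ipsec_mb_qp_setup() below). Because the IMB_MGR lives in
 * a named memzone, primary and secondary processes share one manager per
 * queue pair, roughly:
 *
 *   primary:    rte_memzone_reserve("IMB_MGR_DEV_0_QP_0", ...);
 *               imb_set_pointers_mb_mgr(addr, 0, 1);   // reset state
 *               init_mb_mgr_auto(mb_mgr, NULL);
 *   secondary:  rte_memzone_lookup("IMB_MGR_DEV_0_QP_0");
 *               imb_set_pointers_mb_mgr(addr, 0, 0);   // keep state
 *
 * Only the per-process function pointers are rebuilt in the secondary;
 * the manager state in the shared memzone is left untouched.
 */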

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                const struct rte_cryptodev_qp_conf *qp_conf,
                int socket_id)
{
        struct ipsec_mb_qp *qp = NULL;
        struct ipsec_mb_dev_private *internals = dev->data->dev_private;
        struct ipsec_mb_internals *pmd_data =
                &ipsec_mb_pmds[internals->pmd_type];
        uint32_t qp_size;
        int ret;

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
                IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess, "
                                "the minimum version required for this feature is %s.",
                                IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
                return -EINVAL;
#endif
                qp = dev->data->queue_pairs[qp_id];
                if (qp == NULL) {
                        IPSEC_MB_LOG(ERR, "Primary process hasn't configured device qp.");
                        return -EINVAL;
                }
        } else {
                /* Free memory prior to re-allocation if needed. */
                if (dev->data->queue_pairs[qp_id] != NULL)
                        ipsec_mb_qp_release(dev, qp_id);

                qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
                /* Allocate the queue pair data structure. */
                qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
                                RTE_CACHE_LINE_SIZE, socket_id);
                if (qp == NULL)
                        return -ENOMEM;
        }

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
        qp->mb_mgr = alloc_init_mb_mgr();
#else
        char mz_name[IPSEC_MB_MAX_MZ_NAME];
        snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
                        dev->data->dev_id, qp_id);
        qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
                        mz_name);
#endif
        if (qp->mb_mgr == NULL) {
                ret = -ENOMEM;
                goto qp_setup_cleanup;
        }

        /* Secondary reuses the qp the primary configured; nothing more to do. */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return 0;

        qp->id = qp_id;
        dev->data->queue_pairs[qp_id] = qp;
        if (ipsec_mb_qp_set_unique_name(dev, qp)) {
                ret = -EINVAL;
                goto qp_setup_cleanup;
        }

        qp->pmd_type = internals->pmd_type;
        qp->sess_mp = qp_conf->mp_session;
        qp->sess_mp_priv = qp_conf->mp_session_private;

        qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
                        qp_conf->nb_descriptors, socket_id);
        if (qp->ingress_queue == NULL) {
                ret = -EINVAL;
                goto qp_setup_cleanup;
        }

        memset(&qp->stats, 0, sizeof(qp->stats));

        if (pmd_data->queue_pair_configure) {
                ret = pmd_data->queue_pair_configure(qp);
                if (ret < 0)
                        goto qp_setup_cleanup;
        }

        return 0;

qp_setup_cleanup:
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
        if (qp->mb_mgr)
                free_mb_mgr(qp->mb_mgr);
#else
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return ret;
        if (qp->mb_mgr_mz)
                rte_memzone_free(qp->mb_mgr_mz);
#endif
        rte_free(qp);
        return ret;
}

/** Return the size of the specific pmd session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
{
        struct ipsec_mb_dev_private *internals = dev->data->dev_private;
        struct ipsec_mb_internals *pmd_data =
                &ipsec_mb_pmds[internals->pmd_type];

        return pmd_data->session_priv_size;
}

/** Configure pmd specific multi-buffer session from a crypto xform chain */
int
ipsec_mb_sym_session_configure(
        struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool)
{
        void *sess_private_data;
        struct ipsec_mb_dev_private *internals = dev->data->dev_private;
        struct ipsec_mb_internals *pmd_data =
                &ipsec_mb_pmds[internals->pmd_type];
        IMB_MGR *mb_mgr = alloc_init_mb_mgr();
        int ret = 0;

        if (!mb_mgr)
                return -ENOMEM;

        if (unlikely(sess == NULL)) {
                IPSEC_MB_LOG(ERR, "invalid session struct");
                free_mb_mgr(mb_mgr);
                return -EINVAL;
        }

        if (rte_mempool_get(mempool, &sess_private_data)) {
                IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
                free_mb_mgr(mb_mgr);
                return -ENOMEM;
        }

        ret = (*pmd_data->session_configure)(mb_mgr, sess_private_data, xform);
        if (ret != 0) {
                IPSEC_MB_LOG(ERR, "failed to configure session parameters");

                /* Return session to mempool */
                rte_mempool_put(mempool, sess_private_data);
                free_mb_mgr(mb_mgr);
                return ret;
        }

        set_sym_session_private_data(sess, dev->driver_id, sess_private_data);

        free_mb_mgr(mb_mgr);
        return 0;
}
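
/*
 * Usage sketch (hypothetical application code, assuming mempools sess_mp
 * and sess_priv_mp created by the application): the configure/clear
 * callbacks in this file are reached through the cryptodev layer roughly
 * as follows:
 *
 *   struct rte_cryptodev_sym_session *sess =
 *           rte_cryptodev_sym_session_create(sess_mp);
 *   rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *   ... enqueue/dequeue crypto ops referencing sess ...
 *   rte_cryptodev_sym_session_clear(dev_id, sess);
 *   rte_cryptodev_sym_session_free(sess);
 */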

/** Clear the session memory */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        uint8_t index = dev->driver_id;
        void *sess_priv = get_sym_session_private_data(sess, index);

        if (sess_priv) {
                struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

                /* Zero out the whole structure so no key material lingers */
                memset(sess_priv, 0, ipsec_mb_sym_session_get_size(dev));
                set_sym_session_private_data(sess, index, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
}