/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <string.h>

#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "ccp_pmd_private.h"
#include "ccp_dev.h"
#include "ccp_crypto.h"

static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
	{ /* AES ECB */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 16,
					.max = 24,
					.increment = 8
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static int
ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_config *config __rte_unused)
{
	return 0;
}

static int
ccp_pmd_start(struct rte_cryptodev *dev)
{
	return ccp_dev_start(dev);
}

static void
ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
{
}

static int
ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}

static void
ccp_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

static void
ccp_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

static void
ccp_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct ccp_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = ccp_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
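
/*
 * Release a queue pair: free the processed-packet ring and the
 * batch-info mempool, then free the queue pair structure itself.
 */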
static int
ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct ccp_qp *qp;

	if (dev->data->queue_pairs[qp_id] != NULL) {
		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
		rte_ring_free(qp->processed_pkts);
		rte_mempool_free(qp->batch_mp);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

static int
ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct ccp_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"ccp_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() returns the length the untruncated string would
	 * have had, so n >= sizeof(qp->name) means the name did not fit.
	 */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

static struct rte_ring *
ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	/* Reuse an existing ring of sufficient size, if one exists. */
	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->size >= ring_size) {
			CCP_LOG_INFO(
				"Reusing ring %s for processed packets",
				qp->name);
			return r;
		}
		CCP_LOG_INFO(
			"Unable to reuse ring %s for processed packets",
			qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
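
/*
 * Set up a queue pair: allocate the qp structure, give it a unique
 * name, create (or reuse) the processed-packet ring, create the
 * batch-info mempool and reset the qp statistics. Any queue pair
 * already present at this index is released first.
 */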
static int
ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct ccp_private *internals = dev->data->dev_private;
	struct ccp_qp *qp;
	int retval = 0;

	if (qp_id >= internals->max_nb_qpairs) {
		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
			    qp_id, internals->max_nb_qpairs);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		ccp_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		CCP_LOG_ERR("Failed to allocate queue pair memory");
		return -ENOMEM;
	}

	qp->dev = dev;
	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	retval = ccp_pmd_qp_set_unique_name(dev, qp);
	if (retval) {
		CCP_LOG_ERR("Failed to create unique name for ccp qp");
		goto qp_setup_cleanup;
	}

	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL) {
		CCP_LOG_ERR("Failed to create batch info ring");
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	/* Mempool for batch info */
	qp->batch_mp = rte_mempool_create(
			qp->name,
			qp_conf->nb_descriptors,
			sizeof(struct ccp_batch_info),
			RTE_CACHE_LINE_SIZE,
			0, NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (qp->batch_mp == NULL) {
		CCP_LOG_ERR("Failed to create batch info mempool");
		goto qp_setup_cleanup;
	}
	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	dev->data->queue_pairs[qp_id] = NULL;
	rte_free(qp);
	return -1;
}

static int
ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
		uint16_t queue_pair_id __rte_unused)
{
	return -ENOTSUP;
}

static int
ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
		uint16_t queue_pair_id __rte_unused)
{
	return -ENOTSUP;
}

static uint32_t
ccp_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

static unsigned
ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct ccp_session);
}

static int
ccp_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	int ret;
	void *sess_private_data;

	if (unlikely(sess == NULL || xform == NULL)) {
		CCP_LOG_ERR("Invalid session struct or xform");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CCP_LOG_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}
	ret = ccp_set_session_parameters(sess_private_data, xform);
	if (ret != 0) {
		CCP_LOG_ERR("Failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}
	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

static void
ccp_pmd_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		/* Scrub the private data before returning it to the pool. */
		memset(sess_priv, 0, sizeof(struct ccp_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
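
/* Device-operations table exported to the cryptodev framework. */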
struct rte_cryptodev_ops ccp_ops = {
	.dev_configure = ccp_pmd_config,
	.dev_start = ccp_pmd_start,
	.dev_stop = ccp_pmd_stop,
	.dev_close = ccp_pmd_close,

	.stats_get = ccp_pmd_stats_get,
	.stats_reset = ccp_pmd_stats_reset,

	.dev_infos_get = ccp_pmd_info_get,

	.queue_pair_setup = ccp_pmd_qp_setup,
	.queue_pair_release = ccp_pmd_qp_release,
	.queue_pair_start = ccp_pmd_qp_start,
	.queue_pair_stop = ccp_pmd_qp_stop,
	.queue_pair_count = ccp_pmd_qp_count,

	.session_get_size = ccp_pmd_session_get_size,
	.session_configure = ccp_pmd_session_configure,
	.session_clear = ccp_pmd_session_clear,
};

struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;