/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_cryptodev.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>

#include "pmd_kasumi_priv.h"

/** Parse crypto xform chain and set private session parameters. */
static int
kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
			  const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum ipsec_mb_operation mode;
	struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
	/* Select Crypto operation - hash then cipher / cipher then hash */
	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, NULL);

	if (ret)
		return ret;

	if (cipher_xform) {
		/* Only KASUMI F8 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
			IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
			return -ENOTSUP;
		}

		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}

		/* Initialize key */
		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
					      cipher_xform->cipher.key.data,
					      &sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only KASUMI F9 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
			IPSEC_MB_LOG(ERR, "Unsupported authentication");
			return -ENOTSUP;
		}

		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}

		sess->auth_op = auth_xform->auth.op;

		/* Initialize key */
		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
					      &sess->pKeySched_hash);
	}

	sess->op = mode;
	return ret;
}

/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
			  struct kasumi_session *session, uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	const void *src[num_ops];
	void *dst[num_ops];
	uint8_t *iv_ptr;
	uint64_t iv[num_ops];
	uint32_t num_bytes[num_ops];

	for (i = 0; i < num_ops; i++) {
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
			 + (ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst
			     ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
				   + (ops[i]->sym->cipher.data.offset >> 3)
			     : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
				   + (ops[i]->sym->cipher.data.offset >> 3);
		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
						    session->cipher_iv_offset);
		iv[i] = *((uint64_t *)(iv_ptr));
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

	if (processed_ops != 0)
		IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
					iv, src, dst, num_bytes,
					processed_ops);

	return processed_ops;
}

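/*
 * Note: cipher.data.offset and cipher.data.length are expressed in bits for
 * KASUMI, so the byte-oriented path above shifts them right by 3 to obtain
 * byte positions. For example, an op with offset = 64 bits and
 * length = 1024 bits starts at byte 8 of the mbuf data and covers 128 bytes.
 * Ops whose cipher offset or length is not a multiple of 8 bits never reach
 * this helper; the dequeue path routes them to the bit-level variant below.
 */
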
/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
			      struct kasumi_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	uint64_t iv;
	uint32_t length_in_bits, offset_in_bits;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL)
		dst = src;
	else
		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
					    session->cipher_iv_offset);
	iv = *((uint64_t *)(iv_ptr));
	length_in_bits = op->sym->cipher.data.length;

	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
				    src, dst, length_in_bits, offset_in_bits);

	return 1;
}

/** Generate/verify hash from mbufs with same hash key. */
static int
process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
			struct kasumi_session *session, uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint32_t num_bytes;
	struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR, "Invalid Offset");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
		      + (ops[i]->sym->auth.data.offset >> 3);
		/* Direction from next bit after end of message */
		num_bytes = length_in_bits >> 3;

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			dst = qp_data->temp_digest;
			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
						&session->pKeySched_hash, src,
						num_bytes, dst);

			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
				    KASUMI_DIGEST_LENGTH)
			    != 0)
				ops[i]->status
				    = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			dst = ops[i]->sym->auth.digest.data;

			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
						&session->pKeySched_hash, src,
						num_bytes, dst);
		}
		processed_ops++;
	}

	return processed_ops;
}

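/*
 * Note on the hash path above: when the session requests
 * RTE_CRYPTO_AUTH_OP_VERIFY, the 4-byte F9 MAC is computed into the
 * per-queue-pair scratch buffer (qp_data->temp_digest) and compared against
 * the digest attached to the op, downgrading the op status to
 * RTE_CRYPTO_OP_STATUS_AUTH_FAILED on mismatch. For generation, the MAC is
 * written directly to ops[i]->sym->auth.digest.data.
 */
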
/** Process a batch of crypto ops which shares the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
	     struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	unsigned int i;
	unsigned int processed_ops;

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops
		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops
		    = process_kasumi_hash_op(qp, ops, session, num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		processed_ops
		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
		process_kasumi_hash_op(qp, ops, session, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_ops
		    = process_kasumi_hash_op(qp, ops, session, num_ops);
		process_kasumi_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct kasumi_session));
			memset(
			    ops[i]->sym->session, 0,
			    rte_cryptodev_sym_get_existing_header_session_size(
				ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}

/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
		struct ipsec_mb_qp *qp)
{
	unsigned int processed_op;

	switch (session->op) {
	/* case KASUMI_OP_ONLY_CIPHER: */
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
		break;
	/* case KASUMI_OP_ONLY_AUTH: */
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
		break;
	/* case KASUMI_OP_CIPHER_AUTH: */
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_kasumi_hash_op(qp, &op, session, 1);
		break;
	/* case KASUMI_OP_AUTH_CIPHER: */
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_kasumi_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct kasumi_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}
	return processed_op;
}

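/**
 * Dequeue burst of crypto ops: pull ops from the queue pair's ingress ring,
 * batch byte-aligned ops that share a session (up to KASUMI_MAX_BURST) for
 * process_ops(), and process bit-level ops individually via process_op_bit().
 * Ops are expected to have been placed on qp->ingress_queue by the common
 * ipsec_mb enqueue path.
 */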
static uint16_t
kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[nb_ops];
	struct rte_crypto_op *curr_c_op = NULL;

	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
	struct ipsec_mb_qp *qp = queue_pair;
	unsigned int i;
	uint8_t burst_size = 0;
	uint8_t processed_ops;
	unsigned int nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
					      (void **)ops, nb_ops, NULL);
	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
		    || (curr_c_op->sym->m_dst != NULL
			&& !rte_pktmbuf_is_contiguous(
			    curr_c_op->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				      "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
				      curr_c_op);
			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}
#endif

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = (struct kasumi_session *)
		    ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL
			      || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			curr_c_op->status
			    = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level,
		 * process this buffer alone.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
		    || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
							     qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op,
						       curr_sess, qp);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == KASUMI_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
							     qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess, qp,
						     burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
	}

	qp->stats.dequeued_count += i;
	return i;
}

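/*
 * Device, queue-pair and session management below is delegated to the shared
 * ipsec_mb framework; only the KASUMI-specific session setup and processing
 * routines above are implemented in this file.
 */
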
struct rte_cryptodev_ops kasumi_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;

static int
kasumi_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_KASUMI);
}

static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
	.probe = kasumi_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver kasumi_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
			       "max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
				cryptodev_kasumi_pmd_drv.driver,
				pmd_driver_id_kasumi);

/* Constructor function to register kasumi PMD */
RTE_INIT(ipsec_mb_register_kasumi)
{
	struct ipsec_mb_internals *kasumi_data
	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];

	kasumi_data->caps = kasumi_capabilities;
	kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
	kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
				      | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
				      | RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
				      | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
				      | RTE_CRYPTODEV_FF_SYM_SESSIONLESS
				      | RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
	kasumi_data->internals_priv_size = 0;
	kasumi_data->ops = &kasumi_pmd_ops;
	kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
	kasumi_data->session_configure = kasumi_session_configure;
	kasumi_data->session_priv_size = sizeof(struct kasumi_session);
}
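
/*
 * Usage sketch (illustrative, not part of the driver): the PMD is typically
 * instantiated as a virtual device, e.g. from the EAL command line with
 *
 *   --vdev="crypto_kasumi,max_nb_queue_pairs=2,socket_id=0"
 *
 * or programmatically with
 *   rte_vdev_init("crypto_kasumi", "max_nb_queue_pairs=2,socket_id=0");
 * after which the device is driven through the standard rte_cryptodev API.
 */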