/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

#include "pmd_snow3g_priv.h"

/** Parse crypto xform chain and set private session parameters.
 *
 * Validates that the chain uses SNOW 3G UEA2 (cipher) and/or UIA2 (auth),
 * checks IV/digest/key sizes, stores IV offsets and the auth op, and
 * expands the key schedule(s) via the IPsec-MB manager.
 *
 * Returns 0 on success, or a negative errno on failure:
 *  -ENOTSUP (wrong algorithm), -EINVAL (bad IV/digest length),
 *  -ENOMEM (key larger than the session's key storage).
 */
static int
snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
		const struct rte_crypto_sym_xform *xform)
{
	struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum ipsec_mb_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, NULL);
	if (ret)
		return ret;

	if (cipher_xform) {
		/* Only SNOW 3G UEA2 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
			return -ENOTSUP;

		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		/* IV lives in the op's private area; remember where. */
		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
					&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only SNOW 3G UIA2 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
			return -ENOTSUP;

		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}
		if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		sess->auth_op = auth_xform->auth.op;

		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
					&sess->pKeySched_hash);
	}

	sess->op = mode;

	return 0;
}

/** Check if conditions are met for digest-appended operations.
 *
 * auth/cipher offsets and lengths are expressed in bits (hence the >> 3
 * conversions to bytes). When the auth region ends before the cipher
 * region does, the digest appended after the auth data lies inside the
 * ciphered range ("digest encrypted" case); return a pointer to that
 * location in m_src. Otherwise return NULL.
 */
static uint8_t *
snow3g_digest_appended_in_src(struct rte_crypto_op *op)
{
	unsigned int auth_size, cipher_size;

	auth_size = (op->sym->auth.data.offset >> 3) +
		(op->sym->auth.data.length >> 3);
	cipher_size = (op->sym->cipher.data.offset >> 3) +
		(op->sym->cipher.data.length >> 3);

	if (auth_size < cipher_size)
		return rte_pktmbuf_mtod_offset(op->sym->m_src,
				uint8_t *, auth_size);

	return NULL;
}

/** Encrypt/decrypt mbufs with same cipher key.
 *
 * Gathers up to SNOW3G_MAX_BURST src/dst/IV/length entries (offsets and
 * lengths converted from bits to bytes) and runs them through a single
 * IMB_SNOW3G_F8_N_BUFFER call. Returns the number of ops submitted.
 */
static uint8_t
process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	const void *src[SNOW3G_MAX_BURST] = {NULL};
	void *dst[SNOW3G_MAX_BURST] = {NULL};
	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
	const void *iv[SNOW3G_MAX_BURST] = {NULL};
	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
	uint32_t cipher_off, cipher_len;
	/* Only becomes non-zero in the digest-encrypted session modes;
	 * once set it is recomputed on every out-of-place iteration.
	 */
	int unencrypted_bytes = 0;

	for (i = 0; i < num_ops; i++) {

		/* Bit offsets/lengths -> bytes. */
		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
		cipher_len = ops[i]->sym->cipher.data.length >> 3;
		src[i] = rte_pktmbuf_mtod_offset(
			ops[i]->sym->m_src, uint8_t *, cipher_off);

		/* If out-of-place operation */
		if (ops[i]->sym->m_dst &&
				ops[i]->sym->m_src != ops[i]->sym->m_dst) {
			dst[i] = rte_pktmbuf_mtod_offset(
				ops[i]->sym->m_dst, uint8_t *, cipher_off);

			/* In case of out-of-place, auth-cipher operation
			 * with partial encryption of the digest, copy
			 * the remaining, unencrypted part.
			 */
			if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
			    || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
				unencrypted_bytes =
					(ops[i]->sym->auth.data.offset >> 3) +
					(ops[i]->sym->auth.data.length >> 3) +
					(SNOW3G_DIGEST_LENGTH) -
					cipher_off - cipher_len;
			if (unencrypted_bytes > 0)
				rte_memcpy(
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_dst, uint8_t *,
						cipher_off + cipher_len),
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_src, uint8_t *,
						cipher_off + cipher_len),
					unencrypted_bytes);
		} else
			/* In-place: destination is the source mbuf. */
			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
						uint8_t *, cipher_off);

		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		num_bytes[i] = cipher_len;
		processed_ops++;
	}

	/* One multi-buffer F8 call for the whole batch. */
	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, num_bytes, processed_ops);

	/* Take care of the raw digest data in src buffer:
	 * in digest-encrypted OOP modes the plaintext digest must not be
	 * left behind in the source mbuf, so wipe it.
	 */
	for (i = 0; i < num_ops; i++) {
		if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
			session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
				ops[i]->sym->m_dst != NULL) {
			digest_appended[i] =
				snow3g_digest_appended_in_src(ops[i]);
			/* Clear unencrypted digest from
			 * the src buffer
			 */
			if (digest_appended[i] != NULL)
				memset(digest_appended[i],
					0, SNOW3G_DIGEST_LENGTH);
		}
	}
	return processed_ops;
}

/** Encrypt/decrypt mbuf (bit level function).
*/ 175 static uint8_t 176 process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp, 177 struct rte_crypto_op *op, 178 struct snow3g_session *session) 179 { 180 uint8_t *src, *dst; 181 uint8_t *iv; 182 uint32_t length_in_bits, offset_in_bits; 183 int unencrypted_bytes = 0; 184 185 offset_in_bits = op->sym->cipher.data.offset; 186 src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); 187 if (op->sym->m_dst == NULL) { 188 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; 189 IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n"); 190 return 0; 191 } 192 length_in_bits = op->sym->cipher.data.length; 193 dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); 194 /* In case of out-of-place, auth-cipher operation 195 * with partial encryption of the digest, copy 196 * the remaining, unencrypted part. 197 */ 198 if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT || 199 session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) 200 unencrypted_bytes = 201 (op->sym->auth.data.offset >> 3) + 202 (op->sym->auth.data.length >> 3) + 203 (SNOW3G_DIGEST_LENGTH) - 204 (offset_in_bits >> 3) - 205 (length_in_bits >> 3); 206 if (unencrypted_bytes > 0) 207 rte_memcpy( 208 rte_pktmbuf_mtod_offset( 209 op->sym->m_dst, uint8_t *, 210 (length_in_bits >> 3)), 211 rte_pktmbuf_mtod_offset( 212 op->sym->m_src, uint8_t *, 213 (length_in_bits >> 3)), 214 unencrypted_bytes); 215 216 iv = rte_crypto_op_ctod_offset(op, uint8_t *, 217 session->cipher_iv_offset); 218 219 IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv, 220 src, dst, length_in_bits, offset_in_bits); 221 222 return 1; 223 } 224 225 /** Generate/verify hash from mbufs with same hash key. 
 *
 * For VERIFY sessions the digest is computed into the qp's temp buffer and
 * compared against auth.digest.data; for GENERATE sessions it is written
 * either straight into the source buffer (digest-appended case) or to
 * auth.digest.data. Returns the number of ops processed; stops early on
 * a non-byte-aligned auth offset.
 */
static int
process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint8_t *iv;
	uint8_t digest_appended = 0;
	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR, "Offset");
			break;
		}

		dst = NULL;

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->auth_iv_offset);

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Compute into scratch space, never the mbuf. */
			dst = qp_data->temp_digest;
			/* Handle auth cipher verify oop case*/
			if ((session->op ==
				IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
				session->op ==
				IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
				ops[i]->sym->m_dst != NULL)
				src = rte_pktmbuf_mtod_offset(
					ops[i]->sym->m_dst, uint8_t *,
					ops[i]->sym->auth.data.offset >> 3);

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					SNOW3G_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Digest-encrypted modes write the digest directly
			 * after the auth region in the source buffer.
			 */
			if (session->op ==
				IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
				session->op ==
				IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
				dst = snow3g_digest_appended_in_src(ops[i]);

			if (dst != NULL)
				digest_appended = 1;
			else
				dst = ops[i]->sym->auth.digest.data;

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);

			/* Copy back digest from src to auth.digest.data */
			if (digest_appended)
				rte_memcpy(ops[i]->sym->auth.digest.data,
						dst, SNOW3G_DIGEST_LENGTH);
		}
		processed_ops++;
	}

	return processed_ops;
}

/** Process a batch of crypto ops which shares the same session.
 *
 * Dispatches to the cipher and/or hash helpers according to session->op,
 * marks still-unprocessed ops successful, and releases session-less
 * session objects back to their mempools. Returns the number of ops
 * processed by the first-stage helper.
 */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	uint32_t i;
	uint32_t processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				"PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.\n", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops = process_snow3g_cipher_op(qp, ops,
				session, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		/* Hash stage only sees ops the cipher stage accepted. */
		processed_ops = process_snow3g_cipher_op(qp, ops, session,
				num_ops);
		process_snow3g_hash_op(qp, ops, session, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		process_snow3g_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op:
		 * wipe both the private session data and the session header
		 * before returning them to their mempools.
		 */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct snow3g_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}

/** Process a crypto op with length/offset in bits.
 *
 * Single-op path used when cipher length/offset are not byte aligned.
 * Dispatches per session->op; on success the op is re-enqueued to the
 * qp ingress ring further below.
 */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	uint32_t enqueued_op, processed_op;

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:

		processed_op = process_snow3g_cipher_op_bit(qp, op,
				session);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		/* Run the hash stage only if the cipher stage succeeded. */
		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op.
	 * NOTE(review): unlike process_ops(), this path frees via
	 * rte_cryptodev_sym_session_free() rather than mempool puts —
	 * confirm the two cleanup paths are intentionally different.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct snow3g_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}

	if (unlikely(processed_op != 1))
		return 0;
	/* NOTE(review): rte_ring_enqueue() returns 0 on success, so these
	 * two counters only move on failure codes — verify the intended
	 * accounting here.
	 */
	enqueued_op = rte_ring_enqueue(qp->ingress_queue, op);
	qp->stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return 1;
}

/** Dequeue burst of ops from the qp ingress ring and process them.
 *
 * Ops sharing a session are batched (up to SNOW3G_MAX_BURST) for the
 * multi-buffer path; any op with bit-level cipher length/offset flushes
 * the current batch and is handled alone via process_op_bit().
 */
static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	uint32_t i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;
	uint32_t nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level,
		 * process this buffer alone.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size);
	}

	/* All dequeued ops are reported back, processed or not. */
	qp->stats.dequeued_count += i;
	return i;
}

/** Cryptodev ops table: all handlers are the shared ipsec-mb ones. */
struct rte_cryptodev_ops snow3g_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;

/** Virtual-device probe: create the SNOW 3G ipsec-mb PMD instance. */
static int
snow3g_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
}

static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = snow3g_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver snow3g_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
	"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
		cryptodev_snow3g_pmd_drv.driver,
		pmd_driver_id_snow3g);

/* Constructor function to register snow3g PMD:
 * fills the shared ipsec_mb_pmds[] slot with this PMD's capabilities,
 * burst handler, feature flags and per-object sizes.
 */
RTE_INIT(ipsec_mb_register_snow3g)
{
	struct ipsec_mb_internals *snow3g_data
		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];

	snow3g_data->caps = snow3g_capabilities;
	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
	snow3g_data->internals_priv_size = 0;
	snow3g_data->ops = &snow3g_pmd_ops;
	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
	snow3g_data->session_configure = snow3g_session_configure;
	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
}