Lines Matching "inline-crypto-engine"
1 /*-
46 #include "crypto/t4_crypto.h"
51 * +-------------------------------+
52 * | struct fw_crypto_lookaside_wr |
53 * +-------------------------------+
54 * | struct ulp_txpkt |
55 * +-------------------------------+
56 * | struct ulptx_idata |
57 * +-------------------------------+
58 * | struct cpl_tx_sec_pdu |
59 * +-------------------------------+
60 * | struct cpl_tls_tx_scmd_fmt |
61 * +-------------------------------+
62 * | key context header |
63 * +-------------------------------+
64 * | AES key | ----- For requests with AES
65 * +-------------------------------+
66 * | Hash state | ----- For hash-only requests
67 * +-------------------------------+ -
68 * | IPAD (16-byte aligned) | \
69 * +-------------------------------+ +---- For requests with HMAC
70 * | OPAD (16-byte aligned) | /
71 * +-------------------------------+ -
72 * | GMAC H | ----- For AES-GCM
73 * +-------------------------------+ -
74 * | struct cpl_rx_phys_dsgl | \
75 * +-------------------------------+ +---- Destination buffer for
76 * | PHYS_DSGL entries | / non-hash-only requests
77 * +-------------------------------+ -
78 * | 16 dummy bytes | ----- Only for HMAC/hash-only requests
79 * +-------------------------------+
80 * | IV | ----- If immediate IV
81 * +-------------------------------+
82 * | Payload | ----- If immediate Payload
83 * +-------------------------------+ -
84 * | struct ulptx_sgl | \
85 * +-------------------------------+ +---- If payload via SGL
86 * | SGL entries | /
87 * +-------------------------------+ -
89 * Note that the key context must be padded to ensure 16-byte alignment.
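That 16-byte padding applies to each component of the key context: the cipher key is rounded up to 16 bytes, and for HMAC sessions the IPAD and OPAD partial hashes are each rounded up as well (see the roundup2() calls in the fragments below). A minimal sketch of the arithmetic, with illustrative lengths for an AES-128 plus HMAC-SHA2-256 session; only the roundup2() macro from sys/param.h is assumed:

	#include <sys/param.h>

	/* Sketch: key-context length for an AES-128 + HMAC-SHA2-256 (ETA) session. */
	static size_t
	example_kctx_len(void)
	{
		size_t key_len = 16;			/* AES-128 cipher key */
		size_t partial_digest_len = 32;		/* SHA-256 partial hash state */

		/* IPAD and OPAD must each start on a 16-byte boundary. */
		size_t iopad_size = roundup2(partial_digest_len, 16);

		/* Padded cipher key, then IPAD and OPAD. */
		return (roundup2(key_len, 16) + iopad_size * 2);	/* 80 bytes */
	}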
95 * +-------------------------------+
96 * | struct cpl_fw6_pld |
97 * +-------------------------------+
98 * | hash digest | ----- For HMAC request with
99 * +-------------------------------+ 'hash_size' set in work request
101 * A 32-bit big-endian error status word is supplied in the last 4 bytes of data[0] of the reply.
105 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message sent in the request is returned in data[1] and is used to locate the originating 'struct cryptop'.
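That cookie is how completions find their way back: the request builders below store the 'struct cryptop' pointer in crwr->wreq.cookie, and the reply handler recovers it from data[1] while reading the status from data[0]. A condensed sketch of the reply side; the function name and the simplified nonzero-status-to-EIO mapping are illustrative, not the driver's exact error decoding:

	static void
	example_fw6_pld_reply(const struct cpl_fw6_pld *cpl)
	{
		struct cryptop *crp;
		uint64_t status;

		/* data[1] carries the cookie, i.e. the originating cryptop. */
		crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);

		/* The error word sits in the low 32 bits of data[0]. */
		status = be64toh(cpl->data[0]);
		crp->crp_etype = (status & 0xffffffff) != 0 ? EIO : 0;

		crypto_done(crp);
	}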
117 * The crypto engine supports a maximum AAD size of 511 bytes.
123 * entries. While the CPL includes a 16-bit length field, the T6 can only safely handle entries of DSGL_SGE_MAXLEN bytes or less, so larger segments are split across multiple DSGL entries.
132 * length of 64k-1 or smaller. Longer requests either result in hung requests or incorrect results.
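Those limits show up as early sanity checks at the top of every request builder; a condensed sketch of the pattern (the specific errno values shown are illustrative):

	/* AAD plus the IV copy placed in front of it may not exceed 511 bytes. */
	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	/* Total input (IV + AAD + payload) must stay under 64k. */
	if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
	    MAX_REQUEST_SIZE)
		return (EFBIG);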
137 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
188 * Pre-allocate a dummy output buffer for the IV and AAD for requests whose destination PHYS_DSGL must cover them (ETA, GCM, and CCM).
239 * Pre-allocate S/G lists used when preparing a work request.
243 * 'sg_ulptx' is used to describe the data the engine should DMA as input via a ULP_TX SGL.
255 * Crypto requests involve two kinds of scatter/gather lists.
257 * Non-hash-only requests require a PHYS_DSGL that describes the destination buffer the engine writes its output to.
264 * The input payload may either be supplied inline as immediate data, or via a standard ULP_TX SGL.
270 * buffers described by the crypto operation. ccr_populate_sglist()
271 * generates a scatter/gather list that covers an entire crypto operation buffer.
281 switch (cb->cb_type) {
283 error = sglist_append_mbuf(sg, cb->cb_mbuf);
286 error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
289 error = sglist_append_uio(sg, cb->cb_uio);
292 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
295 error = sglist_append_vmpages(sg, cb->cb_vm_page,
296 cb->cb_vm_page_offset, cb->cb_vm_page_len);
314 for (i = 0; i < sg->sg_nseg; i++)
315 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
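Putting the pieces above together: each submitted request first maps the whole input buffer (and the separate output buffer, if any) once, then appends just the byte ranges it needs to the per-request lists. A condensed sketch using the session fields seen throughout the request builders below and the OCF CRYPTO_HAS_OUTPUT_BUFFER() test; error handling is trimmed:

	int error, sgl_nsegs;

	/* Map the whole input buffer, and the separate output buffer if one exists. */
	error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
	if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);

	/* Describe only the payload range as the engine's ULPTX_SGL input. */
	sglist_reset(s->sg_ulptx);
	error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
	    crp->crp_payload_start, crp->crp_payload_length);
	sgl_nsegs = s->sg_ulptx->sg_nseg;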
320 static inline int
343 sg = s->sg_dsgl;
345 cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
347 cpl->pcirlxorder_to_noofsgentr = htobe32(
352 cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
353 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
354 cpl->rss_hdr_int.hash_val = 0;
355 cpl->rss_hdr_int.channel = s->port->rx_channel_id;
358 for (i = 0; i < sg->sg_nseg; i++) {
359 seglen = sg->sg_segs[i].ss_len;
360 paddr = sg->sg_segs[i].ss_paddr;
362 sgl->addr[j] = htobe64(paddr);
364 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
366 seglen -= DSGL_SGE_MAXLEN;
368 sgl->len[j] = htobe16(seglen);
378 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
382 static inline int
387 nsegs--; /* first segment is part of ulptx_sgl */
400 sg = s->sg_ulptx;
401 MPASS(nsegs == sg->sg_nseg);
402 ss = &sg->sg_segs[0];
404 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
406 usgl->len0 = htobe32(ss->ss_len);
407 usgl->addr0 = htobe64(ss->ss_paddr);
409 for (i = 0; i < sg->sg_nseg - 1; i++) {
410 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
411 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
436 crwr->wreq.op_to_cctx_size = htobe32(
442 crwr->wreq.len16_pkd = htobe32(
444 crwr->wreq.session_id = 0;
445 crwr->wreq.rx_chid_to_rx_q_id = htobe32(
446 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->rx_channel_id) |
452 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
453 crwr->wreq.key_addr = 0;
454 crwr->wreq.pld_size_hash_size = htobe32(
457 crwr->wreq.cookie = htobe64((uintptr_t)crp);
459 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
461 V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
463 V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
464 crwr->ulptx.len = htobe32(
465 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
467 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
469 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
471 idata_len -= 16 - imm_len % 16;
472 crwr->sc_imm.len = htobe32(idata_len);
487 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
490 axf = s->hmac.auth_hash;
492 if (s->mode == HMAC) {
500 /* PADs must be 128-bit aligned. */
501 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
510 hash_size_in_response = axf->hashsize;
513 if (crp->crp_payload_length == 0) {
514 imm_len = axf->blocksize;
517 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
518 imm_len = crp->crp_payload_length;
523 sglist_reset(s->sg_ulptx);
524 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
525 crp->crp_payload_start, crp->crp_payload_length);
528 sgl_nsegs = s->sg_ulptx->sg_nseg;
535 wr = alloc_wrqe(wr_len, s->port->txq);
537 counter_u64_add(sc->stats_wr_nomem, 1);
546 crwr->sec_cpl.op_ivinsrtofst = htobe32(
548 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
553 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
554 axf->blocksize : crp->crp_payload_length);
556 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
560 crwr->sec_cpl.seqno_numivs = htobe32(
564 V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
566 crwr->sec_cpl.ivgen_hdrlen = htobe32(
568 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
571 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);
575 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
579 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
582 if (crp->crp_payload_length == 0) {
584 if (s->mode == HMAC)
585 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
586 htobe64(axf->blocksize << 3);
588 crypto_copydata(crp, crp->crp_payload_start,
589 crp->crp_payload_length, dst);
594 t4_wrq_tx(sc->adapter, wr);
608 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
609 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
611 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
614 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
632 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
634 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
635 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
639 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
642 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
647 sglist_reset(s->sg_dsgl);
649 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
650 crp->crp_payload_output_start, crp->crp_payload_length);
652 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
653 crp->crp_payload_start, crp->crp_payload_length);
656 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
661 /* The 'key' must be 128-bit aligned. */
662 kctx_len = roundup2(s->cipher.key_len, 16);
665 /* For AES-XTS we send a 16-byte IV in the work request. */
666 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
669 iv_len = s->cipher.iv_len;
671 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
672 imm_len = crp->crp_payload_length;
677 sglist_reset(s->sg_ulptx);
678 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
679 crp->crp_payload_start, crp->crp_payload_length);
682 sgl_nsegs = s->sg_ulptx->sg_nseg;
690 wr = alloc_wrqe(wr_len, s->port->txq);
692 counter_u64_add(sc->stats_wr_nomem, 1);
700 /* Zero the remainder of the IV for AES-XTS. */
701 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
706 crwr->sec_cpl.op_ivinsrtofst = htobe32(
708 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
713 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);
715 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
718 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
722 crwr->sec_cpl.seqno_numivs = htobe32(
726 V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
731 crwr->sec_cpl.ivgen_hdrlen = htobe32(
736 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
737 switch (s->cipher.cipher_mode) {
739 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
740 memcpy(crwr->key_ctx.key, s->cipher.enckey,
741 s->cipher.key_len);
743 memcpy(crwr->key_ctx.key, s->cipher.deckey,
744 s->cipher.key_len);
747 memcpy(crwr->key_ctx.key, s->cipher.enckey,
748 s->cipher.key_len);
751 key_half = s->cipher.key_len / 2;
752 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
754 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
755 memcpy(crwr->key_ctx.key + key_half,
756 s->cipher.enckey, key_half);
758 memcpy(crwr->key_ctx.key + key_half,
759 s->cipher.deckey, key_half);
769 crypto_copydata(crp, crp->crp_payload_start,
770 crp->crp_payload_length, dst);
775 t4_wrq_tx(sc->adapter, wr);
788 * cpl->data[2], but OCF doesn't permit chained requests.
831 * payload could be supported as HMAC-only requests.
833 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
835 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
836 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
839 /* For AES-XTS we send a 16-byte IV in the work request. */
840 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
843 iv_len = s->cipher.iv_len;
845 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
848 axf = s->hmac.auth_hash;
849 hash_size_in_response = s->hmac.hash_len;
850 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
865 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
869 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
873 sglist_reset(s->sg_dsgl);
874 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
875 iv_len + crp->crp_aad_length);
879 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
880 crp->crp_payload_output_start, crp->crp_payload_length);
882 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
883 crp->crp_payload_start, crp->crp_payload_length);
888 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
889 crp->crp_digest_start, hash_size_in_response);
891 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
892 crp->crp_digest_start, hash_size_in_response);
896 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
901 /* PADs must be 128-bit aligned. */
902 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
908 kctx_len = roundup2(s->cipher.key_len, 16) + iopad_size * 2;
918 * crypto engine doesn't work properly if the IV offset points
922 input_len = crp->crp_aad_length + crp->crp_payload_length;
927 * firmware appears to require 512 - 16 bytes of spare room
931 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
943 sglist_reset(s->sg_ulptx);
944 if (crp->crp_aad_length != 0) {
945 if (crp->crp_aad != NULL)
946 error = sglist_append(s->sg_ulptx,
947 crp->crp_aad, crp->crp_aad_length);
949 error = sglist_append_sglist(s->sg_ulptx,
950 s->sg_input, crp->crp_aad_start,
951 crp->crp_aad_length);
955 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
956 crp->crp_payload_start, crp->crp_payload_length);
960 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
961 crp->crp_digest_start, hash_size_in_response);
965 sgl_nsegs = s->sg_ulptx->sg_nseg;
970 if (crp->crp_aad_length != 0) {
972 aad_stop = aad_start + crp->crp_aad_length - 1;
977 cipher_start = iv_len + crp->crp_aad_length + 1;
991 wr = alloc_wrqe(wr_len, s->port->txq);
993 counter_u64_add(sc->stats_wr_nomem, 1);
1001 /* Zero the remainder of the IV for AES-XTS. */
1002 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
1007 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1009 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1014 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1016 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1021 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1028 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1029 crwr->sec_cpl.seqno_numivs = htobe32(
1034 V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
1035 V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
1039 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1044 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1045 switch (s->cipher.cipher_mode) {
1047 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1048 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1049 s->cipher.key_len);
1051 memcpy(crwr->key_ctx.key, s->cipher.deckey,
1052 s->cipher.key_len);
1055 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1056 s->cipher.key_len);
1059 key_half = s->cipher.key_len / 2;
1060 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
1062 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1063 memcpy(crwr->key_ctx.key + key_half,
1064 s->cipher.enckey, key_half);
1066 memcpy(crwr->key_ctx.key + key_half,
1067 s->cipher.deckey, key_half);
1071 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1072 memcpy(dst, s->hmac.pads, iopad_size * 2);
1080 if (crp->crp_aad_length != 0) {
1081 if (crp->crp_aad != NULL)
1082 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1084 crypto_copydata(crp, crp->crp_aad_start,
1085 crp->crp_aad_length, dst);
1086 dst += crp->crp_aad_length;
1088 crypto_copydata(crp, crp->crp_payload_start,
1089 crp->crp_payload_length, dst);
1090 dst += crp->crp_payload_length;
1092 crypto_copydata(crp, crp->crp_digest_start,
1098 t4_wrq_tx(sc->adapter, wr);
1111 * cpl->data[2], but OCF doesn't permit chained requests.
1131 if (s->cipher.key_len == 0)
1135 * The crypto engine doesn't handle GCM requests with an empty payload; those are handed to the software fallback session.
1138 if (crp->crp_payload_length == 0)
1141 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
1144 hash_size_in_response = s->gmac.hash_len;
1145 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1155 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1168 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
1172 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
1176 sglist_reset(s->sg_dsgl);
1177 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1178 crp->crp_aad_length);
1182 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1183 crp->crp_payload_output_start, crp->crp_payload_length);
1185 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1186 crp->crp_payload_start, crp->crp_payload_length);
1191 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1192 crp->crp_digest_start, hash_size_in_response);
1194 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1195 crp->crp_digest_start, hash_size_in_response);
1199 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1208 kctx_len = roundup2(s->cipher.key_len, 16) + GMAC_BLOCK_LEN;
1218 * crypto engine doesn't work properly if the IV offset points
1222 input_len = crp->crp_aad_length + crp->crp_payload_length;
1233 sglist_reset(s->sg_ulptx);
1234 if (crp->crp_aad_length != 0) {
1235 if (crp->crp_aad != NULL)
1236 error = sglist_append(s->sg_ulptx,
1237 crp->crp_aad, crp->crp_aad_length);
1239 error = sglist_append_sglist(s->sg_ulptx,
1240 s->sg_input, crp->crp_aad_start,
1241 crp->crp_aad_length);
1245 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1246 crp->crp_payload_start, crp->crp_payload_length);
1250 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1251 crp->crp_digest_start, hash_size_in_response);
1255 sgl_nsegs = s->sg_ulptx->sg_nseg;
1259 if (crp->crp_aad_length != 0) {
1261 aad_stop = aad_start + crp->crp_aad_length - 1;
1266 cipher_start = iv_len + crp->crp_aad_length + 1;
1280 wr = alloc_wrqe(wr_len, s->port->txq);
1282 counter_u64_add(sc->stats_wr_nomem, 1);
1294 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1296 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1301 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1312 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1317 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1325 crwr->sec_cpl.seqno_numivs = htobe32(
1335 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1340 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1341 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1342 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1343 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1351 if (crp->crp_aad_length != 0) {
1352 if (crp->crp_aad != NULL)
1353 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1355 crypto_copydata(crp, crp->crp_aad_start,
1356 crp->crp_aad_length, dst);
1357 dst += crp->crp_aad_length;
1359 crypto_copydata(crp, crp->crp_payload_start,
1360 crp->crp_payload_length, dst);
1361 dst += crp->crp_payload_length;
1363 crypto_copydata(crp, crp->crp_digest_start,
1369 t4_wrq_tx(sc->adapter, wr);
1382 * cpl->data[2], but OCF doesn't permit chained requests.
1422 /* Set length of hash in bits 3 - 5. */
1423 b0[0] |= (((hash_size_in_response - 2) / 2) << 3);
1425 /* Store the payload length as a big-endian value. */
1426 payload_len = crp->crp_payload_length;
1428 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
1434 * field and store the AAD length as a big-endian value at the
1435 * start of block 1. This only assumes a 16-bit AAD length encoding, which is sufficient given the engine's 511-byte AAD limit.
1438 if (crp->crp_aad_length != 0) {
1440 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
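As a worked example of the flags byte these steps produce, using the standard RFC 3610 encoding with illustrative lengths (12-byte nonce, 16-byte MAC, AAD present):

	/* Worked example: 12-byte nonce, 16-byte MAC, AAD present. */
	uint8_t flags = 0x40 |			/* Adata bit: AAD is present */
	    (((16 - 2) / 2) << 3) |		/* M' field: 16-byte MAC -> 7 */
	    ((15 - 12) - 1);			/* L' field: 12-byte nonce -> 2 */
	/* flags == 0x7a */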
1461 csp = crypto_get_params(crp->crp_session);
1463 if (s->cipher.key_len == 0)
1467 * The crypto engine doesn't handle CCM requests with an empty payload; those are handed to the software fallback session.
1470 if (crp->crp_payload_length == 0)
1474 if (crp->crp_payload_length > ccm_max_payload_length(csp))
1482 if (crp->crp_aad_length != 0)
1484 aad_len = b0_len + crp->crp_aad_length;
1490 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1502 hash_size_in_response = s->ccm_mac.hash_len;
1503 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1518 if (iv_len + aad_len + crp->crp_payload_length +
1522 if (iv_len + aad_len + crp->crp_payload_length >
1526 sglist_reset(s->sg_dsgl);
1527 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1532 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1533 crp->crp_payload_output_start, crp->crp_payload_length);
1535 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1536 crp->crp_payload_start, crp->crp_payload_length);
1541 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1542 crp->crp_digest_start, hash_size_in_response);
1544 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1545 crp->crp_digest_start, hash_size_in_response);
1549 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1558 kctx_len = roundup2(s->cipher.key_len, 16) * 2;
1568 * crypto engine doesn't work properly if the IV offset points
1572 input_len = aad_len + crp->crp_payload_length;
1585 sglist_reset(s->sg_ulptx);
1586 if (crp->crp_aad_length != 0) {
1587 if (crp->crp_aad != NULL)
1588 error = sglist_append(s->sg_ulptx,
1589 crp->crp_aad, crp->crp_aad_length);
1591 error = sglist_append_sglist(s->sg_ulptx,
1592 s->sg_input, crp->crp_aad_start,
1593 crp->crp_aad_length);
1597 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1598 crp->crp_payload_start, crp->crp_payload_length);
1602 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1603 crp->crp_digest_start, hash_size_in_response);
1607 sgl_nsegs = s->sg_ulptx->sg_nseg;
1612 aad_stop = aad_start + aad_len - 1;
1627 wr = alloc_wrqe(wr_len, s->port->txq);
1629 counter_u64_add(sc->stats_wr_nomem, 1);
1640 iv[0] = (15 - csp->csp_ivlen) - 1;
1646 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1648 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1653 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1659 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1664 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1672 crwr->sec_cpl.seqno_numivs = htobe32(
1682 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1687 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1688 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1689 memcpy(crwr->key_ctx.key + roundup(s->cipher.key_len, 16),
1690 s->cipher.enckey, s->cipher.key_len);
1700 if (crp->crp_aad_length != 0) {
1701 if (crp->crp_aad != NULL)
1702 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1704 crypto_copydata(crp, crp->crp_aad_start,
1705 crp->crp_aad_length, dst);
1706 dst += crp->crp_aad_length;
1708 crypto_copydata(crp, crp->crp_payload_start,
1709 crp->crp_payload_length, dst);
1710 dst += crp->crp_payload_length;
1712 crypto_copydata(crp, crp->crp_digest_start,
1720 * is 16-byte aligned.
1722 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
1725 8 - CCM_AAD_FIELD_SIZE);
1727 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1728 idata->len = htobe32(0);
1735 t4_wrq_tx(sc->adapter, wr);
1748 * cpl->data[2], but OCF doesn't permit chained requests.
1757 * Use the software session for requests not supported by the crypto
1758 * engine (e.g. CCM and GCM requests with an empty payload).
1765 orig = crp->crp_opaque;
1766 orig->crp_etype = crp->crp_etype;
1778 new = crypto_clonereq(crp, s->sw_session, M_NOWAIT);
1780 crp->crp_etype = ENOMEM;
1791 new->crp_opaque = crp;
1792 new->crp_callback = ccr_soft_done;
1795 crp->crp_etype = error;
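Condensed from the fragments above: the request is cloned onto the software session, the original is completed from the clone's callback, and a failure to clone or dispatch completes the original immediately. The function names here are illustrative; the crypto_clonereq()/crypto_dispatch()/crypto_done() calls are the standard OCF ones the driver uses.

	/* Completion callback for the cloned request: finish the original. */
	static int
	example_soft_done(struct cryptop *new)
	{
		struct cryptop *orig;

		orig = new->crp_opaque;
		orig->crp_etype = new->crp_etype;
		crypto_freereq(new);
		crypto_done(orig);
		return (0);
	}

	/* Hand an unsupported request to the pre-allocated software session. */
	static void
	example_soft(crypto_session_t sw_session, struct cryptop *crp)
	{
		struct cryptop *new;
		int error;

		new = crypto_clonereq(crp, sw_session, M_NOWAIT);
		if (new == NULL) {
			crp->crp_etype = ENOMEM;
			crypto_done(crp);
			return;
		}

		new->crp_opaque = crp;
		new->crp_callback = example_soft_done;
		error = crypto_dispatch(new);
		if (error != 0) {
			crypto_freereq(new);
			crp->crp_etype = error;
			crypto_done(crp);
		}
	}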
1806 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
1807 device_find_child(parent, "ccr", -1) == NULL)
1815 device_set_desc(dev, "Chelsio Crypto Accelerator");
1822 struct sysctl_ctx_list *ctx = &sc->ctx;
1831 oid = device_get_sysctl_tree(sc->dev);
1835 &sc->port_mask, 0, "Mask of enabled ports");
1845 &sc->stats_hash, "Hash requests submitted");
1847 &sc->stats_hmac, "HMAC requests submitted");
1849 CTLFLAG_RD, &sc->stats_cipher_encrypt,
1852 CTLFLAG_RD, &sc->stats_cipher_decrypt,
1855 CTLFLAG_RD, &sc->stats_eta_encrypt,
1858 CTLFLAG_RD, &sc->stats_eta_decrypt,
1861 CTLFLAG_RD, &sc->stats_gcm_encrypt,
1862 "AES-GCM encryption requests submitted");
1864 CTLFLAG_RD, &sc->stats_gcm_decrypt,
1865 "AES-GCM decryption requests submitted");
1867 CTLFLAG_RD, &sc->stats_ccm_encrypt,
1868 "AES-CCM encryption requests submitted");
1870 CTLFLAG_RD, &sc->stats_ccm_decrypt,
1871 "AES-CCM decryption requests submitted");
1873 &sc->stats_wr_nomem, "Work request memory allocation failures");
1875 &sc->stats_inflight, "Requests currently pending");
1877 &sc->stats_mac_error, "MAC errors");
1879 &sc->stats_pad_error, "Padding errors");
1881 CTLFLAG_RD, &sc->stats_sglist_error,
1884 CTLFLAG_RD, &sc->stats_process_error,
1887 CTLFLAG_RD, &sc->stats_sw_fallback,
1894 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");
1896 for (i = 0; i < nitems(sc->ports); i++) {
1897 if (sc->ports[i].rxq == NULL)
1909 CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
1912 CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued");
1914 CTLFLAG_RD, &sc->ports[i].stats_completed,
1924 pi = sc->adapter->port[port];
1925 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
1926 sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq];
1927 sc->ports[port].rx_channel_id = pi->rx_chan;
1928 sc->ports[port].tx_channel_id = pi->tx_chan;
1929 sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK);
1930 sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK);
1931 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1,
1935 * Completions for crypto requests on port 1 can sometimes
1939 if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
1941 sc->port_mask |= 1u << port;
1952 sc->dev = dev;
1953 sysctl_ctx_init(&sc->ctx);
1954 sc->adapter = device_get_softc(device_get_parent(dev));
1955 for_each_port(sc->adapter, i) {
1961 device_printf(dev, "could not get crypto driver id\n");
1964 sc->cid = cid;
1970 sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id;
1972 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1973 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
1974 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
1975 sc->stats_cipher_encrypt = counter_u64_alloc(M_WAITOK);
1976 sc->stats_cipher_decrypt = counter_u64_alloc(M_WAITOK);
1977 sc->stats_hash = counter_u64_alloc(M_WAITOK);
1978 sc->stats_hmac = counter_u64_alloc(M_WAITOK);
1979 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK);
1980 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK);
1981 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK);
1982 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK);
1983 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK);
1984 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK);
1985 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
1986 sc->stats_inflight = counter_u64_alloc(M_WAITOK);
1987 sc->stats_mac_error = counter_u64_alloc(M_WAITOK);
1988 sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
1989 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
1990 sc->stats_process_error = counter_u64_alloc(M_WAITOK);
1991 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
2001 counter_u64_free(sc->ports[port].stats_queued);
2002 counter_u64_free(sc->ports[port].stats_completed);
2013 mtx_lock(&sc->lock);
2014 sc->detaching = true;
2015 mtx_unlock(&sc->lock);
2017 crypto_unregister_all(sc->cid);
2019 sysctl_ctx_free(&sc->ctx);
2020 mtx_destroy(&sc->lock);
2021 counter_u64_free(sc->stats_cipher_encrypt);
2022 counter_u64_free(sc->stats_cipher_decrypt);
2023 counter_u64_free(sc->stats_hash);
2024 counter_u64_free(sc->stats_hmac);
2025 counter_u64_free(sc->stats_eta_encrypt);
2026 counter_u64_free(sc->stats_eta_decrypt);
2027 counter_u64_free(sc->stats_gcm_encrypt);
2028 counter_u64_free(sc->stats_gcm_decrypt);
2029 counter_u64_free(sc->stats_ccm_encrypt);
2030 counter_u64_free(sc->stats_ccm_decrypt);
2031 counter_u64_free(sc->stats_wr_nomem);
2032 counter_u64_free(sc->stats_inflight);
2033 counter_u64_free(sc->stats_mac_error);
2034 counter_u64_free(sc->stats_pad_error);
2035 counter_u64_free(sc->stats_sglist_error);
2036 counter_u64_free(sc->stats_process_error);
2037 counter_u64_free(sc->stats_sw_fallback);
2038 for_each_port(sc->adapter, i) {
2041 sglist_free(sc->sg_iv_aad);
2042 free(sc->iv_aad_buf, M_CCR);
2052 axf = s->hmac.auth_hash;
2053 axf->Init(&auth_ctx);
2054 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
2085 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
2103 s->cipher.key_len = klen;
2104 memcpy(s->cipher.enckey, key, s->cipher.key_len);
2105 switch (s->cipher.cipher_mode) {
2108 t4_aes_getdeckey(s->cipher.deckey, key, kbits);
2112 kctx_len = roundup2(s->cipher.key_len, 16);
2113 switch (s->mode) {
2115 mk_size = s->hmac.mk_size;
2117 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
2148 s->cipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
2149 V_KEY_CONTEXT_DUAL_CK(s->cipher.cipher_mode ==
2160 switch (csp->csp_auth_alg) {
2182 switch (csp->csp_cipher_alg) {
2184 if (csp->csp_ivlen != AES_BLOCK_LEN)
2188 if (csp->csp_ivlen != AES_BLOCK_LEN)
2192 if (csp->csp_ivlen != AES_XTS_IV_LEN)
2198 return (ccr_aes_check_keylen(csp->csp_cipher_alg,
2199 csp->csp_cipher_klen));
2206 switch (csp->csp_cipher_alg) {
2227 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
2230 switch (csp->csp_mode) {
2240 switch (csp->csp_cipher_alg) {
2256 if (csp->csp_cipher_klen != 0) {
2274 mtx_assert(&sc->lock, MA_OWNED);
2276 for (i = 0; i < nitems(sc->ports); i++) {
2277 p = &sc->ports[i];
2279 /* Ignore non-existent ports. */
2280 if (p->rxq == NULL)
2289 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
2292 if ((sc->port_mask & (1u << i)) == 0)
2296 p->active_sessions < best->active_sessions)
2305 crypto_freesession(s->sw_session);
2306 sglist_free(s->sg_input);
2307 sglist_free(s->sg_output);
2308 sglist_free(s->sg_ulptx);
2309 sglist_free(s->sg_dsgl);
2310 mtx_destroy(&s->lock);
2324 switch (csp->csp_auth_alg) {
2371 switch (csp->csp_mode) {
2403 mtx_init(&s->lock, "ccr session", NULL, MTX_DEF);
2404 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2405 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2406 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2407 s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT);
2408 if (s->sg_input == NULL || s->sg_output == NULL ||
2409 s->sg_ulptx == NULL || s->sg_dsgl == NULL) {
2414 if (csp->csp_mode == CSP_MODE_AEAD) {
2415 error = crypto_newsession(&s->sw_session, csp,
2424 s->sc = sc;
2426 mtx_lock(&sc->lock);
2427 if (sc->detaching) {
2428 mtx_unlock(&sc->lock);
2433 s->port = ccr_choose_port(sc);
2434 if (s->port == NULL) {
2435 mtx_unlock(&sc->lock);
2440 switch (csp->csp_mode) {
2443 s->mode = CCM;
2445 s->mode = GCM;
2448 s->mode = ETA;
2451 if (csp->csp_auth_klen != 0)
2452 s->mode = HMAC;
2454 s->mode = HASH;
2457 s->mode = CIPHER;
2461 if (s->mode == GCM) {
2462 if (csp->csp_auth_mlen == 0)
2463 s->gmac.hash_len = AES_GMAC_HASH_LEN;
2465 s->gmac.hash_len = csp->csp_auth_mlen;
2466 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
2467 s->gmac.ghash_h);
2468 } else if (s->mode == CCM) {
2469 if (csp->csp_auth_mlen == 0)
2470 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
2472 s->ccm_mac.hash_len = csp->csp_auth_mlen;
2474 s->hmac.auth_hash = auth_hash;
2475 s->hmac.auth_mode = auth_mode;
2476 s->hmac.mk_size = mk_size;
2477 s->hmac.partial_digest_len = partial_digest_len;
2478 if (csp->csp_auth_mlen == 0)
2479 s->hmac.hash_len = auth_hash->hashsize;
2481 s->hmac.hash_len = csp->csp_auth_mlen;
2482 if (csp->csp_auth_key != NULL)
2484 csp->csp_auth_key, csp->csp_auth_klen,
2485 s->hmac.pads);
2490 s->cipher.cipher_mode = cipher_mode;
2491 s->cipher.iv_len = csp->csp_ivlen;
2492 if (csp->csp_cipher_key != NULL)
2493 ccr_aes_setkey(s, csp->csp_cipher_key,
2494 csp->csp_cipher_klen);
2497 s->port->active_sessions++;
2498 mtx_unlock(&sc->lock);
2511 if (s->pending != 0)
2514 s->pending);
2516 mtx_lock(&sc->lock);
2517 s->port->active_sessions--;
2518 mtx_unlock(&sc->lock);
2530 csp = crypto_get_params(crp->crp_session);
2531 s = crypto_get_driver_session(crp->crp_session);
2534 mtx_lock(&s->lock);
2535 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
2537 error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);
2539 counter_u64_add(sc->stats_sglist_error, 1);
2543 switch (s->mode) {
2547 counter_u64_add(sc->stats_hash, 1);
2550 if (crp->crp_auth_key != NULL)
2551 t4_init_hmac_digest(s->hmac.auth_hash,
2552 s->hmac.partial_digest_len, crp->crp_auth_key,
2553 csp->csp_auth_klen, s->hmac.pads);
2556 counter_u64_add(sc->stats_hmac, 1);
2559 if (crp->crp_cipher_key != NULL)
2560 ccr_aes_setkey(s, crp->crp_cipher_key,
2561 csp->csp_cipher_klen);
2564 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2565 counter_u64_add(sc->stats_cipher_encrypt, 1);
2567 counter_u64_add(sc->stats_cipher_decrypt, 1);
2571 if (crp->crp_auth_key != NULL)
2572 t4_init_hmac_digest(s->hmac.auth_hash,
2573 s->hmac.partial_digest_len, crp->crp_auth_key,
2574 csp->csp_auth_klen, s->hmac.pads);
2575 if (crp->crp_cipher_key != NULL)
2576 ccr_aes_setkey(s, crp->crp_cipher_key,
2577 csp->csp_cipher_klen);
2580 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2581 counter_u64_add(sc->stats_eta_encrypt, 1);
2583 counter_u64_add(sc->stats_eta_decrypt, 1);
2587 if (crp->crp_cipher_key != NULL) {
2588 t4_init_gmac_hash(crp->crp_cipher_key,
2589 csp->csp_cipher_klen, s->gmac.ghash_h);
2590 ccr_aes_setkey(s, crp->crp_cipher_key,
2591 csp->csp_cipher_klen);
2595 counter_u64_add(sc->stats_sw_fallback, 1);
2596 mtx_unlock(&s->lock);
2601 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2602 counter_u64_add(sc->stats_gcm_encrypt, 1);
2604 counter_u64_add(sc->stats_gcm_decrypt, 1);
2608 if (crp->crp_cipher_key != NULL) {
2609 ccr_aes_setkey(s, crp->crp_cipher_key,
2610 csp->csp_cipher_klen);
2614 counter_u64_add(sc->stats_sw_fallback, 1);
2615 mtx_unlock(&s->lock);
2620 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2621 counter_u64_add(sc->stats_ccm_encrypt, 1);
2623 counter_u64_add(sc->stats_ccm_decrypt, 1);
2630 s->pending++;
2632 counter_u64_add(sc->stats_inflight, 1);
2633 counter_u64_add(s->port->stats_queued, 1);
2635 counter_u64_add(sc->stats_process_error, 1);
2638 mtx_unlock(&s->lock);
2641 crp->crp_etype = error;
2664 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2665 s = crypto_get_driver_session(crp->crp_session);
2666 status = be64toh(cpl->data[0]);
2672 sc = s->sc;
2674 mtx_lock(&s->lock);
2675 s->pending--;
2676 mtx_unlock(&s->lock);
2678 counter_u64_add(sc->stats_inflight, -1);
2679 counter_u64_add(s->port->stats_completed, 1);
2681 switch (s->mode) {
2702 counter_u64_add(sc->stats_mac_error, 1);
2704 counter_u64_add(sc->stats_pad_error, 1);
2706 crp->crp_etype = error;
2750 MODULE_DEPEND(ccr, crypto, 1, 1, 1);