1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016-2017 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <stdlib.h> 7 #include <sys/types.h> 8 #include <sys/stat.h> 9 #include <netinet/in.h> 10 #include <netinet/ip.h> 11 #include <netinet/ip6.h> 12 #include <fcntl.h> 13 #include <unistd.h> 14 15 #include <rte_common.h> 16 #include <rte_crypto.h> 17 #include <rte_cryptodev.h> 18 #include <rte_random.h> 19 20 #include "ipsec.h" 21 #include "esp.h" 22 #include "ipip.h" 23 24 int 25 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa, 26 struct rte_crypto_op *cop) 27 { 28 struct ip *ip4; 29 struct rte_crypto_sym_op *sym_cop; 30 int32_t payload_len, ip_hdr_len; 31 32 RTE_ASSERT(sa != NULL); 33 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) 34 return 0; 35 36 RTE_ASSERT(m != NULL); 37 RTE_ASSERT(cop != NULL); 38 39 ip4 = rte_pktmbuf_mtod(m, struct ip *); 40 if (likely(ip4->ip_v == IPVERSION)) 41 ip_hdr_len = ip4->ip_hl * 4; 42 else if (ip4->ip_v == IP6_VERSION) 43 /* XXX No option headers supported */ 44 ip_hdr_len = sizeof(struct ip6_hdr); 45 else { 46 RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", 47 ip4->ip_v); 48 return -EINVAL; 49 } 50 51 payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len - 52 sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len; 53 54 if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) { 55 RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n", 56 payload_len, sa->block_size); 57 return -EINVAL; 58 } 59 60 sym_cop = get_sym_cop(cop); 61 sym_cop->m_src = m; 62 63 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { 64 sym_cop->aead.data.offset = 65 ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len; 66 sym_cop->aead.data.length = payload_len; 67 68 struct cnt_blk *icb; 69 uint8_t *aad; 70 uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + 71 sizeof(struct rte_esp_hdr)); 72 73 icb = get_cnt_blk(m); 74 icb->salt = sa->salt; 75 memcpy(&icb->iv, iv, 8); 76 icb->cnt = rte_cpu_to_be_32(1); 77 78 aad = get_aad(m); 79 memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8); 80 sym_cop->aead.aad.data = aad; 81 sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, 82 aad - rte_pktmbuf_mtod(m, uint8_t *)); 83 84 sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*, 85 rte_pktmbuf_pkt_len(m) - sa->digest_len); 86 sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, 87 rte_pktmbuf_pkt_len(m) - sa->digest_len); 88 } else { 89 sym_cop->cipher.data.offset = ip_hdr_len + 90 sizeof(struct rte_esp_hdr) + 91 sa->iv_len; 92 sym_cop->cipher.data.length = payload_len; 93 94 struct cnt_blk *icb; 95 uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + 96 sizeof(struct rte_esp_hdr)); 97 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, 98 uint8_t *, IV_OFFSET); 99 100 switch (sa->cipher_algo) { 101 case RTE_CRYPTO_CIPHER_NULL: 102 case RTE_CRYPTO_CIPHER_3DES_CBC: 103 case RTE_CRYPTO_CIPHER_AES_CBC: 104 /* Copy IV at the end of crypto operation */ 105 rte_memcpy(iv_ptr, iv, sa->iv_len); 106 break; 107 case RTE_CRYPTO_CIPHER_AES_CTR: 108 icb = get_cnt_blk(m); 109 icb->salt = sa->salt; 110 memcpy(&icb->iv, iv, 8); 111 icb->cnt = rte_cpu_to_be_32(1); 112 break; 113 default: 114 RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", 115 sa->cipher_algo); 116 return -EINVAL; 117 } 118 119 switch (sa->auth_algo) { 120 case RTE_CRYPTO_AUTH_NULL: 121 case RTE_CRYPTO_AUTH_SHA1_HMAC: 122 case RTE_CRYPTO_AUTH_SHA256_HMAC: 123 sym_cop->auth.data.offset = ip_hdr_len; 124 sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) + 125 sa->iv_len + payload_len; 126 break; 127 default: 128 RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", 129 sa->auth_algo); 130 return -EINVAL; 131 } 132 133 sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*, 134 rte_pktmbuf_pkt_len(m) - sa->digest_len); 135 sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, 136 rte_pktmbuf_pkt_len(m) - sa->digest_len); 137 } 138 139 return 0; 140 } 141 142 int 143 esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, 144 struct rte_crypto_op *cop) 145 { 146 struct ip *ip4, *ip; 147 struct ip6_hdr *ip6; 148 uint8_t *nexthdr, *pad_len; 149 uint8_t *padding; 150 uint16_t i; 151 152 RTE_ASSERT(m != NULL); 153 RTE_ASSERT(sa != NULL); 154 RTE_ASSERT(cop != NULL); 155 156 if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || 157 (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { 158 if (m->ol_flags & PKT_RX_SEC_OFFLOAD) { 159 if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) 160 cop->status = RTE_CRYPTO_OP_STATUS_ERROR; 161 else 162 cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 163 } else 164 cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; 165 } 166 167 if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { 168 RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__); 169 return -1; 170 } 171 172 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && 173 sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) { 174 nexthdr = &m->inner_esp_next_proto; 175 } else { 176 nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*, 177 rte_pktmbuf_pkt_len(m) - sa->digest_len - 1); 178 pad_len = nexthdr - 1; 179 180 padding = pad_len - *pad_len; 181 for (i = 0; i < *pad_len; i++) { 182 if (padding[i] != i + 1) { 183 RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n"); 184 return -EINVAL; 185 } 186 } 187 188 if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) { 189 RTE_LOG(ERR, IPSEC_ESP, 190 "failed to remove pad_len + digest\n"); 191 return -EINVAL; 192 } 193 } 194 195 if (unlikely(IS_TRANSPORT(sa->flags))) { 196 ip = rte_pktmbuf_mtod(m, struct ip *); 197 ip4 = (struct ip *)rte_pktmbuf_adj(m, 198 sizeof(struct rte_esp_hdr) + sa->iv_len); 199 if (likely(ip->ip_v == IPVERSION)) { 200 memmove(ip4, ip, ip->ip_hl * 4); 201 ip4->ip_p = *nexthdr; 202 ip4->ip_len = htons(rte_pktmbuf_data_len(m)); 203 } else { 204 ip6 = (struct ip6_hdr *)ip4; 205 /* XXX No option headers supported */ 206 memmove(ip6, ip, sizeof(struct ip6_hdr)); 207 ip6->ip6_nxt = *nexthdr; 208 ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - 209 sizeof(struct ip6_hdr)); 210 } 211 } else 212 ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len); 213 214 return 0; 215 } 216 217 int 218 esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, 219 struct rte_crypto_op *cop) 220 { 221 struct ip *ip4; 222 struct ip6_hdr *ip6; 223 struct rte_esp_hdr *esp = NULL; 224 uint8_t *padding = NULL, *new_ip, nlp; 225 struct rte_crypto_sym_op *sym_cop; 226 int32_t i; 227 uint16_t pad_payload_len, pad_len, ip_hdr_len; 228 229 RTE_ASSERT(m != NULL); 230 RTE_ASSERT(sa != NULL); 231 232 ip_hdr_len = 0; 233 234 ip4 = rte_pktmbuf_mtod(m, struct ip *); 235 if (likely(ip4->ip_v == IPVERSION)) { 236 if (unlikely(IS_TRANSPORT(sa->flags))) { 237 ip_hdr_len = ip4->ip_hl * 4; 238 nlp = ip4->ip_p; 239 } else 240 nlp = IPPROTO_IPIP; 241 } else if (ip4->ip_v == IP6_VERSION) { 242 if (unlikely(IS_TRANSPORT(sa->flags))) { 243 /* XXX No option headers supported */ 244 ip_hdr_len = sizeof(struct ip6_hdr); 245 ip6 = (struct ip6_hdr *)ip4; 246 nlp = ip6->ip6_nxt; 247 } else 248 nlp = IPPROTO_IPV6; 249 } else { 250 RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n", 251 ip4->ip_v); 252 return -EINVAL; 253 } 254 255 /* Padded payload length */ 256 pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) - 257 ip_hdr_len + 2, sa->block_size); 258 pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m); 259 260 RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags)); 261 262 if (likely(IS_IP4_TUNNEL(sa->flags))) 263 ip_hdr_len = sizeof(struct ip); 264 else if (IS_IP6_TUNNEL(sa->flags)) 265 ip_hdr_len = sizeof(struct ip6_hdr); 266 else if (!IS_TRANSPORT(sa->flags)) { 267 RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n", 268 sa->flags); 269 return -EINVAL; 270 } 271 272 /* Check maximum packet size */ 273 if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len + 274 pad_payload_len + sa->digest_len > IP_MAXPACKET)) { 275 RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n"); 276 return -EINVAL; 277 } 278 279 /* Add trailer padding if it is not constructed by HW */ 280 if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || 281 (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO && 282 !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) { 283 padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + 284 sa->digest_len); 285 if (unlikely(padding == NULL)) { 286 RTE_LOG(ERR, IPSEC_ESP, 287 "not enough mbuf trailing space\n"); 288 return -ENOSPC; 289 } 290 rte_prefetch0(padding); 291 } 292 293 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { 294 case IP4_TUNNEL: 295 ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len, 296 &sa->src, &sa->dst); 297 esp = (struct rte_esp_hdr *)(ip4 + 1); 298 break; 299 case IP6_TUNNEL: 300 ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len, 301 &sa->src, &sa->dst); 302 esp = (struct rte_esp_hdr *)(ip6 + 1); 303 break; 304 case TRANSPORT: 305 new_ip = (uint8_t *)rte_pktmbuf_prepend(m, 306 sizeof(struct rte_esp_hdr) + sa->iv_len); 307 memmove(new_ip, ip4, ip_hdr_len); 308 esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len); 309 ip4 = (struct ip *)new_ip; 310 if (likely(ip4->ip_v == IPVERSION)) { 311 ip4->ip_p = IPPROTO_ESP; 312 ip4->ip_len = htons(rte_pktmbuf_data_len(m)); 313 } else { 314 ip6 = (struct ip6_hdr *)new_ip; 315 ip6->ip6_nxt = IPPROTO_ESP; 316 ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) - 317 sizeof(struct ip6_hdr)); 318 } 319 } 320 321 sa->seq++; 322 esp->spi = rte_cpu_to_be_32(sa->spi); 323 esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); 324 325 /* set iv */ 326 uint64_t *iv = (uint64_t *)(esp + 1); 327 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { 328 *iv = rte_cpu_to_be_64(sa->seq); 329 } else { 330 switch (sa->cipher_algo) { 331 case RTE_CRYPTO_CIPHER_NULL: 332 case RTE_CRYPTO_CIPHER_3DES_CBC: 333 case RTE_CRYPTO_CIPHER_AES_CBC: 334 memset(iv, 0, sa->iv_len); 335 break; 336 case RTE_CRYPTO_CIPHER_AES_CTR: 337 *iv = rte_cpu_to_be_64(sa->seq); 338 break; 339 default: 340 RTE_LOG(ERR, IPSEC_ESP, 341 "unsupported cipher algorithm %u\n", 342 sa->cipher_algo); 343 return -EINVAL; 344 } 345 } 346 347 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { 348 if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) { 349 /* Set the inner esp next protocol for HW trailer */ 350 m->inner_esp_next_proto = nlp; 351 m->packet_type |= RTE_PTYPE_TUNNEL_ESP; 352 } else { 353 padding[pad_len - 2] = pad_len - 2; 354 padding[pad_len - 1] = nlp; 355 } 356 goto done; 357 } 358 359 RTE_ASSERT(cop != NULL); 360 sym_cop = get_sym_cop(cop); 361 sym_cop->m_src = m; 362 363 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { 364 uint8_t *aad; 365 366 sym_cop->aead.data.offset = ip_hdr_len + 367 sizeof(struct rte_esp_hdr) + sa->iv_len; 368 sym_cop->aead.data.length = pad_payload_len; 369 370 /* Fill pad_len using default sequential scheme */ 371 for (i = 0; i < pad_len - 2; i++) 372 padding[i] = i + 1; 373 padding[pad_len - 2] = pad_len - 2; 374 padding[pad_len - 1] = nlp; 375 376 struct cnt_blk *icb = get_cnt_blk(m); 377 icb->salt = sa->salt; 378 icb->iv = rte_cpu_to_be_64(sa->seq); 379 icb->cnt = rte_cpu_to_be_32(1); 380 381 aad = get_aad(m); 382 memcpy(aad, esp, 8); 383 sym_cop->aead.aad.data = aad; 384 sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m, 385 aad - rte_pktmbuf_mtod(m, uint8_t *)); 386 387 sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, 388 rte_pktmbuf_pkt_len(m) - sa->digest_len); 389 sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, 390 rte_pktmbuf_pkt_len(m) - sa->digest_len); 391 } else { 392 switch (sa->cipher_algo) { 393 case RTE_CRYPTO_CIPHER_NULL: 394 case RTE_CRYPTO_CIPHER_3DES_CBC: 395 case RTE_CRYPTO_CIPHER_AES_CBC: 396 sym_cop->cipher.data.offset = ip_hdr_len + 397 sizeof(struct rte_esp_hdr); 398 sym_cop->cipher.data.length = pad_payload_len + sa->iv_len; 399 break; 400 case RTE_CRYPTO_CIPHER_AES_CTR: 401 sym_cop->cipher.data.offset = ip_hdr_len + 402 sizeof(struct rte_esp_hdr) + sa->iv_len; 403 sym_cop->cipher.data.length = pad_payload_len; 404 break; 405 default: 406 RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n", 407 sa->cipher_algo); 408 return -EINVAL; 409 } 410 411 /* Fill pad_len using default sequential scheme */ 412 for (i = 0; i < pad_len - 2; i++) 413 padding[i] = i + 1; 414 padding[pad_len - 2] = pad_len - 2; 415 padding[pad_len - 1] = nlp; 416 417 struct cnt_blk *icb = get_cnt_blk(m); 418 icb->salt = sa->salt; 419 icb->iv = rte_cpu_to_be_64(sa->seq); 420 icb->cnt = rte_cpu_to_be_32(1); 421 422 switch (sa->auth_algo) { 423 case RTE_CRYPTO_AUTH_NULL: 424 case RTE_CRYPTO_AUTH_SHA1_HMAC: 425 case RTE_CRYPTO_AUTH_SHA256_HMAC: 426 sym_cop->auth.data.offset = ip_hdr_len; 427 sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) + 428 sa->iv_len + pad_payload_len; 429 break; 430 default: 431 RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n", 432 sa->auth_algo); 433 return -EINVAL; 434 } 435 436 sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, 437 rte_pktmbuf_pkt_len(m) - sa->digest_len); 438 sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, 439 rte_pktmbuf_pkt_len(m) - sa->digest_len); 440 } 441 442 done: 443 return 0; 444 } 445 446 int 447 esp_outbound_post(struct rte_mbuf *m, 448 struct ipsec_sa *sa, 449 struct rte_crypto_op *cop) 450 { 451 RTE_ASSERT(m != NULL); 452 RTE_ASSERT(sa != NULL); 453 454 if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || 455 (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { 456 m->ol_flags |= PKT_TX_SEC_OFFLOAD; 457 } else { 458 RTE_ASSERT(cop != NULL); 459 if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { 460 RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", 461 __func__); 462 return -1; 463 } 464 } 465 466 return 0; 467 } 468