/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);

	sym_cop->m_src = m;
	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
		sa->iv_len;
	sym_cop->cipher.data.length = payload_len;

	struct cnt_blk *icb;
	uint8_t *aad;
	uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
			uint8_t *, IV_OFFSET);
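
	/*
	 * Per-algorithm IV handling: CBC carries a full-block IV on the
	 * wire, copied verbatim into the crypto op; CTR/GCM carry an 8-byte
	 * IV that is expanded into a 16-byte counter block, assembled below
	 * as salt | wire IV | 32-bit big-endian counter starting at 1.
	 */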
	switch (sa->cipher_algo) {
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_AES_CBC:
		/* Copy IV at the end of crypto operation */
		rte_memcpy(iv_ptr, iv, sa->iv_len);
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
				sa->cipher_algo);
		return -EINVAL;
	}

	switch (sa->auth_algo) {
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sym_cop->auth.data.offset = ip_hdr_len;
		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
			sa->iv_len + payload_len;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		aad = get_aad(m);
		/* AAD is the ESP header: 4-byte SPI plus 4-byte sequence */
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->auth.aad.data = aad;
		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));
		sym_cop->auth.aad.length = 8;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
				sa->auth_algo);
		return -EINVAL;
	}

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}

int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
	pad_len = nexthdr - 1;

	/* RFC 4303 default padding is the monotonic sequence 1, 2, 3, ... */
	padding = pad_len - *pad_len;
	for (i = 0; i < *pad_len; i++) {
		if (padding[i] != i + 1) {
			RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
			return -EINVAL;
		}
	}

	if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
		RTE_LOG(ERR, IPSEC_ESP,
				"failed to remove pad_len + digest\n");
		return -EINVAL;
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			/* ip6_plen excludes the fixed IPv6 header itself */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
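
/*
 * For reference, the decrypted ESP payload stripped above ends with the
 * RFC 4303 trailer:
 *
 *     | payload | padding (1, 2, ...) | pad length | next header | ICV |
 *
 * which is why the padding bytes are validated against the sequence
 * 1, 2, ... and why pad_len + 2 + digest_len bytes are trimmed.
 */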

int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length (payload + pad length + next header byte) */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
	if (unlikely(padding == NULL)) {
		RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
		return -ENOSPC;
	}
	rte_prefetch0(padding);

	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4 = (struct ip *)new_ip;
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			/* ip6_plen excludes the fixed IPv6 header itself */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	uint64_t *iv = (uint64_t *)(esp + 1);

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
	switch (sa->cipher_algo) {
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_AES_CBC:
		memset(iv, 0, sa->iv_len);
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr);
		sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
		*iv = sa->seq;
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->cipher.data.length = pad_payload_len;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
				sa->cipher_algo);
		return -EINVAL;
	}

	/* Fill padding using the default sequential scheme */
	for (i = 0; i < pad_len - 2; i++)
		padding[i] = i + 1;
	padding[pad_len - 2] = pad_len - 2;
	padding[pad_len - 1] = nlp;

	struct cnt_blk *icb = get_cnt_blk(m);
	icb->salt = sa->salt;
	icb->iv = sa->seq;
	icb->cnt = rte_cpu_to_be_32(1);
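
	/*
	 * Note: for CTR/GCM the per-packet IV written above is the SA's
	 * 64-bit sequence counter, so IV uniqueness holds only as long as
	 * that counter never wraps within the lifetime of the SA.
	 */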

	uint8_t *aad;

	switch (sa->auth_algo) {
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sym_cop->auth.data.offset = ip_hdr_len;
		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
			sa->iv_len + pad_payload_len;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->auth.aad.data = aad;
		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));
		sym_cop->auth.aad.length = 8;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
				sa->auth_algo);
		return -EINVAL;
	}

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}

int
esp_outbound_post(struct rte_mbuf *m __rte_unused,
		struct ipsec_sa *sa __rte_unused,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
		return -1;
	}

	return 0;
}
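
/*
 * A minimal usage sketch (assumed flow; the actual enqueue/dequeue plumbing
 * lives elsewhere in ipsec-secgw, not in this file): the inbound/outbound
 * functions only prepare the crypto op, the caller submits it to a
 * cryptodev, and the corresponding post function runs on the dequeued
 * result, e.g.:
 *
 *     if (esp_outbound(m, sa, cop) == 0) {
 *         rte_cryptodev_enqueue_burst(dev_id, qp_id, &cop, 1);
 *         ...
 *         rte_cryptodev_dequeue_burst(dev_id, qp_id, &cop, 1);
 *         if (esp_outbound_post(m, sa, cop) != 0)
 *             rte_pktmbuf_free(m);
 *     }
 */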