/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP.
 * Copyright(c) 2017 Intel Corporation.
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 */

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

#include <rte_cryptodev.h>
#include <dev_driver.h>
#include <rte_telemetry.h>
#include "rte_security.h"
#include "rte_security_driver.h"

/* Macro to check for invalid pointers */
#define RTE_PTR_OR_ERR_RET(ptr, retval) do {	\
	if ((ptr) == NULL)			\
		return retval;			\
} while (0)

/* Macro to check for invalid pointer chains */
#define RTE_PTR_CHAIN3_OR_ERR_RET(p1, p2, p3, retval, last_retval) do {	\
	RTE_PTR_OR_ERR_RET(p1, retval);					\
	RTE_PTR_OR_ERR_RET(p1->p2, retval);				\
	RTE_PTR_OR_ERR_RET(p1->p2->p3, last_retval);			\
} while (0)

#define RTE_SECURITY_DYNFIELD_NAME "rte_security_dynfield_metadata"
#define RTE_SECURITY_OOP_DYNFIELD_NAME "rte_security_oop_dynfield_metadata"

int rte_security_dynfield_offset = -1;
int rte_security_oop_dynfield_offset = -1;

int
rte_security_dynfield_register(void)
{
	static const struct rte_mbuf_dynfield dynfield_desc = {
		.name = RTE_SECURITY_DYNFIELD_NAME,
		.size = sizeof(rte_security_dynfield_t),
		.align = __alignof__(rte_security_dynfield_t),
	};
	rte_security_dynfield_offset =
		rte_mbuf_dynfield_register(&dynfield_desc);
	return rte_security_dynfield_offset;
}

int
rte_security_oop_dynfield_register(void)
{
	static const struct rte_mbuf_dynfield dynfield_desc = {
		.name = RTE_SECURITY_OOP_DYNFIELD_NAME,
		.size = sizeof(rte_security_oop_dynfield_t),
		.align = __alignof__(rte_security_oop_dynfield_t),
	};

	rte_security_oop_dynfield_offset =
		rte_mbuf_dynfield_register(&dynfield_desc);
	return rte_security_oop_dynfield_offset;
}

void *
rte_security_session_create(void *ctx,
			    struct rte_security_session_conf *conf,
			    struct rte_mempool *mp)
{
	struct rte_security_session *sess = NULL;
	struct rte_security_ctx *instance = ctx;
	uint32_t sess_priv_size;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_create, NULL, NULL);
	RTE_PTR_OR_ERR_RET(conf, NULL);
	RTE_PTR_OR_ERR_RET(mp, NULL);

	sess_priv_size = instance->ops->session_get_size(instance->device);
	if (mp->elt_size < (sizeof(struct rte_security_session) + sess_priv_size))
		return NULL;

	if (rte_mempool_get(mp, (void **)&sess))
		return NULL;

	/* Clear session priv data */
	memset(sess->driver_priv_data, 0, sess_priv_size);

	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
			offsetof(struct rte_security_session, driver_priv_data);
	if (instance->ops->session_create(instance->device, conf, sess)) {
		rte_mempool_put(mp, (void *)sess);
		return NULL;
	}
	instance->sess_cnt++;

	return (void *)sess;
}
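/*
 * Illustrative usage sketch (not part of this library): how an application
 * might size the session mempool and create a session. The names dev_id,
 * sess_mp and NB_SESS, and the error handling, are assumptions made for the
 * example only. rte_security_session_get_size() already accounts for the
 * public session header plus the driver private area, so using it as the
 * mempool element size satisfies the elt_size check above.
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_mempool *sess_mp = rte_mempool_create("sess_mp", NB_SESS,
 *			rte_security_session_get_size(sec_ctx), 0, 0,
 *			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *	struct rte_security_session_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	... fill conf per protocol (IPsec, MACsec, PDCP, DOCSIS, TLS record) ...
 *
 *	void *sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
 *	if (sess == NULL)
 *		rte_exit(EXIT_FAILURE, "security session creation failed\n");
 *	... use the session ...
 *	rte_security_session_destroy(sec_ctx, sess);
 */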
int
rte_security_session_update(void *ctx, void *sess, struct rte_security_session_conf *conf)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_update, -EINVAL,
			-ENOTSUP);
	RTE_PTR_OR_ERR_RET(sess, -EINVAL);
	RTE_PTR_OR_ERR_RET(conf, -EINVAL);

	return instance->ops->session_update(instance->device, sess, conf);
}

unsigned int
rte_security_session_get_size(void *ctx)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_get_size, 0, 0);

	return (sizeof(struct rte_security_session) +
			instance->ops->session_get_size(instance->device));
}

int
rte_security_session_stats_get(void *ctx, void *sess, struct rte_security_stats *stats)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_stats_get, -EINVAL,
			-ENOTSUP);
	/* Parameter sess can be NULL in case of getting global statistics. */
	RTE_PTR_OR_ERR_RET(stats, -EINVAL);

	return instance->ops->session_stats_get(instance->device, sess, stats);
}

int
rte_security_session_destroy(void *ctx, void *sess)
{
	struct rte_security_ctx *instance = ctx;
	int ret;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, session_destroy, -EINVAL,
			-ENOTSUP);
	RTE_PTR_OR_ERR_RET(sess, -EINVAL);

	ret = instance->ops->session_destroy(instance->device, sess);
	if (ret != 0)
		return ret;

	rte_mempool_put(rte_mempool_from_obj(sess), (void *)sess);

	if (instance->sess_cnt)
		instance->sess_cnt--;

	return 0;
}

int
rte_security_macsec_sc_create(void *ctx, struct rte_security_macsec_sc *conf)
{
	struct rte_security_ctx *instance = ctx;
	int sc_id;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_create, -EINVAL, -ENOTSUP);
	RTE_PTR_OR_ERR_RET(conf, -EINVAL);

	sc_id = instance->ops->macsec_sc_create(instance->device, conf);
	if (sc_id >= 0)
		instance->macsec_sc_cnt++;

	return sc_id;
}

int
rte_security_macsec_sa_create(void *ctx, struct rte_security_macsec_sa *conf)
{
	struct rte_security_ctx *instance = ctx;
	int sa_id;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_create, -EINVAL, -ENOTSUP);
	RTE_PTR_OR_ERR_RET(conf, -EINVAL);

	sa_id = instance->ops->macsec_sa_create(instance->device, conf);
	if (sa_id >= 0)
		instance->macsec_sa_cnt++;

	return sa_id;
}

int
rte_security_macsec_sc_destroy(void *ctx, uint16_t sc_id,
			       enum rte_security_macsec_direction dir)
{
	struct rte_security_ctx *instance = ctx;
	int ret;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_destroy, -EINVAL, -ENOTSUP);

	ret = instance->ops->macsec_sc_destroy(instance->device, sc_id, dir);
	if (ret != 0)
		return ret;

	if (instance->macsec_sc_cnt)
		instance->macsec_sc_cnt--;

	return 0;
}

int
rte_security_macsec_sa_destroy(void *ctx, uint16_t sa_id,
			       enum rte_security_macsec_direction dir)
{
	struct rte_security_ctx *instance = ctx;
	int ret;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_destroy, -EINVAL, -ENOTSUP);

	ret = instance->ops->macsec_sa_destroy(instance->device, sa_id, dir);
	if (ret != 0)
		return ret;

	if (instance->macsec_sa_cnt)
		instance->macsec_sa_cnt--;

	return 0;
}

int
rte_security_macsec_sc_stats_get(void *ctx, uint16_t sc_id,
				 enum rte_security_macsec_direction dir,
				 struct rte_security_macsec_sc_stats *stats)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_stats_get, -EINVAL, -ENOTSUP);
	RTE_PTR_OR_ERR_RET(stats, -EINVAL);

	return instance->ops->macsec_sc_stats_get(instance->device, sc_id, dir, stats);
}

int
rte_security_macsec_sa_stats_get(void *ctx, uint16_t sa_id,
				 enum rte_security_macsec_direction dir,
				 struct rte_security_macsec_sa_stats *stats)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_stats_get, -EINVAL, -ENOTSUP);
	RTE_PTR_OR_ERR_RET(stats, -EINVAL);

	return instance->ops->macsec_sa_stats_get(instance->device, sa_id, dir, stats);
}
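/*
 * Illustrative MACsec control-path sketch (not part of this library): SCs and
 * SAs are created independently and referred to by the returned ids. The
 * exact contents of the rte_security_macsec_sc/sa configurations, and the
 * order a given PMD expects, are driver specific, so only the call pattern
 * and error handling are shown. sec_ctx, sc_conf and sa_conf are assumed to
 * exist and be populated by the caller.
 *
 *	int sa_id = rte_security_macsec_sa_create(sec_ctx, &sa_conf);
 *	if (sa_id < 0)
 *		return sa_id;
 *	int sc_id = rte_security_macsec_sc_create(sec_ctx, &sc_conf);
 *	if (sc_id < 0) {
 *		rte_security_macsec_sa_destroy(sec_ctx, sa_id,
 *				RTE_SECURITY_MACSEC_DIR_TX);
 *		return sc_id;
 *	}
 *	... datapath ...
 *	rte_security_macsec_sc_destroy(sec_ctx, sc_id, RTE_SECURITY_MACSEC_DIR_TX);
 *	rte_security_macsec_sa_destroy(sec_ctx, sa_id, RTE_SECURITY_MACSEC_DIR_TX);
 */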
int
__rte_security_set_pkt_metadata(void *ctx, void *sess, struct rte_mbuf *m, void *params)
{
	struct rte_security_ctx *instance = ctx;
#ifdef RTE_DEBUG
	RTE_PTR_OR_ERR_RET(sess, -EINVAL);
	RTE_PTR_OR_ERR_RET(instance, -EINVAL);
	RTE_PTR_OR_ERR_RET(instance->ops, -EINVAL);
#endif
	if (*instance->ops->set_pkt_metadata == NULL)
		return -ENOTSUP;
	return instance->ops->set_pkt_metadata(instance->device,
					       sess, m, params);
}

const struct rte_security_capability *
rte_security_capabilities_get(void *ctx)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, capabilities_get, NULL, NULL);

	return instance->ops->capabilities_get(instance->device);
}

const struct rte_security_capability *
rte_security_capability_get(void *ctx, struct rte_security_capability_idx *idx)
{
	const struct rte_security_capability *capabilities;
	const struct rte_security_capability *capability;
	struct rte_security_ctx *instance = ctx;
	uint16_t i = 0;

	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, capabilities_get, NULL, NULL);
	RTE_PTR_OR_ERR_RET(idx, NULL);

	capabilities = instance->ops->capabilities_get(instance->device);

	if (capabilities == NULL)
		return NULL;

	while ((capability = &capabilities[i++])->action
			!= RTE_SECURITY_ACTION_TYPE_NONE) {
		if (capability->action == idx->action &&
				capability->protocol == idx->protocol) {
			if (idx->protocol == RTE_SECURITY_PROTOCOL_IPSEC) {
				if (capability->ipsec.proto ==
						idx->ipsec.proto &&
				    capability->ipsec.mode ==
						idx->ipsec.mode &&
				    capability->ipsec.direction ==
						idx->ipsec.direction)
					return capability;
			} else if (idx->protocol == RTE_SECURITY_PROTOCOL_PDCP) {
				if (capability->pdcp.domain ==
						idx->pdcp.domain)
					return capability;
			} else if (idx->protocol ==
						RTE_SECURITY_PROTOCOL_DOCSIS) {
				if (capability->docsis.direction ==
						idx->docsis.direction)
					return capability;
			} else if (idx->protocol ==
						RTE_SECURITY_PROTOCOL_MACSEC) {
				if (idx->macsec.alg == capability->macsec.alg)
					return capability;
			} else if (idx->protocol == RTE_SECURITY_PROTOCOL_TLS_RECORD) {
				if (capability->tls_record.ver == idx->tls_record.ver &&
				    capability->tls_record.type == idx->tls_record.type)
					return capability;
			}
		}
	}

	return NULL;
}
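/*
 * Illustrative capability lookup sketch (not part of this library): before
 * creating, say, an inline IPsec session, an application would typically
 * confirm that the device advertises a matching capability. sec_ctx is
 * assumed to be a valid security context; the chosen protocol/mode values
 * are examples only.
 *
 *	struct rte_security_capability_idx idx = {
 *		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *		},
 *	};
 *	const struct rte_security_capability *cap =
 *		rte_security_capability_get(sec_ctx, &idx);
 *	if (cap == NULL)
 *		return -ENOTSUP;
 */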
int
rte_security_rx_inject_configure(void *ctx, uint16_t port_id, bool enable)
{
	struct rte_security_ctx *instance = ctx;

	RTE_PTR_OR_ERR_RET(instance, -EINVAL);
	RTE_PTR_OR_ERR_RET(instance->ops, -ENOTSUP);
	RTE_PTR_OR_ERR_RET(instance->ops->rx_inject_configure, -ENOTSUP);

	return instance->ops->rx_inject_configure(instance->device, port_id, enable);
}

uint16_t
rte_security_inb_pkt_rx_inject(void *ctx, struct rte_mbuf **pkts, void **sess,
			       uint16_t nb_pkts)
{
	struct rte_security_ctx *instance = ctx;

	return instance->ops->inb_pkt_rx_inject(instance->device, pkts,
			(struct rte_security_session **)sess, nb_pkts);
}

static int
security_handle_cryptodev_list(const char *cmd __rte_unused,
			       const char *params __rte_unused,
			       struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id) &&
		    rte_cryptodev_get_sec_ctx(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

#define CRYPTO_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
			sizeof(uint64_t)) / sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		  const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
	       RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_uint(d, caps_val[j]);
	}

	return (i - 1);
}

#define SEC_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_security_capability), \
			sizeof(uint64_t)) / sizeof(uint64_t))

static int
sec_caps_array(struct rte_tel_data *d,
	       const struct rte_security_capability *capabilities)
{
	const struct rte_security_capability *dev_caps;
	uint64_t caps_val[SEC_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);

	while ((dev_caps = &capabilities[i++])->action !=
	       RTE_SECURITY_ACTION_TYPE_NONE) {
		memset(&caps_val, 0, SEC_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < SEC_CAPS_SZ; j++)
			rte_tel_data_add_array_uint(d, caps_val[j]);
	}

	return i - 1;
}
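/*
 * Note on the two serialization helpers above: each capability entry is
 * copied verbatim into CRYPTO_CAPS_SZ/SEC_CAPS_SZ uint64_t words and appended
 * to a telemetry uint array, so the telemetry consumer has to reassemble the
 * structures knowing the struct layout of the running DPDK version. The
 * return value is the number of entries serialized, excluding the
 * terminating entry (RTE_CRYPTO_OP_TYPE_UNDEFINED or
 * RTE_SECURITY_ACTION_TYPE_NONE respectively).
 */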
static const struct rte_security_capability *
security_capability_by_index(const struct rte_security_capability *capabilities,
			     int index)
{
	const struct rte_security_capability *dev_caps = NULL;
	int i = 0;

	while ((dev_caps = &capabilities[i])->action !=
	       RTE_SECURITY_ACTION_TYPE_NONE) {
		if (i == index)
			return dev_caps;

		++i;
	}

	return NULL;
}

static int
security_capabilities_from_dev_id(int dev_id, const void **caps)
{
	const struct rte_security_capability *capabilities;
	void *sec_ctx;

	if (rte_cryptodev_is_valid_dev(dev_id) == 0)
		return -EINVAL;

	sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
	RTE_PTR_OR_ERR_RET(sec_ctx, -EINVAL);

	capabilities = rte_security_capabilities_get(sec_ctx);
	RTE_PTR_OR_ERR_RET(capabilities, -EINVAL);

	*caps = capabilities;
	return 0;
}

static int
security_handle_cryptodev_sec_caps(const char *cmd __rte_unused, const char *params,
				   struct rte_tel_data *d)
{
	const struct rte_security_capability *capabilities;
	struct rte_tel_data *sec_caps;
	char *end_param;
	int sec_caps_n;
	int dev_id;
	int rc;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");

	rc = security_capabilities_from_dev_id(dev_id, (void *)&capabilities);
	if (rc < 0)
		return rc;

	sec_caps = rte_tel_data_alloc();
	RTE_PTR_OR_ERR_RET(sec_caps, -ENOMEM);

	rte_tel_data_start_dict(d);
	sec_caps_n = sec_caps_array(sec_caps, capabilities);
	rte_tel_data_add_dict_container(d, "sec_caps", sec_caps, 0);
	rte_tel_data_add_dict_int(d, "sec_caps_n", sec_caps_n);

	return 0;
}

static int
security_handle_cryptodev_crypto_caps(const char *cmd __rte_unused, const char *params,
				      struct rte_tel_data *d)
{
	const struct rte_security_capability *capabilities;
	struct rte_tel_data *crypto_caps;
	const char *capa_param;
	int dev_id, capa_id;
	int crypto_caps_n;
	char *end_param;
	int rc;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	capa_param = strtok(end_param, ",");
	if (!capa_param || strlen(capa_param) == 0 || !isdigit(*capa_param))
		return -EINVAL;

	capa_id = strtoul(capa_param, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");

	rc = security_capabilities_from_dev_id(dev_id, (void *)&capabilities);
	if (rc < 0)
		return rc;

	capabilities = security_capability_by_index(capabilities, capa_id);
	RTE_PTR_OR_ERR_RET(capabilities, -EINVAL);

	crypto_caps = rte_tel_data_alloc();
	RTE_PTR_OR_ERR_RET(crypto_caps, -ENOMEM);

	rte_tel_data_start_dict(d);
	crypto_caps_n = crypto_caps_array(crypto_caps, capabilities->crypto_capabilities);

	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(security_init_telemetry)
{
	rte_telemetry_register_cmd("/security/cryptodev/list",
		security_handle_cryptodev_list,
		"Returns list of available crypto devices by IDs. No parameters.");

	rte_telemetry_register_cmd("/security/cryptodev/sec_caps",
		security_handle_cryptodev_sec_caps,
		"Returns security capabilities for a cryptodev. Parameters: int dev_id");

	rte_telemetry_register_cmd("/security/cryptodev/crypto_caps",
		security_handle_cryptodev_crypto_caps,
		"Returns crypto capabilities for a security capability. Parameters: int dev_id, sec_cap_id");
}
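/*
 * Example interaction with the commands registered above, e.g. through a
 * telemetry client such as usertools/dpdk-telemetry.py (illustrative only;
 * actual device ids and capability counts depend on the running system):
 *
 *	/security/cryptodev/list           - array of cryptodev ids that
 *	                                     expose a security context
 *	/security/cryptodev/sec_caps,0     - dict with "sec_caps" (serialized
 *	                                     capability words) and "sec_caps_n"
 *	                                     for device 0
 *	/security/cryptodev/crypto_caps,0,1 - dict with "crypto_caps" and
 *	                                     "crypto_caps_n" for security
 *	                                     capability index 1 of device 0
 */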