/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_malloc.h>

#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"

/**
 * Global static parameter used to find if CCP device is already initialized.
 */
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;

struct ccp_pmd_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	bool auth_opt;
};

#define CCP_CRYPTODEV_PARAM_NAME	("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID	("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP	("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT	("ccp_auth_opt")

const char *ccp_pmd_valid_params[] = {
	CCP_CRYPTODEV_PARAM_NAME,
	CCP_CRYPTODEV_PARAM_SOCKET_ID,
	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
	CCP_CRYPTODEV_PARAM_AUTH_OPT,
};

/** ccp pmd auth option */
enum ccp_pmd_auth_opt {
	CCP_PMD_AUTH_OPT_CCP = 0,
	CCP_PMD_AUTH_OPT_CPU,
};

/** parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *) extra_args;

	*i = atoi(value);
	if (*i < 0) {
		CCP_LOG_ERR("Argument has to be non-negative.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	struct rte_cryptodev_pmd_init_params *params = extra_args;

	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CCP_LOG_ERR("Invalid name %s, should be less than "
			    "%u bytes.\n", value,
			    RTE_CRYPTODEV_NAME_MAX_LEN - 1);
		return -EINVAL;
	}

	strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

	return 0;
}

/** parse authentication operation option */
static int
parse_auth_opt_arg(const char *key __rte_unused,
		   const char *value, void *extra_args)
{
	struct ccp_pmd_init_params *params = extra_args;
	int i;

	i = atoi(value);
	if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
		CCP_LOG_ERR("Invalid ccp pmd auth option. "
			    "0->auth on CCP(default), "
			    "1->auth on CPU\n");
		return -EINVAL;
	}
	params->auth_opt = i;
	return 0;
}

static int
ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
			 const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args,
					  ccp_pmd_valid_params);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_MAX_NB_QP,
					 &parse_integer_arg,
					 &params->def_p.max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_SOCKET_ID,
					 &parse_integer_arg,
					 &params->def_p.socket_id);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_NAME,
					 &parse_name_arg,
					 &params->def_p);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_AUTH_OPT,
					 &parse_auth_opt_arg,
					 params);
		if (ret < 0)
			goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static struct ccp_session *
get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
{
	struct ccp_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session == NULL))
			return NULL;

		sess = (struct ccp_session *)
			get_sym_session_private_data(
				op->sym->session,
				ccp_cryptodev_driver_id);
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		void *_sess;
		void *_sess_private_data = NULL;
		struct ccp_private *internals;

		/* Build a temporary session from the op's xform chain. */
		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		if (rte_mempool_get(qp->sess_mp_priv,
				    (void **)&_sess_private_data)) {
			rte_mempool_put(qp->sess_mp, _sess);
			return NULL;
		}

		sess = (struct ccp_session *)_sess_private_data;

		internals = (struct ccp_private *)qp->dev->data->dev_private;
		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
							internals) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
					     ccp_cryptodev_driver_id,
					     _sess_private_data);
	}

	return sess;
}

static uint16_t
ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_session *sess = NULL;
	struct ccp_qp *qp = queue_pair;
	struct ccp_queue *cmd_q;
	struct rte_cryptodev *dev = qp->dev;
	uint16_t i, enq_cnt = 0, slots_req = 0;
	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;

	if (nb_ops == 0)
		return 0;

	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
		return 0;

	/* Split the burst into chunks so it can be spread over all
	 * detected CCP devices.
	 */
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + (nb_ops) % cryptodev_cnt;
	else
		cur_ops = tmp_ops;
	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;
		slots_req = 0;
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;
			tmp_ops = 0;
		}
		for (i = 0; i < cur_ops; i++) {
			sess = get_ccp_session(qp, ops[i + b_idx]);
			if (unlikely(sess == NULL) && (i == 0)) {
				qp->qp_stats.enqueue_err_count++;
				return 0;
			} else if (sess == NULL) {
				cur_ops = i;
				break;
			}
			slots_req += ccp_compute_slot_count(sess);
		}

		cmd_q = ccp_allot_queue(dev, slots_req);
		if (unlikely(cmd_q == NULL))
			return 0;
		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
						  nb_ops, slots_req, b_idx);
	}

	qp->qp_stats.enqueued_count += enq_cnt;
	return enq_cnt;
}

static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_qp *qp = queue_pair;
	uint16_t nb_dequeued = 0, i, total_nb_ops;

	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);

	if (total_nb_ops) {
		while (nb_dequeued != total_nb_ops) {
			nb_dequeued = process_ops_to_dequeue(qp,
					ops, nb_ops, &total_nb_ops);
		}
	}

	/* Free session if a session-less crypto op */
	for (i = 0; i < nb_dequeued; i++)
		if (unlikely(ops[i]->sess_type ==
			     RTE_CRYPTO_OP_SESSIONLESS)) {
			struct ccp_session *sess = (struct ccp_session *)
					get_sym_session_private_data(
						ops[i]->sym->session,
						ccp_cryptodev_driver_id);

			rte_mempool_put(qp->sess_mp_priv, sess);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

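/*
 * Application-side view of the datapath above (illustrative sketch only, not
 * part of this driver): once the vdev has been probed and a queue pair set
 * up, the burst handlers registered below are reached through the generic
 * cryptodev API, e.g.
 *
 *	uint16_t nb_enq, nb_deq;
 *
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	do {
 *		nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops,
 *						     nb_ops);
 *	} while (nb_deq == 0);
 *
 * Here dev_id and qp_id are whatever the application configured via
 * rte_cryptodev_configure() and rte_cryptodev_queue_pair_setup(), and ops is
 * an application-owned array of struct rte_crypto_op pointers.
 */
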
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id ccp_pci_id[] = {
	{
		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
	},
	{.device_id = 0},
};

/** Remove ccp pmd */
static int
cryptodev_ccp_remove(struct rte_vdev_device *dev)
{
	const char *name;

	ccp_pmd_init_done = 0;
	name = rte_vdev_device_name(dev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
		name, rte_socket_id());

	return 0;
}

/** Create crypto device */
static int
cryptodev_ccp_create(const char *name,
		     struct rte_vdev_device *vdev,
		     struct ccp_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct ccp_private *internals;

	if (init_params->def_p.name[0] == '\0')
		strlcpy(init_params->def_p.name, name,
			sizeof(init_params->def_p.name));

	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
				       &vdev->device,
				       &init_params->def_p);
	if (dev == NULL) {
		CCP_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);

	if (cryptodev_cnt == 0) {
		CCP_LOG_ERR("failed to detect CCP crypto device");
		goto init_error;
	}

	RTE_LOG(INFO, PMD, "CCP: crypto device count = %d\n", cryptodev_cnt);
	dev->driver_id = ccp_cryptodev_driver_id;

	/* register rx/tx burst functions for data path */
	dev->dev_ops = ccp_pmd_ops;
	dev->enqueue_burst = ccp_pmd_enqueue_burst;
	dev->dequeue_burst = ccp_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
	internals->auth_opt = init_params->auth_opt;
	internals->crypto_num_dev = cryptodev_cnt;

	return 0;

init_error:
	CCP_LOG_ERR("driver %s: %s() failed",
		    init_params->def_p.name, __func__);
	cryptodev_ccp_remove(vdev);

	return -EFAULT;
}

/** Probe ccp pmd */
static int
cryptodev_ccp_probe(struct rte_vdev_device *vdev)
{
	int rc = 0;
	const char *name;
	struct ccp_pmd_init_params init_params = {
		.def_p = {
			"",
			sizeof(struct ccp_private),
			rte_socket_id(),
			CCP_PMD_MAX_QUEUE_PAIRS
		},
		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
	};
	const char *input_args;

	if (ccp_pmd_init_done) {
		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
		return -EFAULT;
	}
	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	input_args = rte_vdev_device_args(vdev);
	rc = ccp_pmd_parse_input_args(&init_params, input_args);
	if (rc != 0) {
		RTE_LOG(ERR, PMD, "Failed to parse input arguments\n");
		return -EINVAL;
	}
	/* Always expose the maximum number of queue pairs supported. */
	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
		init_params.def_p.socket_id);
	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
		init_params.def_p.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
		((init_params.auth_opt == 0) ? "CCP" : "CPU"));

	rc = cryptodev_ccp_create(name, vdev, &init_params);
	if (rc)
		return rc;
	ccp_pmd_init_done = 1;
	return 0;
}

static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
	.probe = cryptodev_ccp_probe,
	.remove = cryptodev_ccp_remove
};

static struct cryptodev_driver ccp_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int> "
	"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
			       ccp_cryptodev_driver_id);

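/*
 * Devargs sketch (illustrative only, not part of the build): the parameters
 * registered above can be passed on the EAL command line when requesting the
 * vdev, for example with a hypothetical application binary "dpdk-app":
 *
 *	./dpdk-app -l 0-1 --vdev "crypto_ccp,socket_id=0,ccp_auth_opt=1"
 *
 * where "crypto_ccp" is the vdev name registered as CRYPTODEV_NAME_CCP_PMD
 * and ccp_auth_opt=1 selects CPU-based authentication (see enum
 * ccp_pmd_auth_opt above).
 */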