/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_malloc.h>

#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"

/**
 * Global static parameter used to find if CCP device is already initialized.
 */
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;

struct ccp_pmd_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	bool auth_opt;
};

#define CCP_CRYPTODEV_PARAM_NAME	("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID	("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP	("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT	("ccp_auth_opt")

const char *ccp_pmd_valid_params[] = {
	CCP_CRYPTODEV_PARAM_NAME,
	CCP_CRYPTODEV_PARAM_SOCKET_ID,
	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
	CCP_CRYPTODEV_PARAM_AUTH_OPT,
};

/** ccp pmd auth option */
enum ccp_pmd_auth_opt {
	CCP_PMD_AUTH_OPT_CCP = 0,
	CCP_PMD_AUTH_OPT_CPU,
};

/** parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *) extra_args;

	*i = atoi(value);
	if (*i < 0) {
		CCP_LOG_ERR("Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	struct rte_cryptodev_pmd_init_params *params = extra_args;

	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CCP_LOG_ERR("Invalid name %s, should be less than "
			    "%u bytes.\n", value,
			    RTE_CRYPTODEV_NAME_MAX_LEN - 1);
		return -EINVAL;
	}

	strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

	return 0;
}

/** parse authentication operation option */
static int
parse_auth_opt_arg(const char *key __rte_unused,
		   const char *value, void *extra_args)
{
	struct ccp_pmd_init_params *params = extra_args;
	int i;

	i = atoi(value);
	if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
		CCP_LOG_ERR("Invalid ccp pmd auth option. "
			    "0->auth on CCP(default), "
			    "1->auth on CPU\n");
		return -EINVAL;
	}
	params->auth_opt = i;
	return 0;
}

static int
ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
			 const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args,
					  ccp_pmd_valid_params);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_MAX_NB_QP,
					 &parse_integer_arg,
					 &params->def_p.max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_SOCKET_ID,
					 &parse_integer_arg,
					 &params->def_p.socket_id);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_NAME,
					 &parse_name_arg,
					 &params->def_p);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_AUTH_OPT,
					 &parse_auth_opt_arg,
					 params);
		if (ret < 0)
			goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static struct ccp_session *
get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
{
	struct ccp_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session == NULL))
			return NULL;

		sess = (struct ccp_session *)
			get_sym_session_private_data(
				op->sym->session,
				ccp_cryptodev_driver_id);
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		void *_sess;
		void *_sess_private_data = NULL;
		struct ccp_private *internals;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		/* Private session data comes from the private-data mempool,
		 * matching the rte_mempool_put() calls below and in the
		 * dequeue path.
		 */
		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct ccp_session *)_sess_private_data;

		internals = (struct ccp_private *)qp->dev->data->dev_private;
		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
							internals) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
					     ccp_cryptodev_driver_id,
					     _sess_private_data);
	}

	return sess;
}

/*
 * Enqueue burst: ops are submitted in batches of roughly
 * nb_ops / cryptodev_cnt, so the load can be spread over the detected CCP
 * engines.  For each batch, the descriptor slots needed by every session
 * are summed and a command queue with enough free slots is picked through
 * ccp_allot_queue() before the batch is pushed with process_ops_to_enqueue().
 */
static uint16_t
ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_session *sess = NULL;
	struct ccp_qp *qp = queue_pair;
	struct ccp_queue *cmd_q;
	struct rte_cryptodev *dev = qp->dev;
	uint16_t i, enq_cnt = 0, slots_req = 0;
	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;

	if (nb_ops == 0)
		return 0;

	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
		return 0;
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + (nb_ops) % cryptodev_cnt;
	else
		cur_ops = tmp_ops;
	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;
		slots_req = 0;
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;
			tmp_ops = 0;
		}
		for (i = 0; i < cur_ops; i++) {
			sess = get_ccp_session(qp, ops[i + b_idx]);
			if (unlikely(sess == NULL) && (i == 0)) {
				qp->qp_stats.enqueue_err_count++;
				return 0;
			} else if (sess == NULL) {
				cur_ops = i;
				break;
			}
			slots_req += ccp_compute_slot_count(sess);
		}

		cmd_q = ccp_allot_queue(dev, slots_req);
		if (unlikely(cmd_q == NULL))
			return 0;
		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
				nb_ops, slots_req, b_idx);
		i++;
	}

	qp->qp_stats.enqueued_count += enq_cnt;
	return enq_cnt;
}

static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_qp *qp = queue_pair;
	uint16_t nb_dequeued = 0, i, total_nb_ops;

	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);

	if (total_nb_ops) {
		while (nb_dequeued != total_nb_ops) {
			nb_dequeued = process_ops_to_dequeue(qp,
					ops, nb_ops, &total_nb_ops);
		}
	}

	/* Free session if a session-less crypto op */
	for (i = 0; i < nb_dequeued; i++)
		if (unlikely(ops[i]->sess_type ==
			     RTE_CRYPTO_OP_SESSIONLESS)) {
			struct ccp_session *sess = (struct ccp_session *)
				get_sym_session_private_data(
					ops[i]->sym->session,
					ccp_cryptodev_driver_id);

			rte_mempool_put(qp->sess_mp_priv, sess);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id ccp_pci_id[] = {
	{
		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
	},
	{.device_id = 0},
};

/** Remove ccp pmd */
static int
cryptodev_ccp_remove(struct rte_vdev_device *dev)
{
	const char *name;

	ccp_pmd_init_done = 0;
	name = rte_vdev_device_name(dev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
		name, rte_socket_id());

	return 0;
}

/** Create crypto device */
static int
cryptodev_ccp_create(const char *name,
		     struct rte_vdev_device *vdev,
		     struct ccp_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct ccp_private *internals;

	if (init_params->def_p.name[0] == '\0')
		strlcpy(init_params->def_p.name, name,
			sizeof(init_params->def_p.name));

	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
				       &vdev->device,
				       &init_params->def_p);
	if (dev == NULL) {
		CCP_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);

	if (cryptodev_cnt == 0) {
		CCP_LOG_ERR("failed to detect CCP crypto device");
		goto init_error;
	}

	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
	dev->driver_id = ccp_cryptodev_driver_id;

	/* register rx/tx burst functions for data path */
	dev->dev_ops = ccp_pmd_ops;
	dev->enqueue_burst = ccp_pmd_enqueue_burst;
	dev->dequeue_burst = ccp_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
	internals->auth_opt = init_params->auth_opt;
	internals->crypto_num_dev = cryptodev_cnt;

	return 0;

init_error:
	CCP_LOG_ERR("driver %s: %s() failed",
		    init_params->def_p.name, __func__);
	cryptodev_ccp_remove(vdev);

	return -EFAULT;
}
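
/*
 * Usage sketch (illustrative assumption, not taken from these sources): the
 * probe path below consumes devargs of the form
 *     --vdev "crypto_ccp,socket_id=0,max_nb_queue_pairs=8,ccp_auth_opt=1"
 * assuming CRYPTODEV_NAME_CCP_PMD expands to "crypto_ccp".  Each key is
 * routed to a parse_*_arg() callback by ccp_pmd_parse_input_args(); note
 * that cryptodev_ccp_probe() then resets max_nb_queue_pairs to
 * CCP_PMD_MAX_QUEUE_PAIRS regardless of the parsed value.
 */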

/** Probe ccp pmd */
static int
cryptodev_ccp_probe(struct rte_vdev_device *vdev)
{
	int rc = 0;
	const char *name;
	struct ccp_pmd_init_params init_params = {
		.def_p = {
			"",
			sizeof(struct ccp_private),
			rte_socket_id(),
			CCP_PMD_MAX_QUEUE_PAIRS
		},
		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
	};
	const char *input_args;

	if (ccp_pmd_init_done) {
		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
		return -EFAULT;
	}
	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	input_args = rte_vdev_device_args(vdev);
	ccp_pmd_parse_input_args(&init_params, input_args);
	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
		init_params.def_p.socket_id);
	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
		init_params.def_p.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
		((init_params.auth_opt == 0) ? "CCP" : "CPU"));

	rc = cryptodev_ccp_create(name, vdev, &init_params);
	if (rc)
		return rc;
	ccp_pmd_init_done = 1;
	return 0;
}

static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
	.probe = cryptodev_ccp_probe,
	.remove = cryptodev_ccp_remove
};

static struct cryptodev_driver ccp_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int> "
	"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
			       ccp_cryptodev_driver_id);