/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_malloc.h>

#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"

/**
 * Global flag recording whether the CCP device is already initialized.
 */
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;
extern void *sha_ctx;

struct ccp_pmd_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	bool auth_opt;
};

#define CCP_CRYPTODEV_PARAM_NAME	("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID	("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP	("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT	("ccp_auth_opt")

const char *ccp_pmd_valid_params[] = {
	CCP_CRYPTODEV_PARAM_NAME,
	CCP_CRYPTODEV_PARAM_SOCKET_ID,
	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
	CCP_CRYPTODEV_PARAM_AUTH_OPT,
};

/** ccp pmd auth option */
enum ccp_pmd_auth_opt {
	CCP_PMD_AUTH_OPT_CCP = 0,
	CCP_PMD_AUTH_OPT_CPU,
};

static struct ccp_session *
get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
{
	struct ccp_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session == NULL))
			return NULL;

		sess = (struct ccp_session *)
			get_sym_session_private_data(
				op->sym->session,
				ccp_cryptodev_driver_id);
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		void *_sess;
		void *_sess_private_data = NULL;
		struct ccp_private *internals;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		/* The private data must come from the private-session pool,
		 * matching the rte_mempool_put() on the error path below;
		 * release the session object if this second get fails.
		 */
		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data)) {
			rte_mempool_put(qp->sess_mp, _sess);
			return NULL;
		}

		sess = (struct ccp_session *)_sess_private_data;

		internals = (struct ccp_private *)qp->dev->data->dev_private;
		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
							internals) != 0)) {
			/* Return both objects to their pools and bail out
			 * without attaching the freed session to the op.
			 */
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			return NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
					     ccp_cryptodev_driver_id,
					     _sess_private_data);
	}

	return sess;
}
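/*
 * Illustrative sketch, not part of this driver: how an application would
 * normally create the session that the WITH_SESSION branch above looks up.
 * The names dev_id, xform, sess_pool and sess_priv_pool are hypothetical
 * and assumed to be set up elsewhere; the calls are the pre-22.11
 * two-mempool cryptodev session API that this file is written against.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_pool);
 *	if (sess == NULL)
 *		return -ENOMEM;
 *	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_pool) < 0)
 *		return -EINVAL;
 */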
static uint16_t
ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_session *sess = NULL;
	struct ccp_qp *qp = queue_pair;
	struct ccp_queue *cmd_q;
	struct rte_cryptodev *dev = qp->dev;
	uint16_t i, enq_cnt = 0, slots_req = 0;
	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;

	if (nb_ops == 0)
		return 0;

	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
		return 0;

	/* Spread the burst across the detected CCP engines. */
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + nb_ops % cryptodev_cnt;
	else
		cur_ops = tmp_ops;
	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;
		slots_req = 0;
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;
			tmp_ops = 0;
		}
		for (i = 0; i < cur_ops; i++) {
			sess = get_ccp_session(qp, ops[i + b_idx]);
			if (unlikely(sess == NULL) && (i == 0)) {
				qp->qp_stats.enqueue_err_count++;
				return 0;
			} else if (sess == NULL) {
				cur_ops = i;
				break;
			}
			slots_req += ccp_compute_slot_count(sess);
		}

		cmd_q = ccp_allot_queue(dev, slots_req);
		if (unlikely(cmd_q == NULL))
			return 0;
		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
						  nb_ops, slots_req, b_idx);
	}

	qp->qp_stats.enqueued_count += enq_cnt;
	return enq_cnt;
}

static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_qp *qp = queue_pair;
	uint16_t nb_dequeued = 0, i, total_nb_ops;

	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);

	if (total_nb_ops) {
		while (nb_dequeued != total_nb_ops) {
			nb_dequeued = process_ops_to_dequeue(qp,
					ops, nb_ops, &total_nb_ops);
		}
	}

	/* Free session if a session-less crypto op */
	for (i = 0; i < nb_dequeued; i++)
		if (unlikely(ops[i]->sess_type ==
			     RTE_CRYPTO_OP_SESSIONLESS)) {
			struct ccp_session *sess = (struct ccp_session *)
				get_sym_session_private_data(
					ops[i]->sym->session,
					ccp_cryptodev_driver_id);

			rte_mempool_put(qp->sess_mp_priv, sess);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
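/*
 * Illustrative sketch, not part of this driver: a minimal application-side
 * burst loop exercising the enqueue/dequeue handlers above through the
 * public cryptodev API. dev_id, qp_id, ops[] and nb are hypothetical and
 * assumed to be prepared by the caller.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *	uint16_t done = 0;
 *
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    ops + done, sent - done);
 */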
/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id ccp_pci_id[] = {
	{
		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
	},
	{.device_id = 0},
};

/** Remove ccp pmd */
static int
cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev *dev;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	if (name[0] == '\0')
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_named_dev(name);
	if (dev == NULL)
		return -ENODEV;

	ccp_pmd_init_done = 0;
	rte_free(sha_ctx);
	sha_ctx = NULL;

	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
		name, rte_socket_id());

	return rte_cryptodev_pmd_destroy(dev);
}

/** Create crypto device */
static int
cryptodev_ccp_create(const char *name,
		     struct rte_pci_device *pci_dev,
		     struct ccp_pmd_init_params *init_params,
		     struct rte_pci_driver *pci_drv)
{
	struct rte_cryptodev *dev;
	struct ccp_private *internals;

	if (init_params->def_p.name[0] == '\0')
		strlcpy(init_params->def_p.name, name,
			sizeof(init_params->def_p.name));

	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
				       &pci_dev->device,
				       &init_params->def_p);
	if (dev == NULL) {
		CCP_LOG_ERR("failed to create cryptodev");
		goto init_error;
	}

	cryptodev_cnt = ccp_probe_devices(pci_dev, ccp_pci_id);
	if (cryptodev_cnt == 0) {
		CCP_LOG_ERR("failed to detect CCP crypto device");
		goto init_error;
	}

	RTE_LOG(INFO, PMD, "CCP: crypto device count = %d\n", cryptodev_cnt);
	dev->device = &pci_dev->device;
	dev->device->driver = &pci_drv->driver;
	dev->driver_id = ccp_cryptodev_driver_id;

	/* register rx/tx burst functions for data path */
	dev->dev_ops = ccp_pmd_ops;
	dev->enqueue_burst = ccp_pmd_enqueue_burst;
	dev->dequeue_burst = ccp_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	internals = dev->data->dev_private;
	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
	internals->auth_opt = init_params->auth_opt;
	internals->crypto_num_dev = cryptodev_cnt;

	rte_cryptodev_pmd_probing_finish(dev);

	return 0;

init_error:
	CCP_LOG_ERR("driver %s: %s() failed",
		    init_params->def_p.name, __func__);
	cryptodev_ccp_remove(pci_dev);

	return -EFAULT;
}

/** Probe ccp pmd */
static int
cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	int rc = 0;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct ccp_pmd_init_params init_params = {
		.def_p = {
			"",
			sizeof(struct ccp_private),
			rte_socket_id(),
			CCP_PMD_MAX_QUEUE_PAIRS
		},
		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
	};

	if (ccp_pmd_init_done) {
		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
		return -EFAULT;
	}

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
	if (name[0] == '\0')
		return -EINVAL;

	/* Allocate only after the checks above, so a repeated or invalid
	 * probe does not leak the SHA context.
	 */
	sha_ctx = rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
	if (sha_ctx == NULL)
		return -ENOMEM;

	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
		init_params.def_p.socket_id);
	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
		init_params.def_p.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
		((init_params.auth_opt == 0) ? "CCP" : "CPU"));

	rc = cryptodev_ccp_create(name, pci_dev, &init_params, pci_drv);
	if (rc)
		return rc;
	ccp_pmd_init_done = 1;
	return 0;
}

static struct rte_pci_driver cryptodev_ccp_pmd_drv = {
	.id_table = ccp_pci_id,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cryptodev_ccp_probe,
	.remove = cryptodev_ccp_remove
};

static struct cryptodev_driver ccp_crypto_drv;

RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_CCP_PMD, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int> "
	"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
			       ccp_cryptodev_driver_id);
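/*
 * Illustrative usage, not part of this driver: the CCP engine must be bound
 * to one of the kernel modules declared in the KMOD_DEP registration above
 * before the PMD can claim it. The PCI address below is hypothetical:
 *
 *	dpdk-devbind.py --bind=vfio-pci 0000:03:00.2
 *	./dpdk-app -a 0000:03:00.2 -- ...
 *
 * Per the parameter string registered above, ccp_auth_opt=<int> selects the
 * authentication path (0 = CCP hardware, 1 = CPU), matching the
 * ccp_pmd_auth_opt enum at the top of this file.
 */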