/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <rte_string_fns.h>
#include <bus_pci_driver.h>
#include <bus_vdev_driver.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_pci.h>
#include <dev_driver.h>
#include <rte_malloc.h>

#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"

/**
 * Global static parameter used to find if CCP device is already initialized.
 */
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;

struct ccp_pmd_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	bool auth_opt;
};

#define CCP_CRYPTODEV_PARAM_NAME		("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID		("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP		("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT		("ccp_auth_opt")

const char *ccp_pmd_valid_params[] = {
	CCP_CRYPTODEV_PARAM_NAME,
	CCP_CRYPTODEV_PARAM_SOCKET_ID,
	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
	CCP_CRYPTODEV_PARAM_AUTH_OPT,
};

/** ccp pmd auth option */
enum ccp_pmd_auth_opt {
	CCP_PMD_AUTH_OPT_CCP = 0,
	CCP_PMD_AUTH_OPT_CPU,
};

static struct ccp_session *
get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
{
	struct ccp_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session == NULL))
			return NULL;

		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		struct rte_cryptodev_sym_session *_sess;
		struct ccp_private *internals;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		sess = (void *)_sess->driver_priv_data;

		internals = (struct ccp_private *)qp->dev->data->dev_private;
		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
							internals) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
		op->sym->session = _sess;
	}

	return sess;
}

static uint16_t
ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_session *sess = NULL;
	struct ccp_qp *qp = queue_pair;
	struct ccp_queue *cmd_q;
	struct rte_cryptodev *dev = qp->dev;
	uint16_t i, enq_cnt = 0, slots_req = 0;
	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;

	if (nb_ops == 0)
		return 0;

	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
		return 0;
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + (nb_ops) % cryptodev_cnt;
	else
		cur_ops = tmp_ops;
	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;
		slots_req = 0;
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;
			tmp_ops = 0;
		}
		for (i = 0; i < cur_ops; i++) {
			sess = get_ccp_session(qp, ops[i + b_idx]);
			if (unlikely(sess == NULL) && (i == 0)) {
				qp->qp_stats.enqueue_err_count++;
				return 0;
			} else if (sess == NULL) {
				cur_ops = i;
				break;
			}
			slots_req += ccp_compute_slot_count(sess);
		}

		cmd_q = ccp_allot_queue(dev, slots_req);
		if (unlikely(cmd_q == NULL))
			return 0;
		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
						  nb_ops, slots_req, b_idx);
		i++;
	}

	qp->qp_stats.enqueued_count += enq_cnt;
	return enq_cnt;
}

static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_qp *qp = queue_pair;
	uint16_t nb_dequeued = 0, i, total_nb_ops;

	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);

	if (total_nb_ops) {
		while (nb_dequeued != total_nb_ops) {
			nb_dequeued = process_ops_to_dequeue(qp,
					ops, nb_ops, &total_nb_ops);
		}
	}

	/* Free session if a session-less crypto op */
	for (i = 0; i < nb_dequeued; i++)
		if (unlikely(ops[i]->sess_type ==
			     RTE_CRYPTO_OP_SESSIONLESS)) {
			struct ccp_session *sess =
				CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);

			memset(sess, 0, sizeof(*sess));
			rte_mempool_put(qp->sess_mp,
					ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id ccp_pci_id[] = {
	{ RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_5A), },
	{ RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_5B), },
	{ RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_CCP_RV), },
	{.device_id = 0},
};

/** Remove ccp pmd */
static int
cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev *dev;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	if (name[0] == '\0')
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_named_dev(name);
	if (dev == NULL)
		return -ENODEV;

	ccp_pmd_init_done = 0;

	CCP_LOG_INFO("Closing ccp device %s on numa socket %u",
		     name, rte_socket_id());

	return rte_cryptodev_pmd_destroy(dev);
}

/** Create crypto device */
static int
cryptodev_ccp_create(const char *name,
		     struct rte_pci_device *pci_dev,
		     struct ccp_pmd_init_params *init_params,
		     struct rte_pci_driver *pci_drv)
{
	struct rte_cryptodev *dev;
	struct ccp_private *internals;

	if (init_params->def_p.name[0] == '\0')
		strlcpy(init_params->def_p.name, name,
			sizeof(init_params->def_p.name));

	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
				       &pci_dev->device,
				       &init_params->def_p);
	if (dev == NULL) {
		CCP_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	if (ccp_probe_device(pci_dev) != 0) {
		CCP_LOG_ERR("failed to detect CCP crypto device");
		goto init_error;
	}
	cryptodev_cnt++;

	CCP_LOG_DBG("CCP : Crypto device count = %d", cryptodev_cnt);
	dev->device = &pci_dev->device;
	dev->device->driver = &pci_drv->driver;
	dev->driver_id = ccp_cryptodev_driver_id;

	/* register rx/tx burst functions for data path */
	dev->dev_ops = ccp_pmd_ops;
	dev->enqueue_burst = ccp_pmd_enqueue_burst;
	dev->dequeue_burst = ccp_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
	internals->auth_opt = init_params->auth_opt;
	internals->crypto_num_dev = cryptodev_cnt;

	rte_cryptodev_pmd_probing_finish(dev);

	return 0;

init_error:
	CCP_LOG_ERR("driver %s: %s() failed",
		    init_params->def_p.name, __func__);
	cryptodev_ccp_remove(pci_dev);

	return -EFAULT;
}

/** Probe ccp pmd */
static int
cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	int rc = 0;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct ccp_pmd_init_params init_params = {
		.def_p = {
			"",
			sizeof(struct ccp_private),
			rte_socket_id(),
			CCP_PMD_MAX_QUEUE_PAIRS
		},
		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
	};

	if (ccp_pmd_init_done) {
		CCP_LOG_INFO("CCP PMD already initialized");
		return -EFAULT;
	}
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
	if (name[0] == '\0')
		return -EINVAL;

	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;

	CCP_LOG_INFO("Initialising %s on NUMA node %d", name,
		     init_params.def_p.socket_id);
	CCP_LOG_INFO("Max number of queue pairs = %d",
		     init_params.def_p.max_nb_queue_pairs);
	CCP_LOG_INFO("Authentication offload to %s",
		     ((init_params.auth_opt == 0) ? "CCP" : "CPU"));

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	rc = cryptodev_ccp_create(name, pci_dev, &init_params, pci_drv);
	if (rc)
		return rc;
	ccp_pmd_init_done = 1;
	return 0;
}

static struct rte_pci_driver cryptodev_ccp_pmd_drv = {
	.id_table = ccp_pci_id,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cryptodev_ccp_probe,
	.remove = cryptodev_ccp_remove
};

static struct cryptodev_driver ccp_crypto_drv;

RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_CCP_PMD, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int> "
	"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
			       ccp_cryptodev_driver_id);
RTE_LOG_REGISTER_DEFAULT(crypto_ccp_logtype, NOTICE);