/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "armv8_crypto_defs.h"

#include "rte_armv8_pmd_private.h"

static const struct rte_cryptodev_capabilities
	armv8_crypto_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 16,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 16,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

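/*
 * Illustrative only (not part of the driver): applications discover the
 * capabilities above through the librte_cryptodev API rather than by
 * reading this table. A minimal sketch, assuming the DPDK 17.02-era
 * rte_cryptodev_sym_capability_get()/rte_cryptodev_sym_capability_check_cipher()
 * helpers; "dev_id" is a hypothetical device id held by the application.
 *
 * @code
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-128-CBC with a 16-byte IV is unsupported\n");
 * @endcode
 */
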
/** Configure device */
static int
armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Start device */
static int
armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
static void
armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
static void
armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct armv8_crypto_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = armv8_crypto_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}

/** Release queue pair */
static int
armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}

/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct armv8_crypto_qp *qp)
{
	unsigned int n;

	n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() returns the would-be length, so >= means truncation */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ARMV8_CRYPTO_LOG_INFO(
				"Reusing existing ring %s for processed ops",
				qp->name);
			return r;
		}

		ARMV8_CRYPTO_LOG_ERR(
			"Unable to reuse existing ring %s for processed ops",
			qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

/** Setup a queue pair */
static int
armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct armv8_crypto_qp *qp = NULL;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		armv8_crypto_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
		goto qp_setup_cleanup;

	qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_ops == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}

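/*
 * Illustrative only: the setup path above is reached through the public
 * librte_cryptodev calls, with qp_conf->nb_descriptors sizing the
 * processed-ops ring created above. A minimal sketch, assuming the DPDK
 * 17.02-era API; "dev_id" and the counts are hypothetical values chosen
 * by the application.
 *
 * @code
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.session_mp = { .nb_objs = 2048, .cache_size = 64 }
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to configure cryptodev\n");
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to set up queue pair 0\n");
 * @endcode
 */
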
/** Start queue pair */
static int
armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned
armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct armv8_crypto_session);
}

/** Configure the session from a crypto xform chain */
static void *
armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	if (unlikely(sess == NULL)) {
		ARMV8_CRYPTO_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (armv8_crypto_set_session_parameters(sess, xform) != 0) {
		ARMV8_CRYPTO_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}

/** Clear the session memory so it does not leave key material behind */
static void
armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
		void *sess)
{
	/* Zero out the whole structure */
	if (sess)
		memset(sess, 0, sizeof(struct armv8_crypto_session));
}

struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
	.dev_configure = armv8_crypto_pmd_config,
	.dev_start = armv8_crypto_pmd_start,
	.dev_stop = armv8_crypto_pmd_stop,
	.dev_close = armv8_crypto_pmd_close,

	.stats_get = armv8_crypto_pmd_stats_get,
	.stats_reset = armv8_crypto_pmd_stats_reset,

	.dev_infos_get = armv8_crypto_pmd_info_get,

	.queue_pair_setup = armv8_crypto_pmd_qp_setup,
	.queue_pair_release = armv8_crypto_pmd_qp_release,
	.queue_pair_start = armv8_crypto_pmd_qp_start,
	.queue_pair_stop = armv8_crypto_pmd_qp_stop,
	.queue_pair_count = armv8_crypto_pmd_qp_count,

	.session_get_size = armv8_crypto_pmd_session_get_size,
	.session_configure = armv8_crypto_pmd_session_configure,
	.session_clear = armv8_crypto_pmd_session_clear
};

struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;

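/*
 * Illustrative only: the session ops above back the public session API.
 * A minimal sketch, assuming the DPDK 17.02-era session model and xform
 * field layout (key/digest_length as in rte_crypto_sym.h of that era);
 * "dev_id" and the key buffers are hypothetical, and, as the capability
 * table suggests, this PMD is built around AES-CBC chained with a
 * SHA1/SHA256 HMAC transform.
 *
 * @code
 *	uint8_t cipher_key[16], auth_key[20];
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = auth_key, .length = sizeof(auth_key) },
 *			.digest_length = 20
 *		}
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xform,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key,
 *				 .length = sizeof(cipher_key) }
 *		}
 *	};
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
 *	if (sess == NULL)
 *		printf("session creation failed\n");
 * @endcode
 */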