/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <bus_vdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#define DRIVER_NAME baseband_null

RTE_LOG_REGISTER_DEFAULT(bbdev_null_logtype, NOTICE);
#define RTE_LOGTYPE_BBDEV_NULL bbdev_null_logtype

/* Helper macro for logging */
#define rte_bbdev_log(level, ...) \
	RTE_LOG_LINE(level, BBDEV_NULL, __VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Initialisation params structure that can be used by null BBDEV driver */
struct bbdev_null_params {
	int socket_id;		/*< Null BBDEV socket */
	uint16_t queues_num;	/*< Null BBDEV queues number */
};

/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG	"max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG	"socket_id"

static const char * const bbdev_null_valid_params[] = {
	BBDEV_NULL_MAX_NB_QUEUES_ARG,
	BBDEV_NULL_SOCKET_ID_ARG
};

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* queue */
struct __rte_cache_aligned bbdev_queue {
	struct rte_ring *processed_pkts;  /* Ring for processed packets */
};

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_dl_queue_priority = 0;
	dev_info->max_ul_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = NULL;
	dev_info->min_alignment = 0;

	/* BBDEV null device does not process the data, so
	 * endianness setting is not relevant, but setting it
	 * here for code completeness.
	 */
	dev_info->data_endianness = RTE_LITTLE_ENDIAN;
	dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}

/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct bbdev_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	struct bbdev_queue *q;
	char ring_name[RTE_RING_NAMESIZE];
	snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
			dev->data->dev_id, q_id);

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring");
		goto free_q;
	}

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", ring_name);
	return 0;

free_q:
	rte_free(q);
	return -EFAULT;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};
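/*
 * The ops above are never called directly by applications; they are reached
 * through the generic rte_bbdev API. A minimal application-side sketch of
 * configuring one queue on this device is shown below. It assumes EAL is
 * already initialised and that dev_id refers to an instance of this driver;
 * the helper name configure_null_dev is illustrative, not part of the PMD.
 *
 *	static int
 *	configure_null_dev(uint16_t dev_id)
 *	{
 *		struct rte_bbdev_info info;
 *		struct rte_bbdev_queue_conf qconf;
 *		int ret;
 *
 *		rte_bbdev_info_get(dev_id, &info);
 *
 *		ret = rte_bbdev_setup_queues(dev_id, 1, info.socket_id);
 *		if (ret < 0)
 *			return ret;
 *
 *		qconf = info.drv.default_queue_conf;
 *		ret = rte_bbdev_queue_configure(dev_id, 0, &qconf);
 *		if (ret < 0)
 *			return ret;
 *
 *		return rte_bbdev_start(dev_id);
 *	}
 */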
/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
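/*
 * Datapath note and usage sketch. The burst functions above do no baseband
 * processing: enqueue pushes the op pointers onto the per-queue ring and
 * dequeue pops them back, so ops come out exactly as they went in. A sketch
 * of driving this path from an application is below; dev_id, queue_id, BURST
 * and the enc_ops_pool mempool are illustrative assumptions:
 *
 *	struct rte_bbdev_enc_op *ops[BURST];
 *	struct rte_bbdev_enc_op *deq[BURST];
 *	uint16_t n_enq, n_deq;
 *
 *	if (rte_bbdev_enc_op_alloc_bulk(enc_ops_pool, ops, BURST) != 0)
 *		return;
 *	n_enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, ops, BURST);
 *	n_deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, deq, n_enq);
 *	rte_bbdev_enc_op_free_bulk(deq, n_deq);
 */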
/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned int long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_bbdev_null_params(struct bbdev_null_params *params,
		const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			goto exit;
		}
	}

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

/* Create device */
static int
null_bbdev_create(struct rte_vdev_device *vdev,
		struct bbdev_null_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
null_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct bbdev_null_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	input_args = rte_vdev_device_args(vdev);
	parse_bbdev_null_params(&init_params, input_args);

	rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return null_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
null_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}

static struct rte_vdev_driver bbdev_null_pmd_drv = {
	.probe = null_bbdev_probe,
	.remove = null_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
	BBDEV_NULL_SOCKET_ID_ARG"=<int>");
RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
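/*
 * Instantiation sketch. The vdev registered above can be created from the EAL
 * command line using the parameter string registered with
 * RTE_PMD_REGISTER_PARAM_STRING, or programmatically; the instance name
 * "baseband_null0" and the parameter values are illustrative assumptions:
 *
 *	./app --vdev="baseband_null0,max_nb_queues=8,socket_id=0" -- ...
 *
 * or, from application code:
 *
 *	#include <rte_bus_vdev.h>
 *
 *	int ret = rte_vdev_init("baseband_null0",
 *			"max_nb_queues=8,socket_id=0");
 */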