/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <bus_vdev_driver.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
					RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
	dev_info->max_profiles_per_port = 1;
}

static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

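/* Advertise permissive queue defaults; a real PMD would derive these
 * limits from device capabilities rather than hard-coding them.
 */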
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->event_port_cfg = 0;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			     const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			    port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			      uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}

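/* Nothing to dump for the skeleton; a real PMD would print queue and
 * port occupancy and any hardware counters to 'f' here.
 */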
static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Eventdev ops table registered with the eventdev framework */
static struct eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get = skeleton_eventdev_info_get,
	.dev_configure = skeleton_eventdev_configure,
	.dev_start = skeleton_eventdev_start,
	.dev_stop = skeleton_eventdev_stop,
	.dev_close = skeleton_eventdev_close,
	.queue_def_conf = skeleton_eventdev_queue_def_conf,
	.queue_setup = skeleton_eventdev_queue_setup,
	.queue_release = skeleton_eventdev_queue_release,
	.port_def_conf = skeleton_eventdev_port_def_conf,
	.port_setup = skeleton_eventdev_port_setup,
	.port_release = skeleton_eventdev_port_release,
	.port_link = skeleton_eventdev_port_link,
	.port_unlink = skeleton_eventdev_port_unlink,
	.timeout_ticks = skeleton_eventdev_timeout_ticks,
	.dump = skeleton_eventdev_dump
};

static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "PCI device (%x:%x) " PCI_PRI_FMT,
		    pci_dev->id.vendor_id, pci_dev->id.device_id,
		    pci_dev->addr.domain, pci_dev->addr.bus,
		    pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		    eventdev->data->dev_id, eventdev->data->socket_id,
		    skel->vendor_id, skel->device_id);

fail:
	return ret;
}

/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID		0x177d
#define EVENTDEV_SKEL_PRODUCT_ID	0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

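/* RTE_PMD_REGISTER_PCI() hooks the driver into the PCI bus scan;
 * RTE_PMD_REGISTER_PCI_TABLE() embeds the ID table in the binary so
 * tools such as dpdk-pmdinfo can report which devices the PMD supports.
 */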
RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id,
			 struct rte_vdev_device *vdev)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id, vdev);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Initializing %s on NUMA node %d", name,
		    rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id(), vdev);
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
RTE_LOG_REGISTER_DEFAULT(skeleton_eventdev_logtype, INFO);
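
/*
 * Usage sketch, assuming a DPDK application linked against this PMD:
 * the vdev flavor can be instantiated from the EAL command line, e.g.
 *   ./app --vdev="event_skeleton"
 * and then located with rte_event_dev_get_dev_id("event_skeleton").
 */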