1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 */ 4 5 #include <inttypes.h> 6 7 #include <rte_common.h> 8 #include <rte_debug.h> 9 #include <rte_dev.h> 10 #include <rte_eal.h> 11 #include <rte_ethdev_driver.h> 12 #include <rte_event_eth_rx_adapter.h> 13 #include <rte_kvargs.h> 14 #include <rte_lcore.h> 15 #include <rte_log.h> 16 #include <rte_malloc.h> 17 #include <rte_memory.h> 18 #include <rte_bus_vdev.h> 19 20 #include "ssovf_evdev.h" 21 #include "timvf_evdev.h" 22 23 static uint8_t timvf_enable_stats; 24 25 RTE_LOG_REGISTER(otx_logtype_ssovf, pmd.event.octeontx, NOTICE); 26 27 /* SSOPF Mailbox messages */ 28 29 struct ssovf_mbox_dev_info { 30 uint64_t min_deq_timeout_ns; 31 uint64_t max_deq_timeout_ns; 32 uint32_t max_num_events; 33 }; 34 35 static int 36 ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info) 37 { 38 struct octeontx_mbox_hdr hdr = {0}; 39 uint16_t len = sizeof(struct ssovf_mbox_dev_info); 40 41 hdr.coproc = SSO_COPROC; 42 hdr.msg = SSO_GET_DEV_INFO; 43 hdr.vfid = 0; 44 45 memset(info, 0, len); 46 return octeontx_mbox_send(&hdr, NULL, 0, info, len); 47 } 48 49 struct ssovf_mbox_getwork_wait { 50 uint64_t wait_ns; 51 }; 52 53 static int 54 ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns) 55 { 56 struct octeontx_mbox_hdr hdr = {0}; 57 struct ssovf_mbox_getwork_wait tmo_set; 58 uint16_t len = sizeof(struct ssovf_mbox_getwork_wait); 59 int ret; 60 61 hdr.coproc = SSO_COPROC; 62 hdr.msg = SSO_SET_GETWORK_WAIT; 63 hdr.vfid = 0; 64 65 tmo_set.wait_ns = timeout_ns; 66 ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0); 67 if (ret) 68 ssovf_log_err("Failed to set getwork timeout(%d)", ret); 69 70 return ret; 71 } 72 73 struct ssovf_mbox_grp_pri { 74 uint8_t vhgrp_id; 75 uint8_t wgt_left; /* Read only */ 76 uint8_t weight; 77 uint8_t affinity; 78 uint8_t priority; 79 }; 80 81 static int 82 ssovf_mbox_priority_set(uint8_t queue, uint8_t prio) 83 { 84 struct octeontx_mbox_hdr hdr = {0}; 85 struct ssovf_mbox_grp_pri grp; 86 uint16_t len = sizeof(struct ssovf_mbox_grp_pri); 87 int ret; 88 89 hdr.coproc = SSO_COPROC; 90 hdr.msg = SSO_GRP_SET_PRIORITY; 91 hdr.vfid = queue; 92 93 grp.vhgrp_id = queue; 94 grp.weight = 0xff; 95 grp.affinity = 0xff; 96 grp.priority = prio / 32; /* Normalize to 0 to 7 */ 97 98 ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0); 99 if (ret) 100 ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio); 101 102 return ret; 103 } 104 105 struct ssovf_mbox_convert_ns_getworks_iter { 106 uint64_t wait_ns; 107 uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */ 108 }; 109 110 static int 111 ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks) 112 { 113 struct octeontx_mbox_hdr hdr = {0}; 114 struct ssovf_mbox_convert_ns_getworks_iter ns2iter; 115 uint16_t len = sizeof(ns2iter); 116 int ret; 117 118 hdr.coproc = SSO_COPROC; 119 hdr.msg = SSO_CONVERT_NS_GETWORK_ITER; 120 hdr.vfid = 0; 121 122 memset(&ns2iter, 0, len); 123 ns2iter.wait_ns = ns; 124 ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); 125 if (ret < 0 || (ret != len)) { 126 ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns); 127 return -EIO; 128 } 129 130 *tmo_ticks = ns2iter.getwork_iter; 131 return 0; 132 } 133 134 static void 135 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) 136 { 137 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 138 139 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD); 140 dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns; 141 dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns; 142 dev_info->max_event_queues = edev->max_event_queues; 143 dev_info->max_event_queue_flows = (1ULL << 20); 144 dev_info->max_event_queue_priority_levels = 8; 145 dev_info->max_event_priority_levels = 1; 146 dev_info->max_event_ports = edev->max_event_ports; 147 dev_info->max_event_port_dequeue_depth = 1; 148 dev_info->max_event_port_enqueue_depth = 1; 149 dev_info->max_num_events = edev->max_num_events; 150 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS | 151 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | 152 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES| 153 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | 154 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | 155 RTE_EVENT_DEV_CAP_NONSEQ_MODE; 156 157 } 158 159 static int 160 ssovf_configure(const struct rte_eventdev *dev) 161 { 162 struct rte_event_dev_config *conf = &dev->data->dev_conf; 163 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 164 uint64_t deq_tmo_ns; 165 166 ssovf_func_trace(); 167 deq_tmo_ns = conf->dequeue_timeout_ns; 168 if (deq_tmo_ns == 0) 169 deq_tmo_ns = edev->min_deq_timeout_ns; 170 171 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { 172 edev->is_timeout_deq = 1; 173 deq_tmo_ns = edev->min_deq_timeout_ns; 174 } 175 edev->nb_event_queues = conf->nb_event_queues; 176 edev->nb_event_ports = conf->nb_event_ports; 177 178 return ssovf_mbox_getwork_tmo_set(deq_tmo_ns); 179 } 180 181 static void 182 ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id, 183 struct rte_event_queue_conf *queue_conf) 184 { 185 RTE_SET_USED(dev); 186 RTE_SET_USED(queue_id); 187 188 queue_conf->nb_atomic_flows = (1ULL << 20); 189 queue_conf->nb_atomic_order_sequences = (1ULL << 20); 190 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES; 191 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 192 } 193 194 static void 195 ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id) 196 { 197 RTE_SET_USED(dev); 198 RTE_SET_USED(queue_id); 199 } 200 201 static int 202 ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, 203 const struct rte_event_queue_conf *queue_conf) 204 { 205 RTE_SET_USED(dev); 206 ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority); 207 208 return ssovf_mbox_priority_set(queue_id, queue_conf->priority); 209 } 210 211 static void 212 ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, 213 struct rte_event_port_conf *port_conf) 214 { 215 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 216 217 RTE_SET_USED(port_id); 218 port_conf->new_event_threshold = edev->max_num_events; 219 port_conf->dequeue_depth = 1; 220 port_conf->enqueue_depth = 1; 221 port_conf->disable_implicit_release = 0; 222 } 223 224 static void 225 ssovf_port_release(void *port) 226 { 227 rte_free(port); 228 } 229 230 static int 231 ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, 232 const struct rte_event_port_conf *port_conf) 233 { 234 struct ssows *ws; 235 uint32_t reg_off; 236 uint8_t q; 237 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 238 239 ssovf_func_trace("port=%d", port_id); 240 RTE_SET_USED(port_conf); 241 242 /* Free memory prior to re-allocation if needed */ 243 if (dev->data->ports[port_id] != NULL) { 244 ssovf_port_release(dev->data->ports[port_id]); 245 dev->data->ports[port_id] = NULL; 246 } 247 248 /* Allocate event port memory */ 249 ws = rte_zmalloc_socket("eventdev ssows", 250 sizeof(struct ssows), RTE_CACHE_LINE_SIZE, 251 dev->data->socket_id); 252 if (ws == NULL) { 253 ssovf_log_err("Failed to alloc memory for port=%d", port_id); 254 return -ENOMEM; 255 } 256 257 ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); 258 if (ws->base == NULL) { 259 rte_free(ws); 260 ssovf_log_err("Failed to get hws base addr port=%d", port_id); 261 return -EINVAL; 262 } 263 264 reg_off = SSOW_VHWS_OP_GET_WORK0; 265 reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */ 266 reg_off |= 1 << 16; /* Wait */ 267 ws->getwork = ws->base + reg_off; 268 ws->port = port_id; 269 ws->lookup_mem = octeontx_fastpath_lookup_mem_get(); 270 271 for (q = 0; q < edev->nb_event_queues; q++) { 272 ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); 273 if (ws->grps[q] == NULL) { 274 rte_free(ws); 275 ssovf_log_err("Failed to get grp%d base addr", q); 276 return -EINVAL; 277 } 278 } 279 280 dev->data->ports[port_id] = ws; 281 ssovf_log_dbg("port=%d ws=%p", port_id, ws); 282 return 0; 283 } 284 285 static int 286 ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], 287 const uint8_t priorities[], uint16_t nb_links) 288 { 289 uint16_t link; 290 uint64_t val; 291 struct ssows *ws = port; 292 293 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links); 294 RTE_SET_USED(dev); 295 RTE_SET_USED(priorities); 296 297 for (link = 0; link < nb_links; link++) { 298 val = queues[link]; 299 val |= (1ULL << 24); /* Set membership */ 300 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 301 } 302 return (int)nb_links; 303 } 304 305 static int 306 ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[], 307 uint16_t nb_unlinks) 308 { 309 uint16_t unlink; 310 uint64_t val; 311 struct ssows *ws = port; 312 313 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks); 314 RTE_SET_USED(dev); 315 316 for (unlink = 0; unlink < nb_unlinks; unlink++) { 317 val = queues[unlink]; 318 val &= ~(1ULL << 24); /* Clear membership */ 319 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 320 } 321 return (int)nb_unlinks; 322 } 323 324 static int 325 ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks) 326 { 327 RTE_SET_USED(dev); 328 329 return ssovf_mbox_timeout_ticks(ns, tmo_ticks); 330 } 331 332 static void 333 ssows_dump(struct ssows *ws, FILE *f) 334 { 335 uint8_t *base = ws->base; 336 uint64_t val; 337 338 fprintf(f, "\t---------------port%d---------------\n", ws->port); 339 val = ssovf_read64(base + SSOW_VHWS_TAG); 340 fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n", 341 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 342 (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1, 343 (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff, 344 (int)(val >> 63) & 0x1); 345 346 val = ssovf_read64(base + SSOW_VHWS_WQP); 347 fprintf(f, "\twqp=0x%"PRIx64"\n", val); 348 349 val = ssovf_read64(base + SSOW_VHWS_LINKS); 350 fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n", 351 (int)(val & 0x3ff), (int)(val >> 10) & 0x1, 352 (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1, 353 (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff); 354 355 val = ssovf_read64(base + SSOW_VHWS_PENDTAG); 356 fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n", 357 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 358 (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1, 359 (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1, 360 (int)(val >> 63) & 0x1); 361 362 val = ssovf_read64(base + SSOW_VHWS_PENDWQP); 363 fprintf(f, "\tpwqp=0x%"PRIx64"\n", val); 364 } 365 366 static int 367 ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev, 368 const struct rte_eth_dev *eth_dev, uint32_t *caps) 369 { 370 int ret; 371 RTE_SET_USED(dev); 372 373 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 374 if (ret) 375 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP; 376 else 377 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT; 378 379 return 0; 380 } 381 382 static int 383 ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev, 384 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id, 385 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 386 { 387 int ret = 0; 388 const struct octeontx_nic *nic = eth_dev->data->dev_private; 389 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 390 pki_mod_qos_t pki_qos; 391 RTE_SET_USED(dev); 392 393 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 394 if (ret) 395 return -EINVAL; 396 397 if (rx_queue_id >= 0) 398 return -EINVAL; 399 400 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 401 return -ENOTSUP; 402 403 memset(&pki_qos, 0, sizeof(pki_mod_qos_t)); 404 405 pki_qos.port_type = 0; 406 pki_qos.index = 0; 407 pki_qos.mmask.f_tag_type = 1; 408 pki_qos.mmask.f_port_add = 1; 409 pki_qos.mmask.f_grp_ok = 1; 410 pki_qos.mmask.f_grp_bad = 1; 411 pki_qos.mmask.f_grptag_ok = 1; 412 pki_qos.mmask.f_grptag_bad = 1; 413 414 pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type; 415 pki_qos.qos_entry.port_add = 0; 416 pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id; 417 pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id; 418 pki_qos.qos_entry.grptag_bad = 0; 419 pki_qos.qos_entry.grptag_ok = 0; 420 421 ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos); 422 if (ret < 0) 423 ssovf_log_err("failed to modify QOS, port=%d, q=%d", 424 nic->port_id, queue_conf->ev.queue_id); 425 426 edev->rx_offload_flags = nic->rx_offload_flags; 427 edev->tx_offload_flags = nic->tx_offload_flags; 428 return ret; 429 } 430 431 static int 432 ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev, 433 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id) 434 { 435 int ret = 0; 436 const struct octeontx_nic *nic = eth_dev->data->dev_private; 437 pki_del_qos_t pki_qos; 438 RTE_SET_USED(dev); 439 440 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 441 if (ret) 442 return -EINVAL; 443 444 pki_qos.port_type = 0; 445 pki_qos.index = 0; 446 memset(&pki_qos, 0, sizeof(pki_del_qos_t)); 447 ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos); 448 if (ret < 0) 449 ssovf_log_err("Failed to delete QOS port=%d, q=%d", 450 nic->port_id, rx_queue_id); 451 return ret; 452 } 453 454 static int 455 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev, 456 const struct rte_eth_dev *eth_dev) 457 { 458 RTE_SET_USED(dev); 459 RTE_SET_USED(eth_dev); 460 461 return 0; 462 } 463 464 465 static int 466 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev, 467 const struct rte_eth_dev *eth_dev) 468 { 469 RTE_SET_USED(dev); 470 RTE_SET_USED(eth_dev); 471 472 return 0; 473 } 474 475 static int 476 ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev, 477 const struct rte_eth_dev *eth_dev, uint32_t *caps) 478 { 479 int ret; 480 RTE_SET_USED(dev); 481 482 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 483 if (ret) 484 *caps = 0; 485 else 486 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT; 487 488 return 0; 489 } 490 491 static int 492 ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev) 493 { 494 RTE_SET_USED(id); 495 RTE_SET_USED(dev); 496 return 0; 497 } 498 499 static int 500 ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev) 501 { 502 RTE_SET_USED(id); 503 RTE_SET_USED(dev); 504 return 0; 505 } 506 507 static int 508 ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev, 509 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 510 { 511 RTE_SET_USED(id); 512 RTE_SET_USED(dev); 513 RTE_SET_USED(eth_dev); 514 RTE_SET_USED(tx_queue_id); 515 return 0; 516 } 517 518 static int 519 ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev, 520 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 521 { 522 RTE_SET_USED(id); 523 RTE_SET_USED(dev); 524 RTE_SET_USED(eth_dev); 525 RTE_SET_USED(tx_queue_id); 526 return 0; 527 } 528 529 static int 530 ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev) 531 { 532 RTE_SET_USED(id); 533 RTE_SET_USED(dev); 534 return 0; 535 } 536 537 static int 538 ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev) 539 { 540 RTE_SET_USED(id); 541 RTE_SET_USED(dev); 542 return 0; 543 } 544 545 546 static void 547 ssovf_dump(struct rte_eventdev *dev, FILE *f) 548 { 549 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 550 uint8_t port; 551 552 /* Dump SSOWVF debug registers */ 553 for (port = 0; port < edev->nb_event_ports; port++) 554 ssows_dump(dev->data->ports[port], f); 555 } 556 557 static int 558 ssovf_start(struct rte_eventdev *dev) 559 { 560 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 561 struct ssows *ws; 562 uint8_t *base; 563 uint8_t i; 564 565 ssovf_func_trace(); 566 for (i = 0; i < edev->nb_event_ports; i++) { 567 ws = dev->data->ports[i]; 568 ssows_reset(ws); 569 ws->swtag_req = 0; 570 } 571 572 for (i = 0; i < edev->nb_event_queues; i++) { 573 /* Consume all the events through HWS0 */ 574 ssows_flush_events(dev->data->ports[0], i, NULL, NULL); 575 576 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 577 base += SSO_VHGRP_QCTL; 578 ssovf_write64(1, base); /* Enable SSO group */ 579 } 580 581 ssovf_fastpath_fns_set(dev); 582 return 0; 583 } 584 585 static void 586 ssows_handle_event(void *arg, struct rte_event event) 587 { 588 struct rte_eventdev *dev = arg; 589 590 if (dev->dev_ops->dev_stop_flush != NULL) 591 dev->dev_ops->dev_stop_flush(dev->data->dev_id, event, 592 dev->data->dev_stop_flush_arg); 593 } 594 595 static void 596 ssovf_stop(struct rte_eventdev *dev) 597 { 598 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 599 struct ssows *ws; 600 uint8_t *base; 601 uint8_t i; 602 603 ssovf_func_trace(); 604 for (i = 0; i < edev->nb_event_ports; i++) { 605 ws = dev->data->ports[i]; 606 ssows_reset(ws); 607 ws->swtag_req = 0; 608 } 609 610 for (i = 0; i < edev->nb_event_queues; i++) { 611 /* Consume all the events through HWS0 */ 612 ssows_flush_events(dev->data->ports[0], i, 613 ssows_handle_event, dev); 614 615 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 616 base += SSO_VHGRP_QCTL; 617 ssovf_write64(0, base); /* Disable SSO group */ 618 } 619 } 620 621 static int 622 ssovf_close(struct rte_eventdev *dev) 623 { 624 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 625 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 626 uint8_t i; 627 628 for (i = 0; i < edev->nb_event_queues; i++) 629 all_queues[i] = i; 630 631 for (i = 0; i < edev->nb_event_ports; i++) 632 ssovf_port_unlink(dev, dev->data->ports[i], all_queues, 633 edev->nb_event_queues); 634 return 0; 635 } 636 637 static int 638 ssovf_selftest(const char *key __rte_unused, const char *value, 639 void *opaque) 640 { 641 int *flag = opaque; 642 *flag = !!atoi(value); 643 return 0; 644 } 645 646 static int 647 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags, 648 uint32_t *caps, const struct rte_event_timer_adapter_ops **ops) 649 { 650 return timvf_timer_adapter_caps_get(dev, flags, caps, ops, 651 timvf_enable_stats); 652 } 653 654 /* Initialize and register event driver with DPDK Application */ 655 static struct rte_eventdev_ops ssovf_ops = { 656 .dev_infos_get = ssovf_info_get, 657 .dev_configure = ssovf_configure, 658 .queue_def_conf = ssovf_queue_def_conf, 659 .queue_setup = ssovf_queue_setup, 660 .queue_release = ssovf_queue_release, 661 .port_def_conf = ssovf_port_def_conf, 662 .port_setup = ssovf_port_setup, 663 .port_release = ssovf_port_release, 664 .port_link = ssovf_port_link, 665 .port_unlink = ssovf_port_unlink, 666 .timeout_ticks = ssovf_timeout_ticks, 667 668 .eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get, 669 .eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add, 670 .eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del, 671 .eth_rx_adapter_start = ssovf_eth_rx_adapter_start, 672 .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop, 673 674 .eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get, 675 .eth_tx_adapter_create = ssovf_eth_tx_adapter_create, 676 .eth_tx_adapter_free = ssovf_eth_tx_adapter_free, 677 .eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add, 678 .eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del, 679 .eth_tx_adapter_start = ssovf_eth_tx_adapter_start, 680 .eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop, 681 682 .timer_adapter_caps_get = ssovf_timvf_caps_get, 683 684 .dev_selftest = test_eventdev_octeontx, 685 686 .dump = ssovf_dump, 687 .dev_start = ssovf_start, 688 .dev_stop = ssovf_stop, 689 .dev_close = ssovf_close 690 }; 691 692 static int 693 ssovf_vdev_probe(struct rte_vdev_device *vdev) 694 { 695 struct ssovf_info oinfo; 696 struct ssovf_mbox_dev_info info; 697 struct ssovf_evdev *edev; 698 struct rte_eventdev *eventdev; 699 static int ssovf_init_once; 700 const char *name; 701 const char *params; 702 int ret; 703 int selftest = 0; 704 705 static const char *const args[] = { 706 SSOVF_SELFTEST_ARG, 707 TIMVF_ENABLE_STATS_ARG, 708 NULL 709 }; 710 711 name = rte_vdev_device_name(vdev); 712 /* More than one instance is not supported */ 713 if (ssovf_init_once) { 714 ssovf_log_err("Request to create >1 %s instance", name); 715 return -EINVAL; 716 } 717 718 params = rte_vdev_device_args(vdev); 719 if (params != NULL && params[0] != '\0') { 720 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args); 721 722 if (!kvlist) { 723 ssovf_log_info( 724 "Ignoring unsupported params supplied '%s'", 725 name); 726 } else { 727 int ret = rte_kvargs_process(kvlist, 728 SSOVF_SELFTEST_ARG, 729 ssovf_selftest, &selftest); 730 if (ret != 0) { 731 ssovf_log_err("%s: Error in selftest", name); 732 rte_kvargs_free(kvlist); 733 return ret; 734 } 735 736 ret = rte_kvargs_process(kvlist, 737 TIMVF_ENABLE_STATS_ARG, 738 ssovf_selftest, &timvf_enable_stats); 739 if (ret != 0) { 740 ssovf_log_err("%s: Error in timvf stats", name); 741 rte_kvargs_free(kvlist); 742 return ret; 743 } 744 } 745 746 rte_kvargs_free(kvlist); 747 } 748 749 eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), 750 rte_socket_id()); 751 if (eventdev == NULL) { 752 ssovf_log_err("Failed to create eventdev vdev %s", name); 753 return -ENOMEM; 754 } 755 eventdev->dev_ops = &ssovf_ops; 756 757 /* For secondary processes, the primary has done all the work */ 758 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 759 ssovf_fastpath_fns_set(eventdev); 760 return 0; 761 } 762 763 octeontx_mbox_init(); 764 ret = ssovf_info(&oinfo); 765 if (ret) { 766 ssovf_log_err("Failed to probe and validate ssovfs %d", ret); 767 goto error; 768 } 769 770 edev = ssovf_pmd_priv(eventdev); 771 edev->max_event_ports = oinfo.total_ssowvfs; 772 edev->max_event_queues = oinfo.total_ssovfs; 773 edev->is_timeout_deq = 0; 774 775 ret = ssovf_mbox_dev_info(&info); 776 if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) { 777 ssovf_log_err("Failed to get mbox devinfo %d", ret); 778 goto error; 779 } 780 781 edev->min_deq_timeout_ns = info.min_deq_timeout_ns; 782 edev->max_deq_timeout_ns = info.max_deq_timeout_ns; 783 edev->max_num_events = info.max_num_events; 784 ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d", 785 info.min_deq_timeout_ns, info.max_deq_timeout_ns, 786 info.max_num_events); 787 788 if (!edev->max_event_ports || !edev->max_event_queues) { 789 ssovf_log_err("Not enough eventdev resource queues=%d ports=%d", 790 edev->max_event_queues, edev->max_event_ports); 791 ret = -ENODEV; 792 goto error; 793 } 794 795 ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d", 796 name, oinfo.domain, edev->max_event_queues, 797 edev->max_event_ports); 798 799 ssovf_init_once = 1; 800 if (selftest) 801 test_eventdev_octeontx(); 802 return 0; 803 804 error: 805 rte_event_pmd_vdev_uninit(name); 806 return ret; 807 } 808 809 static int 810 ssovf_vdev_remove(struct rte_vdev_device *vdev) 811 { 812 const char *name; 813 814 name = rte_vdev_device_name(vdev); 815 ssovf_log_info("Closing %s", name); 816 return rte_event_pmd_vdev_uninit(name); 817 } 818 819 static struct rte_vdev_driver vdev_ssovf_pmd = { 820 .probe = ssovf_vdev_probe, 821 .remove = ssovf_vdev_remove 822 }; 823 824 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd); 825