1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 */ 4 5 #include <inttypes.h> 6 7 #include <rte_common.h> 8 #include <rte_debug.h> 9 #include <rte_dev.h> 10 #include <rte_eal.h> 11 #include <rte_ethdev_driver.h> 12 #include <rte_event_eth_rx_adapter.h> 13 #include <rte_kvargs.h> 14 #include <rte_lcore.h> 15 #include <rte_log.h> 16 #include <rte_malloc.h> 17 #include <rte_memory.h> 18 #include <rte_bus_vdev.h> 19 20 #include "ssovf_evdev.h" 21 #include "timvf_evdev.h" 22 23 int otx_logtype_ssovf; 24 static uint8_t timvf_enable_stats; 25 26 RTE_INIT(otx_ssovf_init_log) 27 { 28 otx_logtype_ssovf = rte_log_register("pmd.event.octeontx"); 29 if (otx_logtype_ssovf >= 0) 30 rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE); 31 } 32 33 /* SSOPF Mailbox messages */ 34 35 struct ssovf_mbox_dev_info { 36 uint64_t min_deq_timeout_ns; 37 uint64_t max_deq_timeout_ns; 38 uint32_t max_num_events; 39 }; 40 41 static int 42 ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info) 43 { 44 struct octeontx_mbox_hdr hdr = {0}; 45 uint16_t len = sizeof(struct ssovf_mbox_dev_info); 46 47 hdr.coproc = SSO_COPROC; 48 hdr.msg = SSO_GET_DEV_INFO; 49 hdr.vfid = 0; 50 51 memset(info, 0, len); 52 return octeontx_mbox_send(&hdr, NULL, 0, info, len); 53 } 54 55 struct ssovf_mbox_getwork_wait { 56 uint64_t wait_ns; 57 }; 58 59 static int 60 ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns) 61 { 62 struct octeontx_mbox_hdr hdr = {0}; 63 struct ssovf_mbox_getwork_wait tmo_set; 64 uint16_t len = sizeof(struct ssovf_mbox_getwork_wait); 65 int ret; 66 67 hdr.coproc = SSO_COPROC; 68 hdr.msg = SSO_SET_GETWORK_WAIT; 69 hdr.vfid = 0; 70 71 tmo_set.wait_ns = timeout_ns; 72 ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0); 73 if (ret) 74 ssovf_log_err("Failed to set getwork timeout(%d)", ret); 75 76 return ret; 77 } 78 79 struct ssovf_mbox_grp_pri { 80 uint8_t vhgrp_id; 81 uint8_t wgt_left; /* Read only */ 82 uint8_t weight; 83 uint8_t affinity; 84 uint8_t priority; 85 }; 86 87 static int 88 ssovf_mbox_priority_set(uint8_t queue, uint8_t prio) 89 { 90 struct octeontx_mbox_hdr hdr = {0}; 91 struct ssovf_mbox_grp_pri grp; 92 uint16_t len = sizeof(struct ssovf_mbox_grp_pri); 93 int ret; 94 95 hdr.coproc = SSO_COPROC; 96 hdr.msg = SSO_GRP_SET_PRIORITY; 97 hdr.vfid = queue; 98 99 grp.vhgrp_id = queue; 100 grp.weight = 0xff; 101 grp.affinity = 0xff; 102 grp.priority = prio / 32; /* Normalize to 0 to 7 */ 103 104 ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0); 105 if (ret) 106 ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio); 107 108 return ret; 109 } 110 111 struct ssovf_mbox_convert_ns_getworks_iter { 112 uint64_t wait_ns; 113 uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */ 114 }; 115 116 static int 117 ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks) 118 { 119 struct octeontx_mbox_hdr hdr = {0}; 120 struct ssovf_mbox_convert_ns_getworks_iter ns2iter; 121 uint16_t len = sizeof(ns2iter); 122 int ret; 123 124 hdr.coproc = SSO_COPROC; 125 hdr.msg = SSO_CONVERT_NS_GETWORK_ITER; 126 hdr.vfid = 0; 127 128 memset(&ns2iter, 0, len); 129 ns2iter.wait_ns = ns; 130 ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); 131 if (ret < 0 || (ret != len)) { 132 ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns); 133 return -EIO; 134 } 135 136 *tmo_ticks = ns2iter.getwork_iter; 137 return 0; 138 } 139 140 static void 141 ssovf_fastpath_fns_set(struct rte_eventdev *dev) 142 { 143 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 144 145 dev->enqueue = ssows_enq; 146 dev->enqueue_burst = ssows_enq_burst; 147 dev->enqueue_new_burst = ssows_enq_new_burst; 148 dev->enqueue_forward_burst = ssows_enq_fwd_burst; 149 dev->dequeue = ssows_deq; 150 dev->dequeue_burst = ssows_deq_burst; 151 dev->txa_enqueue = sso_event_tx_adapter_enqueue; 152 dev->txa_enqueue_same_dest = dev->txa_enqueue; 153 154 if (edev->is_timeout_deq) { 155 dev->dequeue = ssows_deq_timeout; 156 dev->dequeue_burst = ssows_deq_timeout_burst; 157 } 158 } 159 160 static void 161 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) 162 { 163 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 164 165 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD); 166 dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns; 167 dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns; 168 dev_info->max_event_queues = edev->max_event_queues; 169 dev_info->max_event_queue_flows = (1ULL << 20); 170 dev_info->max_event_queue_priority_levels = 8; 171 dev_info->max_event_priority_levels = 1; 172 dev_info->max_event_ports = edev->max_event_ports; 173 dev_info->max_event_port_dequeue_depth = 1; 174 dev_info->max_event_port_enqueue_depth = 1; 175 dev_info->max_num_events = edev->max_num_events; 176 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS | 177 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | 178 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES| 179 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | 180 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | 181 RTE_EVENT_DEV_CAP_NONSEQ_MODE; 182 183 } 184 185 static int 186 ssovf_configure(const struct rte_eventdev *dev) 187 { 188 struct rte_event_dev_config *conf = &dev->data->dev_conf; 189 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 190 uint64_t deq_tmo_ns; 191 192 ssovf_func_trace(); 193 deq_tmo_ns = conf->dequeue_timeout_ns; 194 if (deq_tmo_ns == 0) 195 deq_tmo_ns = edev->min_deq_timeout_ns; 196 197 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { 198 edev->is_timeout_deq = 1; 199 deq_tmo_ns = edev->min_deq_timeout_ns; 200 } 201 edev->nb_event_queues = conf->nb_event_queues; 202 edev->nb_event_ports = conf->nb_event_ports; 203 204 return ssovf_mbox_getwork_tmo_set(deq_tmo_ns); 205 } 206 207 static void 208 ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id, 209 struct rte_event_queue_conf *queue_conf) 210 { 211 RTE_SET_USED(dev); 212 RTE_SET_USED(queue_id); 213 214 queue_conf->nb_atomic_flows = (1ULL << 20); 215 queue_conf->nb_atomic_order_sequences = (1ULL << 20); 216 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES; 217 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 218 } 219 220 static void 221 ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id) 222 { 223 RTE_SET_USED(dev); 224 RTE_SET_USED(queue_id); 225 } 226 227 static int 228 ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, 229 const struct rte_event_queue_conf *queue_conf) 230 { 231 RTE_SET_USED(dev); 232 ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority); 233 234 return ssovf_mbox_priority_set(queue_id, queue_conf->priority); 235 } 236 237 static void 238 ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, 239 struct rte_event_port_conf *port_conf) 240 { 241 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 242 243 RTE_SET_USED(port_id); 244 port_conf->new_event_threshold = edev->max_num_events; 245 port_conf->dequeue_depth = 1; 246 port_conf->enqueue_depth = 1; 247 port_conf->disable_implicit_release = 0; 248 } 249 250 static void 251 ssovf_port_release(void *port) 252 { 253 rte_free(port); 254 } 255 256 static int 257 ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, 258 const struct rte_event_port_conf *port_conf) 259 { 260 struct ssows *ws; 261 uint32_t reg_off; 262 uint8_t q; 263 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 264 265 ssovf_func_trace("port=%d", port_id); 266 RTE_SET_USED(port_conf); 267 268 /* Free memory prior to re-allocation if needed */ 269 if (dev->data->ports[port_id] != NULL) { 270 ssovf_port_release(dev->data->ports[port_id]); 271 dev->data->ports[port_id] = NULL; 272 } 273 274 /* Allocate event port memory */ 275 ws = rte_zmalloc_socket("eventdev ssows", 276 sizeof(struct ssows), RTE_CACHE_LINE_SIZE, 277 dev->data->socket_id); 278 if (ws == NULL) { 279 ssovf_log_err("Failed to alloc memory for port=%d", port_id); 280 return -ENOMEM; 281 } 282 283 ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); 284 if (ws->base == NULL) { 285 rte_free(ws); 286 ssovf_log_err("Failed to get hws base addr port=%d", port_id); 287 return -EINVAL; 288 } 289 290 reg_off = SSOW_VHWS_OP_GET_WORK0; 291 reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */ 292 reg_off |= 1 << 16; /* Wait */ 293 ws->getwork = ws->base + reg_off; 294 ws->port = port_id; 295 296 for (q = 0; q < edev->nb_event_queues; q++) { 297 ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); 298 if (ws->grps[q] == NULL) { 299 rte_free(ws); 300 ssovf_log_err("Failed to get grp%d base addr", q); 301 return -EINVAL; 302 } 303 } 304 305 dev->data->ports[port_id] = ws; 306 ssovf_log_dbg("port=%d ws=%p", port_id, ws); 307 return 0; 308 } 309 310 static int 311 ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], 312 const uint8_t priorities[], uint16_t nb_links) 313 { 314 uint16_t link; 315 uint64_t val; 316 struct ssows *ws = port; 317 318 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links); 319 RTE_SET_USED(dev); 320 RTE_SET_USED(priorities); 321 322 for (link = 0; link < nb_links; link++) { 323 val = queues[link]; 324 val |= (1ULL << 24); /* Set membership */ 325 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 326 } 327 return (int)nb_links; 328 } 329 330 static int 331 ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[], 332 uint16_t nb_unlinks) 333 { 334 uint16_t unlink; 335 uint64_t val; 336 struct ssows *ws = port; 337 338 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks); 339 RTE_SET_USED(dev); 340 341 for (unlink = 0; unlink < nb_unlinks; unlink++) { 342 val = queues[unlink]; 343 val &= ~(1ULL << 24); /* Clear membership */ 344 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 345 } 346 return (int)nb_unlinks; 347 } 348 349 static int 350 ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks) 351 { 352 RTE_SET_USED(dev); 353 354 return ssovf_mbox_timeout_ticks(ns, tmo_ticks); 355 } 356 357 static void 358 ssows_dump(struct ssows *ws, FILE *f) 359 { 360 uint8_t *base = ws->base; 361 uint64_t val; 362 363 fprintf(f, "\t---------------port%d---------------\n", ws->port); 364 val = ssovf_read64(base + SSOW_VHWS_TAG); 365 fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n", 366 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 367 (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1, 368 (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff, 369 (int)(val >> 63) & 0x1); 370 371 val = ssovf_read64(base + SSOW_VHWS_WQP); 372 fprintf(f, "\twqp=0x%"PRIx64"\n", val); 373 374 val = ssovf_read64(base + SSOW_VHWS_LINKS); 375 fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n", 376 (int)(val & 0x3ff), (int)(val >> 10) & 0x1, 377 (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1, 378 (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff); 379 380 val = ssovf_read64(base + SSOW_VHWS_PENDTAG); 381 fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n", 382 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 383 (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1, 384 (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1, 385 (int)(val >> 63) & 0x1); 386 387 val = ssovf_read64(base + SSOW_VHWS_PENDWQP); 388 fprintf(f, "\tpwqp=0x%"PRIx64"\n", val); 389 } 390 391 static int 392 ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev, 393 const struct rte_eth_dev *eth_dev, uint32_t *caps) 394 { 395 int ret; 396 RTE_SET_USED(dev); 397 398 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 399 if (ret) 400 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP; 401 else 402 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT; 403 404 return 0; 405 } 406 407 static int 408 ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev, 409 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id, 410 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 411 { 412 int ret = 0; 413 const struct octeontx_nic *nic = eth_dev->data->dev_private; 414 pki_mod_qos_t pki_qos; 415 RTE_SET_USED(dev); 416 417 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 418 if (ret) 419 return -EINVAL; 420 421 if (rx_queue_id >= 0) 422 return -EINVAL; 423 424 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 425 return -ENOTSUP; 426 427 memset(&pki_qos, 0, sizeof(pki_mod_qos_t)); 428 429 pki_qos.port_type = 0; 430 pki_qos.index = 0; 431 pki_qos.mmask.f_tag_type = 1; 432 pki_qos.mmask.f_port_add = 1; 433 pki_qos.mmask.f_grp_ok = 1; 434 pki_qos.mmask.f_grp_bad = 1; 435 pki_qos.mmask.f_grptag_ok = 1; 436 pki_qos.mmask.f_grptag_bad = 1; 437 438 pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type; 439 pki_qos.qos_entry.port_add = 0; 440 pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id; 441 pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id; 442 pki_qos.qos_entry.grptag_bad = 0; 443 pki_qos.qos_entry.grptag_ok = 0; 444 445 ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos); 446 if (ret < 0) 447 ssovf_log_err("failed to modify QOS, port=%d, q=%d", 448 nic->port_id, queue_conf->ev.queue_id); 449 450 return ret; 451 } 452 453 static int 454 ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev, 455 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id) 456 { 457 int ret = 0; 458 const struct octeontx_nic *nic = eth_dev->data->dev_private; 459 pki_del_qos_t pki_qos; 460 RTE_SET_USED(dev); 461 462 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 463 if (ret) 464 return -EINVAL; 465 466 pki_qos.port_type = 0; 467 pki_qos.index = 0; 468 memset(&pki_qos, 0, sizeof(pki_del_qos_t)); 469 ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos); 470 if (ret < 0) 471 ssovf_log_err("Failed to delete QOS port=%d, q=%d", 472 nic->port_id, rx_queue_id); 473 return ret; 474 } 475 476 static int 477 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev, 478 const struct rte_eth_dev *eth_dev) 479 { 480 RTE_SET_USED(dev); 481 RTE_SET_USED(eth_dev); 482 483 return 0; 484 } 485 486 487 static int 488 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev, 489 const struct rte_eth_dev *eth_dev) 490 { 491 RTE_SET_USED(dev); 492 RTE_SET_USED(eth_dev); 493 494 return 0; 495 } 496 497 static int 498 ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev, 499 const struct rte_eth_dev *eth_dev, uint32_t *caps) 500 { 501 int ret; 502 RTE_SET_USED(dev); 503 504 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 505 if (ret) 506 *caps = 0; 507 else 508 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT; 509 510 return 0; 511 } 512 513 static int 514 ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev) 515 { 516 RTE_SET_USED(id); 517 RTE_SET_USED(dev); 518 return 0; 519 } 520 521 static int 522 ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev) 523 { 524 RTE_SET_USED(id); 525 RTE_SET_USED(dev); 526 return 0; 527 } 528 529 static int 530 ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev, 531 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 532 { 533 RTE_SET_USED(id); 534 RTE_SET_USED(dev); 535 RTE_SET_USED(eth_dev); 536 RTE_SET_USED(tx_queue_id); 537 return 0; 538 } 539 540 static int 541 ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev, 542 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 543 { 544 RTE_SET_USED(id); 545 RTE_SET_USED(dev); 546 RTE_SET_USED(eth_dev); 547 RTE_SET_USED(tx_queue_id); 548 return 0; 549 } 550 551 static int 552 ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev) 553 { 554 RTE_SET_USED(id); 555 RTE_SET_USED(dev); 556 return 0; 557 } 558 559 static int 560 ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev) 561 { 562 RTE_SET_USED(id); 563 RTE_SET_USED(dev); 564 return 0; 565 } 566 567 568 static void 569 ssovf_dump(struct rte_eventdev *dev, FILE *f) 570 { 571 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 572 uint8_t port; 573 574 /* Dump SSOWVF debug registers */ 575 for (port = 0; port < edev->nb_event_ports; port++) 576 ssows_dump(dev->data->ports[port], f); 577 } 578 579 static int 580 ssovf_start(struct rte_eventdev *dev) 581 { 582 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 583 struct ssows *ws; 584 uint8_t *base; 585 uint8_t i; 586 587 ssovf_func_trace(); 588 for (i = 0; i < edev->nb_event_ports; i++) { 589 ws = dev->data->ports[i]; 590 ssows_reset(ws); 591 ws->swtag_req = 0; 592 } 593 594 for (i = 0; i < edev->nb_event_queues; i++) { 595 /* Consume all the events through HWS0 */ 596 ssows_flush_events(dev->data->ports[0], i, NULL, NULL); 597 598 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 599 base += SSO_VHGRP_QCTL; 600 ssovf_write64(1, base); /* Enable SSO group */ 601 } 602 603 ssovf_fastpath_fns_set(dev); 604 return 0; 605 } 606 607 static void 608 ssows_handle_event(void *arg, struct rte_event event) 609 { 610 struct rte_eventdev *dev = arg; 611 612 if (dev->dev_ops->dev_stop_flush != NULL) 613 dev->dev_ops->dev_stop_flush(dev->data->dev_id, event, 614 dev->data->dev_stop_flush_arg); 615 } 616 617 static void 618 ssovf_stop(struct rte_eventdev *dev) 619 { 620 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 621 struct ssows *ws; 622 uint8_t *base; 623 uint8_t i; 624 625 ssovf_func_trace(); 626 for (i = 0; i < edev->nb_event_ports; i++) { 627 ws = dev->data->ports[i]; 628 ssows_reset(ws); 629 ws->swtag_req = 0; 630 } 631 632 for (i = 0; i < edev->nb_event_queues; i++) { 633 /* Consume all the events through HWS0 */ 634 ssows_flush_events(dev->data->ports[0], i, 635 ssows_handle_event, dev); 636 637 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 638 base += SSO_VHGRP_QCTL; 639 ssovf_write64(0, base); /* Disable SSO group */ 640 } 641 } 642 643 static int 644 ssovf_close(struct rte_eventdev *dev) 645 { 646 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 647 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 648 uint8_t i; 649 650 for (i = 0; i < edev->nb_event_queues; i++) 651 all_queues[i] = i; 652 653 for (i = 0; i < edev->nb_event_ports; i++) 654 ssovf_port_unlink(dev, dev->data->ports[i], all_queues, 655 edev->nb_event_queues); 656 return 0; 657 } 658 659 static int 660 ssovf_selftest(const char *key __rte_unused, const char *value, 661 void *opaque) 662 { 663 int *flag = opaque; 664 *flag = !!atoi(value); 665 return 0; 666 } 667 668 static int 669 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags, 670 uint32_t *caps, const struct rte_event_timer_adapter_ops **ops) 671 { 672 return timvf_timer_adapter_caps_get(dev, flags, caps, ops, 673 timvf_enable_stats); 674 } 675 676 /* Initialize and register event driver with DPDK Application */ 677 static struct rte_eventdev_ops ssovf_ops = { 678 .dev_infos_get = ssovf_info_get, 679 .dev_configure = ssovf_configure, 680 .queue_def_conf = ssovf_queue_def_conf, 681 .queue_setup = ssovf_queue_setup, 682 .queue_release = ssovf_queue_release, 683 .port_def_conf = ssovf_port_def_conf, 684 .port_setup = ssovf_port_setup, 685 .port_release = ssovf_port_release, 686 .port_link = ssovf_port_link, 687 .port_unlink = ssovf_port_unlink, 688 .timeout_ticks = ssovf_timeout_ticks, 689 690 .eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get, 691 .eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add, 692 .eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del, 693 .eth_rx_adapter_start = ssovf_eth_rx_adapter_start, 694 .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop, 695 696 .eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get, 697 .eth_tx_adapter_create = ssovf_eth_tx_adapter_create, 698 .eth_tx_adapter_free = ssovf_eth_tx_adapter_free, 699 .eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add, 700 .eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del, 701 .eth_tx_adapter_start = ssovf_eth_tx_adapter_start, 702 .eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop, 703 704 .timer_adapter_caps_get = ssovf_timvf_caps_get, 705 706 .dev_selftest = test_eventdev_octeontx, 707 708 .dump = ssovf_dump, 709 .dev_start = ssovf_start, 710 .dev_stop = ssovf_stop, 711 .dev_close = ssovf_close 712 }; 713 714 static int 715 ssovf_vdev_probe(struct rte_vdev_device *vdev) 716 { 717 struct ssovf_info oinfo; 718 struct ssovf_mbox_dev_info info; 719 struct ssovf_evdev *edev; 720 struct rte_eventdev *eventdev; 721 static int ssovf_init_once; 722 const char *name; 723 const char *params; 724 int ret; 725 int selftest = 0; 726 727 static const char *const args[] = { 728 SSOVF_SELFTEST_ARG, 729 TIMVF_ENABLE_STATS_ARG, 730 NULL 731 }; 732 733 name = rte_vdev_device_name(vdev); 734 /* More than one instance is not supported */ 735 if (ssovf_init_once) { 736 ssovf_log_err("Request to create >1 %s instance", name); 737 return -EINVAL; 738 } 739 740 params = rte_vdev_device_args(vdev); 741 if (params != NULL && params[0] != '\0') { 742 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args); 743 744 if (!kvlist) { 745 ssovf_log_info( 746 "Ignoring unsupported params supplied '%s'", 747 name); 748 } else { 749 int ret = rte_kvargs_process(kvlist, 750 SSOVF_SELFTEST_ARG, 751 ssovf_selftest, &selftest); 752 if (ret != 0) { 753 ssovf_log_err("%s: Error in selftest", name); 754 rte_kvargs_free(kvlist); 755 return ret; 756 } 757 758 ret = rte_kvargs_process(kvlist, 759 TIMVF_ENABLE_STATS_ARG, 760 ssovf_selftest, &timvf_enable_stats); 761 if (ret != 0) { 762 ssovf_log_err("%s: Error in timvf stats", name); 763 rte_kvargs_free(kvlist); 764 return ret; 765 } 766 } 767 768 rte_kvargs_free(kvlist); 769 } 770 771 eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), 772 rte_socket_id()); 773 if (eventdev == NULL) { 774 ssovf_log_err("Failed to create eventdev vdev %s", name); 775 return -ENOMEM; 776 } 777 eventdev->dev_ops = &ssovf_ops; 778 779 /* For secondary processes, the primary has done all the work */ 780 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 781 ssovf_fastpath_fns_set(eventdev); 782 return 0; 783 } 784 785 octeontx_mbox_init(); 786 ret = ssovf_info(&oinfo); 787 if (ret) { 788 ssovf_log_err("Failed to probe and validate ssovfs %d", ret); 789 goto error; 790 } 791 792 edev = ssovf_pmd_priv(eventdev); 793 edev->max_event_ports = oinfo.total_ssowvfs; 794 edev->max_event_queues = oinfo.total_ssovfs; 795 edev->is_timeout_deq = 0; 796 797 ret = ssovf_mbox_dev_info(&info); 798 if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) { 799 ssovf_log_err("Failed to get mbox devinfo %d", ret); 800 goto error; 801 } 802 803 edev->min_deq_timeout_ns = info.min_deq_timeout_ns; 804 edev->max_deq_timeout_ns = info.max_deq_timeout_ns; 805 edev->max_num_events = info.max_num_events; 806 ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d", 807 info.min_deq_timeout_ns, info.max_deq_timeout_ns, 808 info.max_num_events); 809 810 if (!edev->max_event_ports || !edev->max_event_queues) { 811 ssovf_log_err("Not enough eventdev resource queues=%d ports=%d", 812 edev->max_event_queues, edev->max_event_ports); 813 ret = -ENODEV; 814 goto error; 815 } 816 817 ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d", 818 name, oinfo.domain, edev->max_event_queues, 819 edev->max_event_ports); 820 821 ssovf_init_once = 1; 822 if (selftest) 823 test_eventdev_octeontx(); 824 return 0; 825 826 error: 827 rte_event_pmd_vdev_uninit(name); 828 return ret; 829 } 830 831 static int 832 ssovf_vdev_remove(struct rte_vdev_device *vdev) 833 { 834 const char *name; 835 836 name = rte_vdev_device_name(vdev); 837 ssovf_log_info("Closing %s", name); 838 return rte_event_pmd_vdev_uninit(name); 839 } 840 841 static struct rte_vdev_driver vdev_ssovf_pmd = { 842 .probe = ssovf_vdev_probe, 843 .remove = ssovf_vdev_remove 844 }; 845 846 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd); 847