/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"

int otx_logtype_ssovf;
static uint8_t timvf_enable_stats;

RTE_INIT(otx_ssovf_init_log);
static void
otx_ssovf_init_log(void)
{
	otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
	if (otx_logtype_ssovf >= 0)
		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
}

/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}
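/*
 * Mailbox usage sketch (descriptive, inferred from the calls above): each
 * request fills an octeontx_mbox_hdr with the target coprocessor
 * (SSO_COPROC), a message id and a vfid, then calls
 * octeontx_mbox_send(&hdr, tx, txlen, rx, rxlen). Judging by the checks
 * in this file (ret != len here, ret != sizeof(info) in probe), the call
 * returns the number of response bytes on success and a negative value
 * on failure.
 */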
static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->disable_implicit_release = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
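/*
 * Hypothetical application-side sketch: with the default port config
 * above (enqueue/dequeue depth of 1), a worker thread processes events
 * one at a time:
 *
 *	struct rte_event ev;
 *	while (!done) {
 *		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, tmo))
 *			rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	}
 *
 * dev_id, port_id, tmo and done are placeholders, not names from this
 * driver.
 */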
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
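/*
 * Note on the (un)link operations above: each queue update is a single
 * 64-bit write to the per-HWS group-mask-change register. The group
 * (queue) id sits in the low bits and bit 24 selects the operation:
 * 1 adds the group to the mask, 0 removes it. Maskset zero is used,
 * matching the "Index_ggrp_mask" bit chosen for the getwork address in
 * ssovf_port_setup().
 */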
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Zero the request before filling it; the original code memset
	 * after the assignments, making them dead stores.
	 */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_start(nic->port_id);
	return 0;
}
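/*
 * Illustrative note (inferred from the adapter ops above): for
 * "eth_octeontx" devices the Rx adapter runs in internal-port mode, i.e.
 * the PKI block is reprogrammed (octeontx_pki_port_modify_qos) so that
 * received packets are tagged straight to the target event queue with
 * the requested sched_type, without a service core. Only a whole-port
 * mapping is supported, which is why queue_add rejects rx_queue_id >= 0
 * (a specific Rx queue) and accepts only -1 (all Rx queues).
 */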
static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_stop(nic->port_id);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
					dev->data->dev_stop_flush_arg);
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}
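/*
 * Drain semantics, as implemented above: both start and stop flush every
 * event queue through the first hardware work slot (HWS0). On start the
 * stale events are simply discarded (NULL handler); on stop each drained
 * event is handed to the application's dev_stop_flush callback, when one
 * is registered, via ssows_handle_event().
 */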
static int
ssovf_selftest(const char *key __rte_unused, const char *value,
		void *opaque)
{
	int *flag = opaque;
	*flag = !!atoi(value);
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.dev_selftest = test_eventdev_octeontx,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;
	int selftest = 0;

	static const char *const args[] = {
		SSOVF_SELFTEST_ARG,
		TIMVF_ENABLE_STATS_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			int timvf_stats = 0;
			int ret = rte_kvargs_process(kvlist,
					SSOVF_SELFTEST_ARG,
					ssovf_selftest, &selftest);
			if (ret != 0) {
				ssovf_log_err("%s: Error in selftest", name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist,
					TIMVF_ENABLE_STATS_ARG,
					ssovf_selftest, &timvf_stats);
			if (ret != 0) {
				ssovf_log_err("%s: Error in timvf stats", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
			/*
			 * ssovf_selftest() stores a full int through its
			 * opaque pointer; parse into a local int and narrow
			 * afterwards rather than aliasing the uint8_t flag
			 * directly, which would clobber adjacent bytes.
			 */
			timvf_enable_stats = !!timvf_stats;
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	if (selftest)
		test_eventdev_octeontx();
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
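/*
 * Instantiation sketch: the device is created from the EAL command line,
 * e.g. --vdev="event_octeontx,selftest=1,timvf_stats=1". The exact kvarg
 * names are whatever SSOVF_SELFTEST_ARG and TIMVF_ENABLE_STATS_ARG expand
 * to in the headers; "selftest" and "timvf_stats" here are assumptions
 * based on those macro names.
 */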
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);