/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"

int otx_logtype_ssovf;

RTE_INIT(otx_ssovf_init_log);
static void
otx_ssovf_init_log(void)
{
	otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
	if (otx_logtype_ssovf >= 0)
		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
}

/* SSO PF mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

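	/*
	 * Install the default single-event fast-path handlers. The
	 * timeout-capable dequeue variants replace them below when the
	 * device was configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 * (is_timeout_deq is set in ssovf_configure()).
	 */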
	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->disable_implicit_release = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *port_conf)
{
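	/*
	 * Each event port is backed by one SSOW VF work slot; its BAR
	 * pointers are cached in struct ssows below so the fast path can
	 * issue GET_WORK loads without recomputing register offsets.
	 */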
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);
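
	/* Pending-state registers: show any GET_WORK or tag-switch
	 * operation still in flight on this work slot.
	 */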

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Only rx_queue_id == -1 (i.e. all Rx queues at once) is supported */
	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_start(nic->port_id);
	return 0;
}
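
/*
 * Rx adapter usage, as this PMD expects it: packets are delivered to the
 * configured event queue by hardware (INTERNAL_PORT capability), so the
 * application binds the whole ethdev (rx_queue_id = -1) and never services
 * the adapter itself. A minimal sketch with hypothetical ids (adapter 0,
 * eventdev 0, ethdev port 0) and a caller-supplied def_port_conf:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *	rte_event_eth_rx_adapter_create(0, 0, &def_port_conf);
 *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
 *	rte_event_eth_rx_adapter_start(0);
 *
 * Note that RTE_SCHED_TYPE_PARALLEL is rejected by queue_add above.
 */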

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_stop(nic->port_id);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
				dev->data->dev_stop_flush_arg);
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
				edev->nb_event_queues);
	return 0;
}

static int
ssovf_selftest(const char *key __rte_unused, const char *value,
		void *opaque)
{
	int *flag = opaque;
	*flag = !!atoi(value);
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops, 0);
}

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

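	/* Rx adapter ops: on octeontx ethdevs the PKI block injects packets
	 * directly as events (INTERNAL_PORT capability), so queue add/del
	 * translate to the PKI QOS mailbox updates implemented above.
	 */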
	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.dev_selftest = test_eventdev_octeontx,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;
	int selftest = 0;

	static const char *const args[] = {
		SSOVF_SELFTEST_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied for '%s'",
				name);
		} else {
			ret = rte_kvargs_process(kvlist,
					SSOVF_SELFTEST_ARG,
					ssovf_selftest, &selftest);
			if (ret != 0) {
				ssovf_log_err("%s: Error in selftest", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
			rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	if (selftest)
		test_eventdev_octeontx();
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
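
/* Tear-down path: releases the eventdev instance created in
 * ssovf_vdev_probe().
 */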
ssovf_log_info("Closing %s", name); 755 return rte_event_pmd_vdev_uninit(name); 756 } 757 758 static struct rte_vdev_driver vdev_ssovf_pmd = { 759 .probe = ssovf_vdev_probe, 760 .remove = ssovf_vdev_remove 761 }; 762 763 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd); 764