/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"

int otx_logtype_ssovf;

RTE_INIT(otx_ssovf_init_log);
static void
otx_ssovf_init_log(void)
{
	otx_logtype_ssovf = rte_log_register("pmd.otx.eventdev");
	if (otx_logtype_ssovf >= 0)
		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
}

/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRIu64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}
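
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the timeout dequeue variants above are only installed when the
 * application configures the device with a per-dequeue timeout:
 *
 *	struct rte_event_dev_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
 *	... remaining limits sized from rte_event_dev_info_get() ...
 *	rte_event_dev_configure(dev_id, &cfg);
 *
 * ssovf_configure() below then sets edev->is_timeout_deq, and this
 * function swaps in ssows_deq_timeout[_burst].
 */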

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
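
/*
 * Worked example (illustrative): ssovf_mbox_priority_set() collapses
 * the 0-255 eventdev priority passed in here onto the 8 hardware group
 * priority levels advertised in ssovf_info_get() via prio / 32:
 *
 *	RTE_EVENT_DEV_PRIORITY_HIGHEST (0)   -> group priority 0
 *	RTE_EVENT_DEV_PRIORITY_NORMAL  (128) -> group priority 4
 *	RTE_EVENT_DEV_PRIORITY_LOWEST  (255) -> group priority 7
 *
 * so runs of 32 adjacent eventdev priorities share one hardware level.
 */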

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
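
/*
 * Illustrative note, derived from the two loops above: a write to
 * SSOW_VHWS_GRPMSK_CHGX(0) toggles one port-to-group (queue) mapping at
 * a time, with the group id in the low bits and bit 24 acting as the
 * set/clear membership flag. Linking queue 3, for instance, writes
 *
 *	ssovf_write64(3 | (1ULL << 24), ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
 *
 * while unlinking it writes the same value with bit 24 clear, which is
 * why both loops walk queues[] one entry per register write.
 */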

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Clear the entry before filling it, not after */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}
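
/*
 * Usage sketch (hypothetical application code): queue_add above only
 * accepts rx_queue_id == -1 (all Rx queues at once) and rejects
 * RTE_SCHED_TYPE_PARALLEL, so an application drives it roughly as:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf;
 *
 *	memset(&qconf, 0, sizeof(qconf));
 *	qconf.ev.queue_id = ev_queue_id;
 *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1,
 *					   &qconf);
 *
 * adapter_id, eth_port_id and ev_queue_id are placeholders for values
 * the application already owns.
 */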

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_start(nic->port_id);
	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;
	octeontx_pki_port_stop(nic->port_id);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};
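
/*
 * Debug sketch (hypothetical application code): the .dump op wired up
 * above is reached through the generic eventdev API, which is the easy
 * way to get the ssows_dump() register decode for every port:
 *
 *	rte_event_dev_dump(dev_id, stdout);
 */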

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRIu64" max_deq_tmo=%"PRIu64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
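
/*
 * Usage sketch (illustrative): as a vdev, a single instance of this PMD
 * is created from the EAL command line, e.g.
 *
 *	./app -l 0-3 --vdev="event_octeontx"
 *
 * or programmatically, before looking up the device id:
 *
 *	rte_vdev_init("event_octeontx", NULL);
 *	int dev_id = rte_event_dev_get_dev_id("event_octeontx");
 */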