/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"

static uint8_t timvf_enable_stats;

RTE_LOG_REGISTER(otx_logtype_ssovf, pmd.event.octeontx, NOTICE);

/* SSO PF mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t vhgrp_id;
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.vhgrp_id = queue;
	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret != len) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRIu64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}
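
/*
 * The SSO PF expresses a dequeue timeout as a number of GETWORK polling
 * iterations rather than in nanoseconds, hence the mailbox round trip
 * above. Illustrative application-side use of the resulting tick value
 * (a sketch, not part of this driver):
 *
 *	uint64_t ticks;
 *
 *	rte_event_dequeue_timeout_ticks(dev_id, 10000, &ticks);
 *	nb = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */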

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->event_port_cfg = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
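
/*
 * Each event port maps 1:1 onto an SSOW hardware workslot (HWS) VF. Port
 * setup below precomputes the GETWORK load address once, folding the
 * maskset-zero index and the "wait" bit into the register offset, so the
 * fast path can dequeue with a single register read.
 */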

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;
	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
			uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
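
/*
 * Rx adapter capability selection: when the paired ethdev is the
 * co-packaged "eth_octeontx" PMD (matched by name below), PKI can inject
 * packets into the SSO directly, so the internal-port capability is
 * reported; any other ethdev falls back to the software Rx adapter.
 */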

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint16_t free_idx = UINT16_MAX;
	struct octeontx_rxq *rxq;
	pki_mod_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;
	void *old_ptr;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	/* eth_octeontx only supports one rq. */
	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	/* Add rxq pool to list of used pools and reduce available events. */
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			edev->rxq_pool_rcnt[i]++;
			found = true;
			break;
		} else if (free_idx == UINT16_MAX &&
			   edev->rxq_pool_array[i] == 0) {
			free_idx = i;
		}
	}

	if (!found) {
		uint16_t idx;

		if (edev->available_events < rxq->pool->size) {
			ssovf_log_err(
				"Max available events %"PRIu32" < requested events in rxq pool %"PRIu32"",
				edev->available_events, rxq->pool->size);
			return -ENOMEM;
		}

		if (free_idx != UINT16_MAX) {
			idx = free_idx;
		} else {
			old_ptr = edev->rxq_pool_array;
			edev->rxq_pools++;
			edev->rxq_pool_array = rte_realloc(
				edev->rxq_pool_array,
				sizeof(uint64_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_array == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_array = old_ptr;
				return -ENOMEM;
			}

			old_ptr = edev->rxq_pool_rcnt;
			edev->rxq_pool_rcnt = rte_realloc(
				edev->rxq_pool_rcnt,
				sizeof(uint8_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_rcnt == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_rcnt = old_ptr;
				return -ENOMEM;
			}
			idx = edev->rxq_pools - 1;
		}

		edev->rxq_pool_array[idx] = (uintptr_t)rxq->pool;
		edev->rxq_pool_rcnt[idx] = 1;
		edev->available_events -= rxq->pool->size;
	}

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	edev->rx_offload_flags = nic->rx_offload_flags;
	edev->tx_offload_flags = nic->tx_offload_flags;
	return ret;
}
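
/*
 * The queue delete path below is the inverse of the accounting done in
 * queue add: drop one reference on the rxq's mempool, free the slot when
 * the count reaches zero, and return the pool's events to the available
 * budget. Illustrative application-side wiring of this adapter (a sketch,
 * not part of this driver):
 *
 *	rte_event_eth_rx_adapter_create(adapter_id, dev_id, &port_conf);
 *	rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1,
 *					   &queue_conf);
 */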

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct octeontx_rxq *rxq;
	pki_del_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;

	/* Reject foreign ethdevs before touching the pool accounting. */
	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			found = true;
			break;
		}
	}

	if (found) {
		edev->rxq_pool_rcnt[i]--;
		if (edev->rxq_pool_rcnt[i] == 0)
			edev->rxq_pool_array[i] = 0;
		edev->available_events += rxq->pool->size;
	}

	/* Zero the request before setting its fields, not after. */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}
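
/*
 * Start/stop sequencing: every workslot is reset first, then each event
 * queue (SSO group) is drained through workslot 0 before its QCTL enable
 * bit is toggled. On stop, drained events are handed to the application's
 * dev_stop_flush callback (if one is registered) rather than being
 * silently dropped.
 */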

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
					dev->data->dev_stop_flush_arg);
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

static int
ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
{
	/* The target is the uint8_t timvf_enable_stats flag; writing
	 * through an int pointer would overrun it.
	 */
	uint8_t *flag = opaque;

	*flag = !!atoi(value);
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}
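
/*
 * The only device argument handled by ssovf_parsekv() above is
 * TIMVF_ENABLE_STATS_ARG, a boolean toggle for timer adapter statistics.
 * Assuming the macro expands to "timvf_stats", it would be supplied on
 * the EAL command line, e.g.:
 *
 *	--vdev="event_octeontx,timvf_stats=1"
 */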

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
	.eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
	.eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
	.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
	.eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
	.eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.dev_selftest = test_eventdev_octeontx,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;

	static const char *const args[] = {
		TIMVF_ENABLE_STATS_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			ret = rte_kvargs_process(kvlist,
						 TIMVF_ENABLE_STATS_ARG,
						 ssovf_parsekv,
						 &timvf_enable_stats);
			if (ret != 0) {
				ssovf_log_err("%s: Error in timvf stats", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	timvf_set_eventdevice(eventdev);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	octeontx_mbox_init();
	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	edev->available_events = info.max_num_events;

	ssovf_log_dbg("min_deq_tmo=%" PRIu64 " max_deq_tmo=%" PRIu64
		      " max_evts=%d",
		      info.min_deq_timeout_ns, info.max_deq_timeout_ns,
		      info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}
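
/*
 * Driver registration: RTE_PMD_REGISTER_VDEV below publishes the probe
 * and remove callbacks under EVENTDEV_NAME_OCTEONTX_PMD, so EAL
 * instantiates the device when the corresponding --vdev argument is
 * supplied.
 */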

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);