1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 */ 4 5 #include <inttypes.h> 6 7 #include <rte_common.h> 8 #include <cryptodev_pmd.h> 9 #include <rte_debug.h> 10 #include <rte_dev.h> 11 #include <rte_eal.h> 12 #include <ethdev_driver.h> 13 #include <rte_event_eth_rx_adapter.h> 14 #include <rte_kvargs.h> 15 #include <rte_lcore.h> 16 #include <rte_log.h> 17 #include <rte_malloc.h> 18 #include <rte_memory.h> 19 #include <rte_bus_vdev.h> 20 21 #include "ssovf_evdev.h" 22 #include "timvf_evdev.h" 23 #include "otx_cryptodev_hw_access.h" 24 25 static uint8_t timvf_enable_stats; 26 27 RTE_LOG_REGISTER_DEFAULT(otx_logtype_ssovf, NOTICE); 28 29 /* SSOPF Mailbox messages */ 30 31 struct ssovf_mbox_dev_info { 32 uint64_t min_deq_timeout_ns; 33 uint64_t max_deq_timeout_ns; 34 uint32_t max_num_events; 35 }; 36 37 static int 38 ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info) 39 { 40 struct octeontx_mbox_hdr hdr = {0}; 41 uint16_t len = sizeof(struct ssovf_mbox_dev_info); 42 43 hdr.coproc = SSO_COPROC; 44 hdr.msg = SSO_GET_DEV_INFO; 45 hdr.vfid = 0; 46 47 memset(info, 0, len); 48 return octeontx_mbox_send(&hdr, NULL, 0, info, len); 49 } 50 51 struct ssovf_mbox_getwork_wait { 52 uint64_t wait_ns; 53 }; 54 55 static int 56 ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns) 57 { 58 struct octeontx_mbox_hdr hdr = {0}; 59 struct ssovf_mbox_getwork_wait tmo_set; 60 uint16_t len = sizeof(struct ssovf_mbox_getwork_wait); 61 int ret; 62 63 hdr.coproc = SSO_COPROC; 64 hdr.msg = SSO_SET_GETWORK_WAIT; 65 hdr.vfid = 0; 66 67 tmo_set.wait_ns = timeout_ns; 68 ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0); 69 if (ret) 70 ssovf_log_err("Failed to set getwork timeout(%d)", ret); 71 72 return ret; 73 } 74 75 struct ssovf_mbox_grp_pri { 76 uint8_t vhgrp_id; 77 uint8_t wgt_left; /* Read only */ 78 uint8_t weight; 79 uint8_t affinity; 80 uint8_t priority; 81 }; 82 83 static int 84 ssovf_mbox_priority_set(uint8_t queue, uint8_t prio) 85 { 86 struct octeontx_mbox_hdr hdr = {0}; 87 struct ssovf_mbox_grp_pri grp; 88 uint16_t len = sizeof(struct ssovf_mbox_grp_pri); 89 int ret; 90 91 hdr.coproc = SSO_COPROC; 92 hdr.msg = SSO_GRP_SET_PRIORITY; 93 hdr.vfid = queue; 94 95 grp.vhgrp_id = queue; 96 grp.weight = 0xff; 97 grp.affinity = 0xff; 98 grp.priority = prio / 32; /* Normalize to 0 to 7 */ 99 100 ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0); 101 if (ret) 102 ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio); 103 104 return ret; 105 } 106 107 struct ssovf_mbox_convert_ns_getworks_iter { 108 uint64_t wait_ns; 109 uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */ 110 }; 111 112 static int 113 ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks) 114 { 115 struct octeontx_mbox_hdr hdr = {0}; 116 struct ssovf_mbox_convert_ns_getworks_iter ns2iter; 117 uint16_t len = sizeof(ns2iter); 118 int ret; 119 120 hdr.coproc = SSO_COPROC; 121 hdr.msg = SSO_CONVERT_NS_GETWORK_ITER; 122 hdr.vfid = 0; 123 124 memset(&ns2iter, 0, len); 125 ns2iter.wait_ns = ns; 126 ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); 127 if (ret < 0 || (ret != len)) { 128 ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns); 129 return -EIO; 130 } 131 132 *tmo_ticks = ns2iter.getwork_iter; 133 return 0; 134 } 135 136 static void 137 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) 138 { 139 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 140 141 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD); 142 dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns; 143 dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns; 144 dev_info->max_event_queues = edev->max_event_queues; 145 dev_info->max_event_queue_flows = (1ULL << 20); 146 dev_info->max_event_queue_priority_levels = 8; 147 dev_info->max_event_priority_levels = 1; 148 dev_info->max_event_ports = edev->max_event_ports; 149 dev_info->max_event_port_dequeue_depth = 1; 150 dev_info->max_event_port_enqueue_depth = 1; 151 dev_info->max_num_events = edev->max_num_events; 152 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS | 153 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | 154 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES| 155 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | 156 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | 157 RTE_EVENT_DEV_CAP_NONSEQ_MODE | 158 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID; 159 160 } 161 162 static int 163 ssovf_configure(const struct rte_eventdev *dev) 164 { 165 struct rte_event_dev_config *conf = &dev->data->dev_conf; 166 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 167 uint64_t deq_tmo_ns; 168 169 ssovf_func_trace(); 170 deq_tmo_ns = conf->dequeue_timeout_ns; 171 if (deq_tmo_ns == 0) 172 deq_tmo_ns = edev->min_deq_timeout_ns; 173 174 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { 175 edev->is_timeout_deq = 1; 176 deq_tmo_ns = edev->min_deq_timeout_ns; 177 } 178 edev->nb_event_queues = conf->nb_event_queues; 179 edev->nb_event_ports = conf->nb_event_ports; 180 181 return ssovf_mbox_getwork_tmo_set(deq_tmo_ns); 182 } 183 184 static void 185 ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id, 186 struct rte_event_queue_conf *queue_conf) 187 { 188 RTE_SET_USED(dev); 189 RTE_SET_USED(queue_id); 190 191 queue_conf->nb_atomic_flows = (1ULL << 20); 192 queue_conf->nb_atomic_order_sequences = (1ULL << 20); 193 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES; 194 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 195 } 196 197 static void 198 ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id) 199 { 200 RTE_SET_USED(dev); 201 RTE_SET_USED(queue_id); 202 } 203 204 static int 205 ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, 206 const struct rte_event_queue_conf *queue_conf) 207 { 208 RTE_SET_USED(dev); 209 ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority); 210 211 return ssovf_mbox_priority_set(queue_id, queue_conf->priority); 212 } 213 214 static void 215 ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, 216 struct rte_event_port_conf *port_conf) 217 { 218 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 219 220 RTE_SET_USED(port_id); 221 port_conf->new_event_threshold = edev->max_num_events; 222 port_conf->dequeue_depth = 1; 223 port_conf->enqueue_depth = 1; 224 port_conf->event_port_cfg = 0; 225 } 226 227 static void 228 ssovf_port_release(void *port) 229 { 230 rte_free(port); 231 } 232 233 static int 234 ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, 235 const struct rte_event_port_conf *port_conf) 236 { 237 struct ssows *ws; 238 uint32_t reg_off; 239 uint8_t q; 240 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 241 242 ssovf_func_trace("port=%d", port_id); 243 RTE_SET_USED(port_conf); 244 245 /* Free memory prior to re-allocation if needed */ 246 if (dev->data->ports[port_id] != NULL) { 247 ssovf_port_release(dev->data->ports[port_id]); 248 dev->data->ports[port_id] = NULL; 249 } 250 251 /* Allocate event port memory */ 252 ws = rte_zmalloc_socket("eventdev ssows", 253 sizeof(struct ssows), RTE_CACHE_LINE_SIZE, 254 dev->data->socket_id); 255 if (ws == NULL) { 256 ssovf_log_err("Failed to alloc memory for port=%d", port_id); 257 return -ENOMEM; 258 } 259 260 ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); 261 if (ws->base == NULL) { 262 rte_free(ws); 263 ssovf_log_err("Failed to get hws base addr port=%d", port_id); 264 return -EINVAL; 265 } 266 267 reg_off = SSOW_VHWS_OP_GET_WORK0; 268 reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */ 269 reg_off |= 1 << 16; /* Wait */ 270 ws->getwork = ws->base + reg_off; 271 ws->port = port_id; 272 ws->lookup_mem = octeontx_fastpath_lookup_mem_get(); 273 274 for (q = 0; q < edev->nb_event_queues; q++) { 275 ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); 276 if (ws->grps[q] == NULL) { 277 rte_free(ws); 278 ssovf_log_err("Failed to get grp%d base addr", q); 279 return -EINVAL; 280 } 281 } 282 283 dev->data->ports[port_id] = ws; 284 ssovf_log_dbg("port=%d ws=%p", port_id, ws); 285 return 0; 286 } 287 288 static int 289 ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], 290 const uint8_t priorities[], uint16_t nb_links) 291 { 292 uint16_t link; 293 uint64_t val; 294 struct ssows *ws = port; 295 296 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links); 297 RTE_SET_USED(dev); 298 RTE_SET_USED(priorities); 299 300 for (link = 0; link < nb_links; link++) { 301 val = queues[link]; 302 val |= (1ULL << 24); /* Set membership */ 303 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 304 } 305 return (int)nb_links; 306 } 307 308 static int 309 ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[], 310 uint16_t nb_unlinks) 311 { 312 uint16_t unlink; 313 uint64_t val; 314 struct ssows *ws = port; 315 316 ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks); 317 RTE_SET_USED(dev); 318 319 for (unlink = 0; unlink < nb_unlinks; unlink++) { 320 val = queues[unlink]; 321 val &= ~(1ULL << 24); /* Clear membership */ 322 ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0)); 323 } 324 return (int)nb_unlinks; 325 } 326 327 static int 328 ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks) 329 { 330 RTE_SET_USED(dev); 331 332 return ssovf_mbox_timeout_ticks(ns, tmo_ticks); 333 } 334 335 static void 336 ssows_dump(struct ssows *ws, FILE *f) 337 { 338 uint8_t *base = ws->base; 339 uint64_t val; 340 341 fprintf(f, "\t---------------port%d---------------\n", ws->port); 342 val = ssovf_read64(base + SSOW_VHWS_TAG); 343 fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n", 344 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 345 (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1, 346 (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff, 347 (int)(val >> 63) & 0x1); 348 349 val = ssovf_read64(base + SSOW_VHWS_WQP); 350 fprintf(f, "\twqp=0x%"PRIx64"\n", val); 351 352 val = ssovf_read64(base + SSOW_VHWS_LINKS); 353 fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n", 354 (int)(val & 0x3ff), (int)(val >> 10) & 0x1, 355 (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1, 356 (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff); 357 358 val = ssovf_read64(base + SSOW_VHWS_PENDTAG); 359 fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n", 360 (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3, 361 (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1, 362 (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1, 363 (int)(val >> 63) & 0x1); 364 365 val = ssovf_read64(base + SSOW_VHWS_PENDWQP); 366 fprintf(f, "\tpwqp=0x%"PRIx64"\n", val); 367 } 368 369 static int 370 ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev, 371 const struct rte_eth_dev *eth_dev, uint32_t *caps) 372 { 373 int ret; 374 RTE_SET_USED(dev); 375 376 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 377 if (ret) 378 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP; 379 else 380 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT; 381 382 return 0; 383 } 384 385 static int 386 ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev, 387 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id, 388 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 389 { 390 const struct octeontx_nic *nic = eth_dev->data->dev_private; 391 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 392 uint16_t free_idx = UINT16_MAX; 393 struct octeontx_rxq *rxq; 394 pki_mod_qos_t pki_qos; 395 uint8_t found = false; 396 int i, ret = 0; 397 void *old_ptr; 398 399 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 400 if (ret) 401 return -EINVAL; 402 403 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 404 return -ENOTSUP; 405 406 /* eth_octeontx only supports one rq. */ 407 rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id; 408 rxq = eth_dev->data->rx_queues[rx_queue_id]; 409 /* Add rxq pool to list of used pools and reduce available events. */ 410 for (i = 0; i < edev->rxq_pools; i++) { 411 if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) { 412 edev->rxq_pool_rcnt[i]++; 413 found = true; 414 break; 415 } else if (free_idx == UINT16_MAX && 416 edev->rxq_pool_array[i] == 0) { 417 free_idx = i; 418 } 419 } 420 421 if (!found) { 422 uint16_t idx; 423 424 if (edev->available_events < rxq->pool->size) { 425 ssovf_log_err( 426 "Max available events %"PRIu32" requested events in rxq pool %"PRIu32"", 427 edev->available_events, rxq->pool->size); 428 return -ENOMEM; 429 } 430 431 if (free_idx != UINT16_MAX) { 432 idx = free_idx; 433 } else { 434 old_ptr = edev->rxq_pool_array; 435 edev->rxq_pools++; 436 edev->rxq_pool_array = rte_realloc( 437 edev->rxq_pool_array, 438 sizeof(uint64_t) * edev->rxq_pools, 0); 439 if (edev->rxq_pool_array == NULL) { 440 edev->rxq_pools--; 441 edev->rxq_pool_array = old_ptr; 442 return -ENOMEM; 443 } 444 445 old_ptr = edev->rxq_pool_rcnt; 446 edev->rxq_pool_rcnt = rte_realloc( 447 edev->rxq_pool_rcnt, 448 sizeof(uint8_t) * edev->rxq_pools, 0); 449 if (edev->rxq_pool_rcnt == NULL) { 450 edev->rxq_pools--; 451 edev->rxq_pool_rcnt = old_ptr; 452 return -ENOMEM; 453 } 454 idx = edev->rxq_pools - 1; 455 } 456 457 edev->rxq_pool_array[idx] = (uintptr_t)rxq->pool; 458 edev->rxq_pool_rcnt[idx] = 1; 459 edev->available_events -= rxq->pool->size; 460 } 461 462 memset(&pki_qos, 0, sizeof(pki_mod_qos_t)); 463 464 pki_qos.port_type = 0; 465 pki_qos.index = 0; 466 pki_qos.mmask.f_tag_type = 1; 467 pki_qos.mmask.f_port_add = 1; 468 pki_qos.mmask.f_grp_ok = 1; 469 pki_qos.mmask.f_grp_bad = 1; 470 pki_qos.mmask.f_grptag_ok = 1; 471 pki_qos.mmask.f_grptag_bad = 1; 472 473 pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type; 474 pki_qos.qos_entry.port_add = 0; 475 pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id; 476 pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id; 477 pki_qos.qos_entry.grptag_bad = 0; 478 pki_qos.qos_entry.grptag_ok = 0; 479 480 ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos); 481 if (ret < 0) 482 ssovf_log_err("failed to modify QOS, port=%d, q=%d", 483 nic->port_id, queue_conf->ev.queue_id); 484 485 edev->rx_offload_flags = nic->rx_offload_flags; 486 edev->tx_offload_flags = nic->tx_offload_flags; 487 return ret; 488 } 489 490 static int 491 ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev, 492 const struct rte_eth_dev *eth_dev, int32_t rx_queue_id) 493 { 494 const struct octeontx_nic *nic = eth_dev->data->dev_private; 495 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 496 struct octeontx_rxq *rxq; 497 pki_del_qos_t pki_qos; 498 uint8_t found = false; 499 int i, ret = 0; 500 501 rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id; 502 rxq = eth_dev->data->rx_queues[rx_queue_id]; 503 for (i = 0; i < edev->rxq_pools; i++) { 504 if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) { 505 found = true; 506 break; 507 } 508 } 509 510 if (found) { 511 edev->rxq_pool_rcnt[i]--; 512 if (edev->rxq_pool_rcnt[i] == 0) 513 edev->rxq_pool_array[i] = 0; 514 edev->available_events += rxq->pool->size; 515 } 516 517 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 518 if (ret) 519 return -EINVAL; 520 521 pki_qos.port_type = 0; 522 pki_qos.index = 0; 523 memset(&pki_qos, 0, sizeof(pki_del_qos_t)); 524 ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos); 525 if (ret < 0) 526 ssovf_log_err("Failed to delete QOS port=%d, q=%d", 527 nic->port_id, rx_queue_id); 528 return ret; 529 } 530 531 static int 532 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev, 533 const struct rte_eth_dev *eth_dev) 534 { 535 RTE_SET_USED(dev); 536 RTE_SET_USED(eth_dev); 537 538 return 0; 539 } 540 541 542 static int 543 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev, 544 const struct rte_eth_dev *eth_dev) 545 { 546 RTE_SET_USED(dev); 547 RTE_SET_USED(eth_dev); 548 549 return 0; 550 } 551 552 static int 553 ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev, 554 const struct rte_eth_dev *eth_dev, uint32_t *caps) 555 { 556 int ret; 557 RTE_SET_USED(dev); 558 559 ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); 560 if (ret) 561 *caps = 0; 562 else 563 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT; 564 565 return 0; 566 } 567 568 static int 569 ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev) 570 { 571 RTE_SET_USED(id); 572 RTE_SET_USED(dev); 573 return 0; 574 } 575 576 static int 577 ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev) 578 { 579 RTE_SET_USED(id); 580 RTE_SET_USED(dev); 581 return 0; 582 } 583 584 static int 585 ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev, 586 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 587 { 588 RTE_SET_USED(id); 589 RTE_SET_USED(dev); 590 RTE_SET_USED(eth_dev); 591 RTE_SET_USED(tx_queue_id); 592 return 0; 593 } 594 595 static int 596 ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev, 597 const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) 598 { 599 RTE_SET_USED(id); 600 RTE_SET_USED(dev); 601 RTE_SET_USED(eth_dev); 602 RTE_SET_USED(tx_queue_id); 603 return 0; 604 } 605 606 static int 607 ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev) 608 { 609 RTE_SET_USED(id); 610 RTE_SET_USED(dev); 611 return 0; 612 } 613 614 static int 615 ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev) 616 { 617 RTE_SET_USED(id); 618 RTE_SET_USED(dev); 619 return 0; 620 } 621 622 623 static void 624 ssovf_dump(struct rte_eventdev *dev, FILE *f) 625 { 626 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 627 uint8_t port; 628 629 /* Dump SSOWVF debug registers */ 630 for (port = 0; port < edev->nb_event_ports; port++) 631 ssows_dump(dev->data->ports[port], f); 632 } 633 634 static int 635 ssovf_start(struct rte_eventdev *dev) 636 { 637 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 638 struct ssows *ws; 639 uint8_t *base; 640 uint8_t i; 641 642 ssovf_func_trace(); 643 for (i = 0; i < edev->nb_event_ports; i++) { 644 ws = dev->data->ports[i]; 645 ssows_reset(ws); 646 ws->swtag_req = 0; 647 } 648 649 for (i = 0; i < edev->nb_event_queues; i++) { 650 /* Consume all the events through HWS0 */ 651 ssows_flush_events(dev->data->ports[0], i, NULL, NULL); 652 653 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 654 base += SSO_VHGRP_QCTL; 655 ssovf_write64(1, base); /* Enable SSO group */ 656 } 657 658 ssovf_fastpath_fns_set(dev); 659 return 0; 660 } 661 662 static void 663 ssows_handle_event(void *arg, struct rte_event event) 664 { 665 struct rte_eventdev *dev = arg; 666 667 if (dev->dev_ops->dev_stop_flush != NULL) 668 dev->dev_ops->dev_stop_flush(dev->data->dev_id, event, 669 dev->data->dev_stop_flush_arg); 670 } 671 672 static void 673 ssovf_stop(struct rte_eventdev *dev) 674 { 675 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 676 struct ssows *ws; 677 uint8_t *base; 678 uint8_t i; 679 680 ssovf_func_trace(); 681 for (i = 0; i < edev->nb_event_ports; i++) { 682 ws = dev->data->ports[i]; 683 ssows_reset(ws); 684 ws->swtag_req = 0; 685 } 686 687 for (i = 0; i < edev->nb_event_queues; i++) { 688 /* Consume all the events through HWS0 */ 689 ssows_flush_events(dev->data->ports[0], i, 690 ssows_handle_event, dev); 691 692 base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); 693 base += SSO_VHGRP_QCTL; 694 ssovf_write64(0, base); /* Disable SSO group */ 695 } 696 } 697 698 static int 699 ssovf_close(struct rte_eventdev *dev) 700 { 701 struct ssovf_evdev *edev = ssovf_pmd_priv(dev); 702 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 703 uint8_t i; 704 705 for (i = 0; i < edev->nb_event_queues; i++) 706 all_queues[i] = i; 707 708 for (i = 0; i < edev->nb_event_ports; i++) 709 ssovf_port_unlink(dev, dev->data->ports[i], all_queues, 710 edev->nb_event_queues); 711 return 0; 712 } 713 714 static int 715 ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) 716 { 717 int *flag = opaque; 718 *flag = !!atoi(value); 719 return 0; 720 } 721 722 static int 723 ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags, 724 uint32_t *caps, const struct event_timer_adapter_ops **ops) 725 { 726 return timvf_timer_adapter_caps_get(dev, flags, caps, ops, 727 timvf_enable_stats); 728 } 729 730 static int 731 ssovf_crypto_adapter_caps_get(const struct rte_eventdev *dev, 732 const struct rte_cryptodev *cdev, uint32_t *caps) 733 { 734 RTE_SET_USED(dev); 735 RTE_SET_USED(cdev); 736 737 *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | 738 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; 739 740 return 0; 741 } 742 743 static int 744 ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev, 745 const struct rte_cryptodev *cdev, 746 int32_t queue_pair_id, 747 const struct rte_event *event) 748 { 749 struct cpt_instance *qp; 750 uint8_t qp_id; 751 752 RTE_SET_USED(event); 753 754 if (queue_pair_id == -1) { 755 for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) { 756 qp = cdev->data->queue_pairs[qp_id]; 757 qp->ca_enabled = 1; 758 } 759 } else { 760 qp = cdev->data->queue_pairs[queue_pair_id]; 761 qp->ca_enabled = 1; 762 } 763 764 ssovf_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev); 765 766 return 0; 767 } 768 769 static int 770 ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev, 771 const struct rte_cryptodev *cdev, 772 int32_t queue_pair_id) 773 { 774 struct cpt_instance *qp; 775 uint8_t qp_id; 776 777 RTE_SET_USED(dev); 778 779 if (queue_pair_id == -1) { 780 for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) { 781 qp = cdev->data->queue_pairs[qp_id]; 782 qp->ca_enabled = 0; 783 } 784 } else { 785 qp = cdev->data->queue_pairs[queue_pair_id]; 786 qp->ca_enabled = 0; 787 } 788 789 return 0; 790 } 791 792 /* Initialize and register event driver with DPDK Application */ 793 static struct eventdev_ops ssovf_ops = { 794 .dev_infos_get = ssovf_info_get, 795 .dev_configure = ssovf_configure, 796 .queue_def_conf = ssovf_queue_def_conf, 797 .queue_setup = ssovf_queue_setup, 798 .queue_release = ssovf_queue_release, 799 .port_def_conf = ssovf_port_def_conf, 800 .port_setup = ssovf_port_setup, 801 .port_release = ssovf_port_release, 802 .port_link = ssovf_port_link, 803 .port_unlink = ssovf_port_unlink, 804 .timeout_ticks = ssovf_timeout_ticks, 805 806 .eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get, 807 .eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add, 808 .eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del, 809 .eth_rx_adapter_start = ssovf_eth_rx_adapter_start, 810 .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop, 811 812 .eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get, 813 .eth_tx_adapter_create = ssovf_eth_tx_adapter_create, 814 .eth_tx_adapter_free = ssovf_eth_tx_adapter_free, 815 .eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add, 816 .eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del, 817 .eth_tx_adapter_start = ssovf_eth_tx_adapter_start, 818 .eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop, 819 820 .timer_adapter_caps_get = ssovf_timvf_caps_get, 821 822 .crypto_adapter_caps_get = ssovf_crypto_adapter_caps_get, 823 .crypto_adapter_queue_pair_add = ssovf_crypto_adapter_qp_add, 824 .crypto_adapter_queue_pair_del = ssovf_crypto_adapter_qp_del, 825 826 .dev_selftest = test_eventdev_octeontx, 827 828 .dump = ssovf_dump, 829 .dev_start = ssovf_start, 830 .dev_stop = ssovf_stop, 831 .dev_close = ssovf_close 832 }; 833 834 static int 835 ssovf_vdev_probe(struct rte_vdev_device *vdev) 836 { 837 struct ssovf_info oinfo; 838 struct ssovf_mbox_dev_info info; 839 struct ssovf_evdev *edev; 840 struct rte_eventdev *eventdev; 841 static int ssovf_init_once; 842 const char *name; 843 const char *params; 844 int ret; 845 846 static const char *const args[] = { 847 TIMVF_ENABLE_STATS_ARG, 848 NULL 849 }; 850 851 name = rte_vdev_device_name(vdev); 852 /* More than one instance is not supported */ 853 if (ssovf_init_once) { 854 ssovf_log_err("Request to create >1 %s instance", name); 855 return -EINVAL; 856 } 857 858 params = rte_vdev_device_args(vdev); 859 if (params != NULL && params[0] != '\0') { 860 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args); 861 862 if (!kvlist) { 863 ssovf_log_info( 864 "Ignoring unsupported params supplied '%s'", 865 name); 866 } else { 867 ret = rte_kvargs_process(kvlist, TIMVF_ENABLE_STATS_ARG, 868 ssovf_parsekv, 869 &timvf_enable_stats); 870 if (ret != 0) { 871 ssovf_log_err("%s: Error in timvf stats", name); 872 rte_kvargs_free(kvlist); 873 return ret; 874 } 875 } 876 877 rte_kvargs_free(kvlist); 878 } 879 880 eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), 881 rte_socket_id()); 882 if (eventdev == NULL) { 883 ssovf_log_err("Failed to create eventdev vdev %s", name); 884 return -ENOMEM; 885 } 886 eventdev->dev_ops = &ssovf_ops; 887 888 timvf_set_eventdevice(eventdev); 889 890 /* For secondary processes, the primary has done all the work */ 891 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 892 ssovf_fastpath_fns_set(eventdev); 893 return 0; 894 } 895 896 octeontx_mbox_init(); 897 ret = ssovf_info(&oinfo); 898 if (ret) { 899 ssovf_log_err("Failed to probe and validate ssovfs %d", ret); 900 goto error; 901 } 902 903 edev = ssovf_pmd_priv(eventdev); 904 edev->max_event_ports = oinfo.total_ssowvfs; 905 edev->max_event_queues = oinfo.total_ssovfs; 906 edev->is_timeout_deq = 0; 907 908 ret = ssovf_mbox_dev_info(&info); 909 if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) { 910 ssovf_log_err("Failed to get mbox devinfo %d", ret); 911 goto error; 912 } 913 914 edev->min_deq_timeout_ns = info.min_deq_timeout_ns; 915 edev->max_deq_timeout_ns = info.max_deq_timeout_ns; 916 edev->max_num_events = info.max_num_events; 917 edev->available_events = info.max_num_events; 918 919 ssovf_log_dbg("min_deq_tmo=%" PRId64 " max_deq_tmo=%" PRId64 920 " max_evts=%d", 921 info.min_deq_timeout_ns, info.max_deq_timeout_ns, 922 info.max_num_events); 923 924 if (!edev->max_event_ports || !edev->max_event_queues) { 925 ssovf_log_err("Not enough eventdev resource queues=%d ports=%d", 926 edev->max_event_queues, edev->max_event_ports); 927 ret = -ENODEV; 928 goto error; 929 } 930 931 ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d", 932 name, oinfo.domain, edev->max_event_queues, 933 edev->max_event_ports); 934 935 ssovf_init_once = 1; 936 event_dev_probing_finish(eventdev); 937 return 0; 938 939 error: 940 rte_event_pmd_vdev_uninit(name); 941 return ret; 942 } 943 944 static int 945 ssovf_vdev_remove(struct rte_vdev_device *vdev) 946 { 947 const char *name; 948 949 name = rte_vdev_device_name(vdev); 950 ssovf_log_info("Closing %s", name); 951 return rte_event_pmd_vdev_uninit(name); 952 } 953 954 static struct rte_vdev_driver vdev_ssovf_pmd = { 955 .probe = ssovf_vdev_probe, 956 .remove = ssovf_vdev_remove 957 }; 958 959 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd); 960