/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>

#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"
#include "otx_cryptodev_hw_access.h"

static uint8_t timvf_enable_stats;

RTE_LOG_REGISTER_DEFAULT(otx_logtype_ssovf, NOTICE);

/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t vhgrp_id;
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.vhgrp_id = queue;
	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns =
			edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
					RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->event_port_cfg = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;
	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
			uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

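/*
 * Usage sketch (illustrative application code, not part of this driver):
 * ssows_dump() above is called once per port by ssovf_dump() further below,
 * which an application reaches through the public eventdev API, e.g.
 *
 *	rte_event_dev_dump(dev_id, stdout);
 *
 * where dev_id is assumed to be the application's event device identifier.
 */
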
static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint16_t free_idx = UINT16_MAX;
	struct octeontx_rxq *rxq;
	pki_mod_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;
	void *old_ptr;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	/* eth_octeontx only supports one rq. */
	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	/* Add rxq pool to list of used pools and reduce available events. */
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			edev->rxq_pool_rcnt[i]++;
			found = true;
			break;
		} else if (free_idx == UINT16_MAX &&
			   edev->rxq_pool_array[i] == 0) {
			free_idx = i;
		}
	}

	if (!found) {
		uint16_t idx;

		if (edev->available_events < rxq->pool->size) {
			ssovf_log_err(
				"Max available events %"PRIu32" requested events in rxq pool %"PRIu32"",
				edev->available_events, rxq->pool->size);
			return -ENOMEM;
		}

		if (free_idx != UINT16_MAX) {
			idx = free_idx;
		} else {
			old_ptr = edev->rxq_pool_array;
			edev->rxq_pools++;
			edev->rxq_pool_array = rte_realloc(
				edev->rxq_pool_array,
				sizeof(uint64_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_array == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_array = old_ptr;
				return -ENOMEM;
			}

			old_ptr = edev->rxq_pool_rcnt;
			edev->rxq_pool_rcnt = rte_realloc(
				edev->rxq_pool_rcnt,
				sizeof(uint8_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_rcnt == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_rcnt = old_ptr;
				return -ENOMEM;
			}
			idx = edev->rxq_pools - 1;
		}

		edev->rxq_pool_array[idx] = (uintptr_t)rxq->pool;
		edev->rxq_pool_rcnt[idx] = 1;
		edev->available_events -= rxq->pool->size;
	}

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	edev->rx_offload_flags = nic->rx_offload_flags;
	edev->tx_offload_flags = nic->tx_offload_flags;
	return ret;
}
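
/*
 * Usage sketch (assumed application code, not part of this driver): the
 * queue-add hook above is driven by the Rx adapter API. adapter_id,
 * eth_port_id and ev_qid are placeholders from the application's own setup.
 *
 *	struct rte_event_eth_rx_adapter_queue_conf conf = {0};
 *	conf.ev.queue_id = ev_qid;
 *	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC; // PARALLEL is rejected
 *	rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1, &conf);
 *
 * An rx_queue_id of -1 maps to queue 0 here, since eth_octeontx exposes a
 * single receive queue.
 */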

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct octeontx_rxq *rxq;
	pki_del_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;

	/* Reject non-octeontx ethdevs before touching the pool bookkeeping. */
	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			found = true;
			break;
		}
	}

	if (found) {
		edev->rxq_pool_rcnt[i]--;
		if (edev->rxq_pool_rcnt[i] == 0)
			edev->rxq_pool_array[i] = 0;
		edev->available_events += rxq->pool->size;
	}

	/* Zero the request first so the field writes below are not undone. */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
					const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
					dev->data->dev_stop_flush_arg);
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

static int
ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque)
{
	/* The target is the uint8_t timvf_enable_stats flag, so store
	 * through a matching pointer type; an int-sized store would write
	 * past the single byte.
	 */
	uint8_t *flag = opaque;
	*flag = !!atoi(value);
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}

static int
ssovf_crypto_adapter_caps_get(const struct rte_eventdev *dev,
			      const struct rte_cryptodev *cdev, uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(cdev);

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

static int
ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id,
			    const struct rte_event *event)
{
	struct cpt_instance *qp;
	uint8_t qp_id;

	RTE_SET_USED(event);

	if (queue_pair_id == -1) {
		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			qp->ca_enabled = 1;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		qp->ca_enabled = 1;
	}

	ssovf_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);

	return 0;
}

static int
ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	struct cpt_instance *qp;
	uint8_t qp_id;

	RTE_SET_USED(dev);

	if (queue_pair_id == -1) {
		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			qp->ca_enabled = 0;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		qp->ca_enabled = 0;
	}

	return 0;
}

/* Initialize and register event driver with DPDK Application */
static struct eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
	.eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
	.eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
	.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
	.eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
	.eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.crypto_adapter_caps_get = ssovf_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = ssovf_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = ssovf_crypto_adapter_qp_del,

	.dev_selftest = test_eventdev_octeontx,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;

	static const char *const args[] = {
		TIMVF_ENABLE_STATS_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			ret = rte_kvargs_process(kvlist,
					TIMVF_ENABLE_STATS_ARG,
					ssovf_parsekv,
					&timvf_enable_stats);
			if (ret != 0) {
				ssovf_log_err("%s: Error in timvf stats", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	timvf_set_eventdevice(eventdev);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	octeontx_mbox_init();
	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	edev->available_events = info.max_num_events;

	ssovf_log_dbg("min_deq_tmo=%" PRId64 " max_deq_tmo=%" PRId64
			" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	event_dev_probing_finish(eventdev);
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
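
/*
 * Example invocation (illustrative; assumes the PMD registers under the
 * vdev name "event_octeontx" and that TIMVF_ENABLE_STATS_ARG expands to
 * "timvf_stats"): the stats knob parsed by ssovf_parsekv() above is passed
 * as a vdev devarg on the EAL command line, e.g.
 *
 *	dpdk-app ... --vdev="event_octeontx,timvf_stats=1"
 */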