/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <string.h> /* memset() */

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_vdev.h>

#include "ssovf_evdev.h"

/* SSOVF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t wgt_left;	/* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
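	/*
	 * Eventdev priorities run from 0 (RTE_EVENT_DEV_PRIORITY_HIGHEST)
	 * to 255 (RTE_EVENT_DEV_PRIORITY_LOWEST), while the SSO group
	 * exposes only eight hardware priority levels; dividing by 32
	 * folds the 0-255 range down to 0-7.
	 */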
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter;	/* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || ret != len) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRIu64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->schedule = NULL;
	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
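/*
 * Queue state lives in the SSO group hardware and is programmed over the
 * mailbox (see ssovf_mbox_priority_set()); nothing is allocated per queue
 * on the host side, so queue_release below is a deliberate no-op.
 */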
static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
				const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
				uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
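/*
 * Link and unlink above program the same per-HWS register: the group
 * (queue) id goes in the low bits of SSOW_VHWS_GRPMSK_CHGX(0) and bit 24
 * selects whether membership in mask-set zero is set or cleared.
 *
 * timeout_ticks below converts a dequeue timeout in nanoseconds into the
 * unit the workslot understands, get_work iterations, via the
 * SSO_CONVERT_NS_GETWORK_ITER mailbox message.
 */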
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}
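/*
 * Teardown is two-staged: ssovf_stop() drains each group through HWS0 and
 * disables it via SSO_VHGRP_QCTL, and ssovf_close() then unlinks every
 * configured queue from each port, leaving the workslots with no group
 * membership.
 */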
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,
	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRIu64" max_deq_tmo=%"PRIu64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
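/*
 * Usage note (assumption: EVENTDEV_NAME_OCTEONTX_PMD expands to
 * "event_octeontx" in ssovf_evdev.h): with the OCTEONTX SSO PF driver
 * bound, the device is instantiated from the EAL command line, e.g.
 *
 *   app --vdev="event_octeontx" ...
 *
 * and an application can then look it up with
 * rte_event_dev_get_dev_id("event_octeontx").
 */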