/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_vdev.h>

#include "ssovf_evdev.h"

/* SSOVF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize from 0..255 to 0..7 */

	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}
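/*
 * The SSO measures dequeue timeouts in get_work iterations rather than in
 * nanoseconds, so the ns value supplied by the application is converted by
 * the other end of the mailbox (SSO_CONVERT_NS_GETWORK_ITER).
 */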
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->schedule = NULL;
	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
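/* No software state is held per event queue, so queue release is a no-op */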
static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	/* Precompute the get_work address used on the dequeue fastpath */
	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
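/*
 * Queue(group)-to-port links are programmed through the per-HWS group mask
 * change register: the written value carries the group id in the low bits,
 * and bit 24 selects whether membership in maskset zero is set or cleared.
 */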
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
				const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
				uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}
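/*
 * On close, unlink every queue from every port so that no HWS retains
 * group membership after the device is torn down.
 */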
static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,
	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};

static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resources queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
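/*
 * Usage sketch (assumption: EVENTDEV_NAME_OCTEONTX_PMD expands to
 * "event_octeontx" in ssovf_evdev.h): the PMD is instantiated from the EAL
 * command line, e.g. --vdev="event_octeontx". Only a single instance is
 * supported; a second probe fails with -EINVAL (see ssovf_init_once).
 */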