/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

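/*
 * Illustrative usage sketch of the iterator API above (example devargs value
 * only, not part of this file): an application usually drives it through
 * rte_eth_iterator_init()/rte_eth_iterator_next(), or through the
 * RTE_ETH_FOREACH_MATCHING_DEV() helper declared in rte_ethdev.h, e.g.
 *
 *	struct rte_dev_iterator it;
 *	uint16_t pid;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0)
 *		for (pid = rte_eth_iterator_next(&it);
 *		     pid != RTE_MAX_ETHPORTS;
 *		     pid = rte_eth_iterator_next(&it))
 *			printf("matched port %u\n", pid);
 *
 * When the iteration is exhausted, rte_eth_iterator_next() cleans the
 * iterator up itself, as shown in the function above.
 */
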
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

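/*
 * Illustrative sketch (example only, not a real driver) of how a PMD probe
 * path typically chooses between rte_eth_dev_allocate() above and
 * rte_eth_dev_attach_secondary() below, depending on the process type:
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 *
 * Bus-specific helpers (e.g. the PCI ethdev probe helpers) wrap this pattern
 * for drivers.
 */
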
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_done = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner id=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

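/*
 * Illustrative sketch of the ownership API above (example values only): an
 * application or library claims ports with an owner id, iterates over the
 * ports it owns, and releases the id when done, e.g.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		RTE_ETH_FOREACH_DEV_OWNED_BY(pid, owner.id)
 *			; // ports owned by this identifier
 *		rte_eth_dev_owner_delete(owner.id);
 *	}
 *
 * RTE_ETH_FOREACH_DEV_OWNED_BY() is the public iteration helper declared in
 * rte_ethdev.h on top of rte_eth_find_next_owned_by().
 */
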
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
port_id=%"PRIu16"\n", 1087 tx_queue_id, port_id); 1088 return -EINVAL; 1089 } 1090 1091 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1092 RTE_ETHDEV_LOG(INFO, 1093 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1094 tx_queue_id, port_id); 1095 return 0; 1096 } 1097 1098 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 1099 } 1100 1101 int 1102 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 1103 { 1104 struct rte_eth_dev *dev; 1105 int ret; 1106 1107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1108 dev = &rte_eth_devices[port_id]; 1109 1110 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1111 if (ret != 0) 1112 return ret; 1113 1114 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1115 1116 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1117 RTE_ETHDEV_LOG(INFO, 1118 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1119 tx_queue_id, port_id); 1120 return -EINVAL; 1121 } 1122 1123 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1124 RTE_ETHDEV_LOG(INFO, 1125 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1126 tx_queue_id, port_id); 1127 return 0; 1128 } 1129 1130 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1131 } 1132 1133 static int 1134 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1135 { 1136 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1137 void **txq; 1138 unsigned i; 1139 1140 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1141 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1142 sizeof(dev->data->tx_queues[0]) * nb_queues, 1143 RTE_CACHE_LINE_SIZE); 1144 if (dev->data->tx_queues == NULL) { 1145 dev->data->nb_tx_queues = 0; 1146 return -(ENOMEM); 1147 } 1148 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1149 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 1150 1151 txq = dev->data->tx_queues; 1152 1153 for (i = nb_queues; i < old_nb_queues; i++) 1154 (*dev->dev_ops->tx_queue_release)(txq[i]); 1155 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues, 1156 RTE_CACHE_LINE_SIZE); 1157 if (txq == NULL) 1158 return -ENOMEM; 1159 if (nb_queues > old_nb_queues) { 1160 uint16_t new_qs = nb_queues - old_nb_queues; 1161 1162 memset(txq + old_nb_queues, 0, 1163 sizeof(txq[0]) * new_qs); 1164 } 1165 1166 dev->data->tx_queues = txq; 1167 1168 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1169 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); 1170 1171 txq = dev->data->tx_queues; 1172 1173 for (i = nb_queues; i < old_nb_queues; i++) 1174 (*dev->dev_ops->tx_queue_release)(txq[i]); 1175 1176 rte_free(dev->data->tx_queues); 1177 dev->data->tx_queues = NULL; 1178 } 1179 dev->data->nb_tx_queues = nb_queues; 1180 return 0; 1181 } 1182 1183 uint32_t 1184 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1185 { 1186 switch (speed) { 1187 case ETH_SPEED_NUM_10M: 1188 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD; 1189 case ETH_SPEED_NUM_100M: 1190 return duplex ? 
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	uint16_t overhead_len;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* Get the real Ethernet overhead length */
	if (dev_info.max_mtu != UINT16_MAX &&
	    dev_info.max_rx_pktlen > dev_info.max_mtu)
		overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}

		/* Scale the MTU size to adapt max_rx_pkt_len */
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				overhead_len;
	} else {
		uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
		if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
		    pktlen > RTE_ETHER_MTU + overhead_len)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
						RTE_ETHER_MTU + overhead_len;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

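/*
 * Illustrative buffer-split configuration checked by the function below
 * (example values only; hdr_pool and pay_pool are placeholder mempool
 * pointers): with RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT enabled, an rxconf may
 * describe two Rx segments drawn from two mempools, e.g.
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0,   .offset = 0 }, // rest of packet
 *	};
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 *
 * The checks below validate segment counts, offsets and buffer sizes
 * against the device's rx_seg_capa.
 */
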
static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer, this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool(s) has a valid private data.
		 */
2022 */ 2023 if (mp->private_data_size < 2024 sizeof(struct rte_pktmbuf_pool_private)) { 2025 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2026 mp->name, mp->private_data_size, 2027 (unsigned int) 2028 sizeof(struct rte_pktmbuf_pool_private)); 2029 return -ENOSPC; 2030 } 2031 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2032 if (mbp_buf_size < dev_info.min_rx_bufsize + 2033 RTE_PKTMBUF_HEADROOM) { 2034 RTE_ETHDEV_LOG(ERR, 2035 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2036 mp->name, mbp_buf_size, 2037 RTE_PKTMBUF_HEADROOM + 2038 dev_info.min_rx_bufsize, 2039 RTE_PKTMBUF_HEADROOM, 2040 dev_info.min_rx_bufsize); 2041 return -EINVAL; 2042 } 2043 } else { 2044 const struct rte_eth_rxseg_split *rx_seg; 2045 uint16_t n_seg; 2046 2047 /* Extended multi-segment configuration check. */ 2048 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2049 RTE_ETHDEV_LOG(ERR, 2050 "Memory pool is null and no extended configuration provided\n"); 2051 return -EINVAL; 2052 } 2053 2054 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2055 n_seg = rx_conf->rx_nseg; 2056 2057 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2058 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2059 &mbp_buf_size, 2060 &dev_info); 2061 if (ret != 0) 2062 return ret; 2063 } else { 2064 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2065 return -EINVAL; 2066 } 2067 } 2068 2069 /* Use default specified by driver, if nb_rx_desc is zero */ 2070 if (nb_rx_desc == 0) { 2071 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2072 /* If driver default is also zero, fall back on EAL default */ 2073 if (nb_rx_desc == 0) 2074 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2075 } 2076 2077 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2078 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2079 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2080 2081 RTE_ETHDEV_LOG(ERR, 2082 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2083 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2084 dev_info.rx_desc_lim.nb_min, 2085 dev_info.rx_desc_lim.nb_align); 2086 return -EINVAL; 2087 } 2088 2089 if (dev->data->dev_started && 2090 !(dev_info.dev_capa & 2091 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2092 return -EBUSY; 2093 2094 if (dev->data->dev_started && 2095 (dev->data->rx_queue_state[rx_queue_id] != 2096 RTE_ETH_QUEUE_STATE_STOPPED)) 2097 return -EBUSY; 2098 2099 rxq = dev->data->rx_queues; 2100 if (rxq[rx_queue_id]) { 2101 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, 2102 -ENOTSUP); 2103 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); 2104 rxq[rx_queue_id] = NULL; 2105 } 2106 2107 if (rx_conf == NULL) 2108 rx_conf = &dev_info.default_rxconf; 2109 2110 local_conf = *rx_conf; 2111 2112 /* 2113 * If an offloading has already been enabled in 2114 * rte_eth_dev_configure(), it has been enabled on all queues, 2115 * so there is no need to enable it in this queue again. 2116 * The local_conf.offloads input to underlying PMD only carries 2117 * those offloadings which are only enabled on this queue and 2118 * not enabled on all queues. 2119 */ 2120 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2121 2122 /* 2123 * New added offloadings for this queue are those not enabled in 2124 * rte_eth_dev_configure() and they must be per-queue type. 2125 * A pure per-port offloading can't be enabled on a queue while 2126 * disabled on another queue. 
A pure per-port offloading can't 2127 * be enabled for any queue as new added one if it hasn't been 2128 * enabled in rte_eth_dev_configure(). 2129 */ 2130 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2131 local_conf.offloads) { 2132 RTE_ETHDEV_LOG(ERR, 2133 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2134 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2135 port_id, rx_queue_id, local_conf.offloads, 2136 dev_info.rx_queue_offload_capa, 2137 __func__); 2138 return -EINVAL; 2139 } 2140 2141 /* 2142 * If LRO is enabled, check that the maximum aggregated packet 2143 * size is supported by the configured device. 2144 */ 2145 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2146 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2147 dev->data->dev_conf.rxmode.max_lro_pkt_size = 2148 dev->data->dev_conf.rxmode.max_rx_pkt_len; 2149 int ret = eth_dev_check_lro_pkt_size(port_id, 2150 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2151 dev->data->dev_conf.rxmode.max_rx_pkt_len, 2152 dev_info.max_lro_pkt_size); 2153 if (ret != 0) 2154 return ret; 2155 } 2156 2157 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2158 socket_id, &local_conf, mp); 2159 if (!ret) { 2160 if (!dev->data->min_rx_buf_size || 2161 dev->data->min_rx_buf_size > mbp_buf_size) 2162 dev->data->min_rx_buf_size = mbp_buf_size; 2163 } 2164 2165 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2166 rx_conf, ret); 2167 return eth_err(port_id, ret); 2168 } 2169 2170 int 2171 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2172 uint16_t nb_rx_desc, 2173 const struct rte_eth_hairpin_conf *conf) 2174 { 2175 int ret; 2176 struct rte_eth_dev *dev; 2177 struct rte_eth_hairpin_cap cap; 2178 void **rxq; 2179 int i; 2180 int count; 2181 2182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2183 dev = &rte_eth_devices[port_id]; 2184 2185 if (rx_queue_id >= dev->data->nb_rx_queues) { 2186 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2187 return -EINVAL; 2188 } 2189 2190 if (conf == NULL) { 2191 RTE_ETHDEV_LOG(ERR, 2192 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2193 port_id); 2194 return -EINVAL; 2195 } 2196 2197 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2198 if (ret != 0) 2199 return ret; 2200 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2201 -ENOTSUP); 2202 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2203 if (nb_rx_desc == 0) 2204 nb_rx_desc = cap.max_nb_desc; 2205 if (nb_rx_desc > cap.max_nb_desc) { 2206 RTE_ETHDEV_LOG(ERR, 2207 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2208 nb_rx_desc, cap.max_nb_desc); 2209 return -EINVAL; 2210 } 2211 if (conf->peer_count > cap.max_rx_2_tx) { 2212 RTE_ETHDEV_LOG(ERR, 2213 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2214 conf->peer_count, cap.max_rx_2_tx); 2215 return -EINVAL; 2216 } 2217 if (conf->peer_count == 0) { 2218 RTE_ETHDEV_LOG(ERR, 2219 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2220 conf->peer_count); 2221 return -EINVAL; 2222 } 2223 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2224 cap.max_nb_queues != UINT16_MAX; i++) { 2225 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2226 count++; 2227 } 2228 if (count > cap.max_nb_queues) { 2229 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2230 cap.max_nb_queues); 2231 return -EINVAL; 2232 } 2233 if (dev->data->dev_started) 2234 return -EBUSY; 2235 rxq = dev->data->rx_queues; 2236 if (rxq[rx_queue_id] != NULL) { 2237 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, 2238 -ENOTSUP); 2239 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); 2240 rxq[rx_queue_id] = NULL; 2241 } 2242 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2243 nb_rx_desc, conf); 2244 if (ret == 0) 2245 dev->data->rx_queue_state[rx_queue_id] = 2246 RTE_ETH_QUEUE_STATE_HAIRPIN; 2247 return eth_err(port_id, ret); 2248 } 2249 2250 int 2251 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2252 uint16_t nb_tx_desc, unsigned int socket_id, 2253 const struct rte_eth_txconf *tx_conf) 2254 { 2255 struct rte_eth_dev *dev; 2256 struct rte_eth_dev_info dev_info; 2257 struct rte_eth_txconf local_conf; 2258 void **txq; 2259 int ret; 2260 2261 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2262 dev = &rte_eth_devices[port_id]; 2263 2264 if (tx_queue_id >= dev->data->nb_tx_queues) { 2265 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2266 return -EINVAL; 2267 } 2268 2269 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2270 2271 ret = rte_eth_dev_info_get(port_id, &dev_info); 2272 if (ret != 0) 2273 return ret; 2274 2275 /* Use default specified by driver, if nb_tx_desc is zero */ 2276 if (nb_tx_desc == 0) { 2277 nb_tx_desc = dev_info.default_txportconf.ring_size; 2278 /* If driver default is zero, fall back on EAL default */ 2279 if (nb_tx_desc == 0) 2280 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2281 } 2282 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2283 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2284 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2285 RTE_ETHDEV_LOG(ERR, 2286 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2287 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2288 dev_info.tx_desc_lim.nb_min, 2289 dev_info.tx_desc_lim.nb_align); 2290 return -EINVAL; 2291 } 2292 2293 if (dev->data->dev_started && 2294 !(dev_info.dev_capa & 2295 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2296 return -EBUSY; 2297 2298 if (dev->data->dev_started && 2299 (dev->data->tx_queue_state[tx_queue_id] != 2300 RTE_ETH_QUEUE_STATE_STOPPED)) 2301 return -EBUSY; 2302 2303 txq = dev->data->tx_queues; 2304 if (txq[tx_queue_id]) { 2305 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, 2306 -ENOTSUP); 2307 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); 2308 txq[tx_queue_id] = NULL; 2309 } 2310 2311 if (tx_conf ==
NULL) 2312 tx_conf = &dev_info.default_txconf; 2313 2314 local_conf = *tx_conf; 2315 2316 /* 2317 * If an offloading has already been enabled in 2318 * rte_eth_dev_configure(), it has been enabled on all queues, 2319 * so there is no need to enable it in this queue again. 2320 * The local_conf.offloads input to underlying PMD only carries 2321 * those offloadings which are only enabled on this queue and 2322 * not enabled on all queues. 2323 */ 2324 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2325 2326 /* 2327 * New added offloadings for this queue are those not enabled in 2328 * rte_eth_dev_configure() and they must be per-queue type. 2329 * A pure per-port offloading can't be enabled on a queue while 2330 * disabled on another queue. A pure per-port offloading can't 2331 * be enabled for any queue as new added one if it hasn't been 2332 * enabled in rte_eth_dev_configure(). 2333 */ 2334 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2335 local_conf.offloads) { 2336 RTE_ETHDEV_LOG(ERR, 2337 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2338 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2339 port_id, tx_queue_id, local_conf.offloads, 2340 dev_info.tx_queue_offload_capa, 2341 __func__); 2342 return -EINVAL; 2343 } 2344 2345 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2346 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2347 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2348 } 2349 2350 int 2351 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2352 uint16_t nb_tx_desc, 2353 const struct rte_eth_hairpin_conf *conf) 2354 { 2355 struct rte_eth_dev *dev; 2356 struct rte_eth_hairpin_cap cap; 2357 void **txq; 2358 int i; 2359 int count; 2360 int ret; 2361 2362 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2363 dev = &rte_eth_devices[port_id]; 2364 2365 if (tx_queue_id >= dev->data->nb_tx_queues) { 2366 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id); 2367 return -EINVAL; 2368 } 2369 2370 if (conf == NULL) { 2371 RTE_ETHDEV_LOG(ERR, 2372 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2373 port_id); 2374 return -EINVAL; 2375 } 2376 2377 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2378 if (ret != 0) 2379 return ret; 2380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2381 -ENOTSUP); 2382 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2383 if (nb_tx_desc == 0) 2384 nb_tx_desc = cap.max_nb_desc; 2385 if (nb_tx_desc > cap.max_nb_desc) { 2386 RTE_ETHDEV_LOG(ERR, 2387 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2388 nb_tx_desc, cap.max_nb_desc); 2389 return -EINVAL; 2390 } 2391 if (conf->peer_count > cap.max_tx_2_rx) { 2392 RTE_ETHDEV_LOG(ERR, 2393 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2394 conf->peer_count, cap.max_tx_2_rx); 2395 return -EINVAL; 2396 } 2397 if (conf->peer_count == 0) { 2398 RTE_ETHDEV_LOG(ERR, 2399 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2400 conf->peer_count); 2401 return -EINVAL; 2402 } 2403 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2404 cap.max_nb_queues != UINT16_MAX; i++) { 2405 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2406 count++; 2407 } 2408 if (count > cap.max_nb_queues) { 2409 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2410 cap.max_nb_queues); 2411 return -EINVAL; 2412 } 2413 if (dev->data->dev_started) 2414 return -EBUSY; 2415 txq = dev->data->tx_queues; 2416 if (txq[tx_queue_id] != NULL) { 2417 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, 2418 -ENOTSUP); 2419 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); 2420 txq[tx_queue_id] = NULL; 2421 } 2422 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2423 (dev, tx_queue_id, nb_tx_desc, conf); 2424 if (ret == 0) 2425 dev->data->tx_queue_state[tx_queue_id] = 2426 RTE_ETH_QUEUE_STATE_HAIRPIN; 2427 return eth_err(port_id, ret); 2428 } 2429 2430 int 2431 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2432 { 2433 struct rte_eth_dev *dev; 2434 int ret; 2435 2436 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2437 dev = &rte_eth_devices[tx_port]; 2438 2439 if (dev->data->dev_started == 0) { 2440 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2441 return -EBUSY; 2442 } 2443 2444 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2445 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2446 if (ret != 0) 2447 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2448 " to Rx %d (%d - all ports)\n", 2449 tx_port, rx_port, RTE_MAX_ETHPORTS); 2450 2451 return ret; 2452 } 2453 2454 int 2455 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2456 { 2457 struct rte_eth_dev *dev; 2458 int ret; 2459 2460 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2461 dev = &rte_eth_devices[tx_port]; 2462 2463 if (dev->data->dev_started == 0) { 2464 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2465 return -EBUSY; 2466 } 2467 2468 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2469 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2470 if (ret != 0) 2471 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2472 " from Rx %d (%d - all ports)\n", 2473 tx_port, rx_port, RTE_MAX_ETHPORTS); 2474 2475 return ret; 2476 } 2477 2478 int 2479 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2480 size_t len, uint32_t direction) 2481 { 2482 struct rte_eth_dev *dev; 2483 int ret; 2484 2485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2486 dev = &rte_eth_devices[port_id]; 2487 2488 if (peer_ports == NULL) { 2489 RTE_ETHDEV_LOG(ERR, 2490 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2491 port_id); 2492 return -EINVAL; 2493 } 2494 2495 if (len == 0) { 2496 RTE_ETHDEV_LOG(ERR, 2497 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2498 port_id); 2499 return -EINVAL; 2500 } 2501 2502
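/*
 * Note: as the error log below suggests, a non-zero direction queries
 * the peer Rx ports seen from this port's Tx queues, while zero queries
 * the peer Tx ports seen from its Rx queues.
 */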
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2503 -ENOTSUP); 2504 2505 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2506 len, direction); 2507 if (ret < 0) 2508 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2509 port_id, direction ? "Rx" : "Tx"); 2510 2511 return ret; 2512 } 2513 2514 void 2515 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2516 void *userdata __rte_unused) 2517 { 2518 rte_pktmbuf_free_bulk(pkts, unsent); 2519 } 2520 2521 void 2522 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2523 void *userdata) 2524 { 2525 uint64_t *count = userdata; 2526 2527 rte_pktmbuf_free_bulk(pkts, unsent); 2528 *count += unsent; 2529 } 2530 2531 int 2532 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2533 buffer_tx_error_fn cbfn, void *userdata) 2534 { 2535 if (buffer == NULL) { 2536 RTE_ETHDEV_LOG(ERR, 2537 "Cannot set Tx buffer error callback to NULL buffer\n"); 2538 return -EINVAL; 2539 } 2540 2541 buffer->error_callback = cbfn; 2542 buffer->error_userdata = userdata; 2543 return 0; 2544 } 2545 2546 int 2547 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2548 { 2549 int ret = 0; 2550 2551 if (buffer == NULL) { 2552 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2553 return -EINVAL; 2554 } 2555 2556 buffer->size = size; 2557 if (buffer->error_callback == NULL) { 2558 ret = rte_eth_tx_buffer_set_err_callback( 2559 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2560 } 2561 2562 return ret; 2563 } 2564 2565 int 2566 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2567 { 2568 struct rte_eth_dev *dev; 2569 int ret; 2570 2571 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2572 dev = &rte_eth_devices[port_id]; 2573 2574 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2575 2576 /* Call driver to free pending mbufs. */ 2577 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2578 free_cnt); 2579 return eth_err(port_id, ret); 2580 } 2581 2582 int 2583 rte_eth_promiscuous_enable(uint16_t port_id) 2584 { 2585 struct rte_eth_dev *dev; 2586 int diag = 0; 2587 2588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2589 dev = &rte_eth_devices[port_id]; 2590 2591 if (dev->data->promiscuous == 1) 2592 return 0; 2593 2594 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2595 2596 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2597 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2598 2599 return eth_err(port_id, diag); 2600 } 2601 2602 int 2603 rte_eth_promiscuous_disable(uint16_t port_id) 2604 { 2605 struct rte_eth_dev *dev; 2606 int diag = 0; 2607 2608 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2609 dev = &rte_eth_devices[port_id]; 2610 2611 if (dev->data->promiscuous == 0) 2612 return 0; 2613 2614 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2615 2616 dev->data->promiscuous = 0; 2617 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2618 if (diag != 0) 2619 dev->data->promiscuous = 1; 2620 2621 return eth_err(port_id, diag); 2622 } 2623 2624 int 2625 rte_eth_promiscuous_get(uint16_t port_id) 2626 { 2627 struct rte_eth_dev *dev; 2628 2629 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2630 dev = &rte_eth_devices[port_id]; 2631 2632 return dev->data->promiscuous; 2633 } 2634 2635 int 2636 rte_eth_allmulticast_enable(uint16_t port_id) 2637 { 2638 struct rte_eth_dev *dev; 2639 int diag; 2640 2641 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2642 dev = &rte_eth_devices[port_id]; 2643 2644 if (dev->data->all_multicast == 1) 2645 return 0; 2646 2647 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2648 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2649 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2650 2651 return eth_err(port_id, diag); 2652 } 2653 2654 int 2655 rte_eth_allmulticast_disable(uint16_t port_id) 2656 { 2657 struct rte_eth_dev *dev; 2658 int diag; 2659 2660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2661 dev = &rte_eth_devices[port_id]; 2662 2663 if (dev->data->all_multicast == 0) 2664 return 0; 2665 2666 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2667 dev->data->all_multicast = 0; 2668 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2669 if (diag != 0) 2670 dev->data->all_multicast = 1; 2671 2672 return eth_err(port_id, diag); 2673 } 2674 2675 int 2676 rte_eth_allmulticast_get(uint16_t port_id) 2677 { 2678 struct rte_eth_dev *dev; 2679 2680 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2681 dev = &rte_eth_devices[port_id]; 2682 2683 return dev->data->all_multicast; 2684 } 2685 2686 int 2687 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2688 { 2689 struct rte_eth_dev *dev; 2690 2691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2692 dev = &rte_eth_devices[port_id]; 2693 2694 if (eth_link == NULL) { 2695 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2696 port_id); 2697 return -EINVAL; 2698 } 2699 2700 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2701 rte_eth_linkstatus_get(dev, eth_link); 2702 else { 2703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2704 (*dev->dev_ops->link_update)(dev, 1); 2705 *eth_link = dev->data->dev_link; 2706 } 2707 2708 return 0; 2709 } 2710 2711 int 2712 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2713 { 2714 struct rte_eth_dev *dev; 2715 2716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2717 dev = &rte_eth_devices[port_id]; 2718 2719 if (eth_link == NULL) { 2720 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2721 port_id); 2722 return -EINVAL; 2723 } 2724 2725 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2726 rte_eth_linkstatus_get(dev, eth_link); 2727 else { 2728 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2729 (*dev->dev_ops->link_update)(dev, 0); 2730 *eth_link = dev->data->dev_link; 2731 } 2732 2733 return 0; 2734 } 2735 2736 const 
char * 2737 rte_eth_link_speed_to_str(uint32_t link_speed) 2738 { 2739 switch (link_speed) { 2740 case ETH_SPEED_NUM_NONE: return "None"; 2741 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2742 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2743 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2744 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2745 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2746 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2747 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2748 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2749 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2750 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2751 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2752 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2753 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2754 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2755 default: return "Invalid"; 2756 } 2757 } 2758 2759 int 2760 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2761 { 2762 if (str == NULL) { 2763 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2764 return -EINVAL; 2765 } 2766 2767 if (len == 0) { 2768 RTE_ETHDEV_LOG(ERR, 2769 "Cannot convert link to string with zero size\n"); 2770 return -EINVAL; 2771 } 2772 2773 if (eth_link == NULL) { 2774 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2775 return -EINVAL; 2776 } 2777 2778 if (eth_link->link_status == ETH_LINK_DOWN) 2779 return snprintf(str, len, "Link down"); 2780 else 2781 return snprintf(str, len, "Link up at %s %s %s", 2782 rte_eth_link_speed_to_str(eth_link->link_speed), 2783 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2784 "FDX" : "HDX", 2785 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2786 "Autoneg" : "Fixed"); 2787 } 2788 2789 int 2790 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2791 { 2792 struct rte_eth_dev *dev; 2793 2794 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2795 dev = &rte_eth_devices[port_id]; 2796 2797 if (stats == NULL) { 2798 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2799 port_id); 2800 return -EINVAL; 2801 } 2802 2803 memset(stats, 0, sizeof(*stats)); 2804 2805 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2806 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2807 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2808 } 2809 2810 int 2811 rte_eth_stats_reset(uint16_t port_id) 2812 { 2813 struct rte_eth_dev *dev; 2814 int ret; 2815 2816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2817 dev = &rte_eth_devices[port_id]; 2818 2819 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2820 ret = (*dev->dev_ops->stats_reset)(dev); 2821 if (ret != 0) 2822 return eth_err(port_id, ret); 2823 2824 dev->data->rx_mbuf_alloc_failed = 0; 2825 2826 return 0; 2827 } 2828 2829 static inline int 2830 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2831 { 2832 uint16_t nb_rxqs, nb_txqs; 2833 int count; 2834 2835 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2836 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2837 2838 count = RTE_NB_STATS; 2839 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2840 count += nb_rxqs * RTE_NB_RXQ_STATS; 2841 count += nb_txqs * RTE_NB_TXQ_STATS; 2842 } 2843 2844 return count; 2845 } 2846 2847 static int 2848 eth_dev_get_xstats_count(uint16_t port_id) 2849 { 2850 struct rte_eth_dev *dev; 2851 int count; 2852 2853 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2854 dev = &rte_eth_devices[port_id]; 2855 if 
(dev->dev_ops->xstats_get_names_by_id != NULL) { 2856 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, 2857 NULL, 0); 2858 if (count < 0) 2859 return eth_err(port_id, count); 2860 } 2861 if (dev->dev_ops->xstats_get_names != NULL) { 2862 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2863 if (count < 0) 2864 return eth_err(port_id, count); 2865 } else 2866 count = 0; 2867 2868 2869 count += eth_dev_get_xstats_basic_count(dev); 2870 2871 return count; 2872 } 2873 2874 int 2875 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2876 uint64_t *id) 2877 { 2878 int cnt_xstats, idx_xstat; 2879 2880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2881 2882 if (xstat_name == NULL) { 2883 RTE_ETHDEV_LOG(ERR, 2884 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2885 port_id); 2886 return -ENOMEM; 2887 } 2888 2889 if (id == NULL) { 2890 RTE_ETHDEV_LOG(ERR, 2891 "Cannot get ethdev port %u xstats ID to NULL\n", 2892 port_id); 2893 return -ENOMEM; 2894 } 2895 2896 /* Get count */ 2897 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2898 if (cnt_xstats < 0) { 2899 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2900 return -ENODEV; 2901 } 2902 2903 /* Get id-name lookup table */ 2904 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2905 2906 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2907 port_id, xstats_names, cnt_xstats, NULL)) { 2908 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2909 return -1; 2910 } 2911 2912 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2913 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2914 *id = idx_xstat; 2915 return 0; 2916 }; 2917 } 2918 2919 return -EINVAL; 2920 } 2921 2922 /* retrieve basic stats names */ 2923 static int 2924 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2925 struct rte_eth_xstat_name *xstats_names) 2926 { 2927 int cnt_used_entries = 0; 2928 uint32_t idx, id_queue; 2929 uint16_t num_q; 2930 2931 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2932 strlcpy(xstats_names[cnt_used_entries].name, 2933 eth_dev_stats_strings[idx].name, 2934 sizeof(xstats_names[0].name)); 2935 cnt_used_entries++; 2936 } 2937 2938 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2939 return cnt_used_entries; 2940 2941 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2942 for (id_queue = 0; id_queue < num_q; id_queue++) { 2943 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2944 snprintf(xstats_names[cnt_used_entries].name, 2945 sizeof(xstats_names[0].name), 2946 "rx_q%u_%s", 2947 id_queue, eth_dev_rxq_stats_strings[idx].name); 2948 cnt_used_entries++; 2949 } 2950 2951 } 2952 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2953 for (id_queue = 0; id_queue < num_q; id_queue++) { 2954 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2955 snprintf(xstats_names[cnt_used_entries].name, 2956 sizeof(xstats_names[0].name), 2957 "tx_q%u_%s", 2958 id_queue, eth_dev_txq_stats_strings[idx].name); 2959 cnt_used_entries++; 2960 } 2961 } 2962 return cnt_used_entries; 2963 } 2964 2965 /* retrieve ethdev extended statistics names */ 2966 int 2967 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2968 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2969 uint64_t *ids) 2970 { 2971 struct rte_eth_xstat_name *xstats_names_copy; 2972 unsigned int no_basic_stat_requested = 1; 2973 unsigned int no_ext_stat_requested = 1; 2974 unsigned int expected_entries; 2975 unsigned int basic_count; 2976 struct rte_eth_dev *dev; 
2977 unsigned int i; 2978 int ret; 2979 2980 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2981 dev = &rte_eth_devices[port_id]; 2982 2983 basic_count = eth_dev_get_xstats_basic_count(dev); 2984 ret = eth_dev_get_xstats_count(port_id); 2985 if (ret < 0) 2986 return ret; 2987 expected_entries = (unsigned int)ret; 2988 2989 /* Return max number of stats if no ids given */ 2990 if (!ids) { 2991 if (!xstats_names) 2992 return expected_entries; 2993 else if (xstats_names && size < expected_entries) 2994 return expected_entries; 2995 } 2996 2997 if (ids && !xstats_names) 2998 return -EINVAL; 2999 3000 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3001 uint64_t ids_copy[size]; 3002 3003 for (i = 0; i < size; i++) { 3004 if (ids[i] < basic_count) { 3005 no_basic_stat_requested = 0; 3006 break; 3007 } 3008 3009 /* 3010 * Convert ids to xstats ids that PMD knows. 3011 * ids known by user are basic + extended stats. 3012 */ 3013 ids_copy[i] = ids[i] - basic_count; 3014 } 3015 3016 if (no_basic_stat_requested) 3017 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3018 xstats_names, ids_copy, size); 3019 } 3020 3021 /* Retrieve all stats */ 3022 if (!ids) { 3023 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3024 expected_entries); 3025 if (num_stats < 0 || num_stats > (int)expected_entries) 3026 return num_stats; 3027 else 3028 return expected_entries; 3029 } 3030 3031 xstats_names_copy = calloc(expected_entries, 3032 sizeof(struct rte_eth_xstat_name)); 3033 3034 if (!xstats_names_copy) { 3035 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3036 return -ENOMEM; 3037 } 3038 3039 if (ids) { 3040 for (i = 0; i < size; i++) { 3041 if (ids[i] >= basic_count) { 3042 no_ext_stat_requested = 0; 3043 break; 3044 } 3045 } 3046 } 3047 3048 /* Fill xstats_names_copy structure */ 3049 if (ids && no_ext_stat_requested) { 3050 eth_basic_stats_get_names(dev, xstats_names_copy); 3051 } else { 3052 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3053 expected_entries); 3054 if (ret < 0) { 3055 free(xstats_names_copy); 3056 return ret; 3057 } 3058 } 3059 3060 /* Filter stats */ 3061 for (i = 0; i < size; i++) { 3062 if (ids[i] >= expected_entries) { 3063 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3064 free(xstats_names_copy); 3065 return -1; 3066 } 3067 xstats_names[i] = xstats_names_copy[ids[i]]; 3068 } 3069 3070 free(xstats_names_copy); 3071 return size; 3072 } 3073 3074 int 3075 rte_eth_xstats_get_names(uint16_t port_id, 3076 struct rte_eth_xstat_name *xstats_names, 3077 unsigned int size) 3078 { 3079 struct rte_eth_dev *dev; 3080 int cnt_used_entries; 3081 int cnt_expected_entries; 3082 int cnt_driver_entries; 3083 3084 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3085 if (xstats_names == NULL || cnt_expected_entries < 0 || 3086 (int)size < cnt_expected_entries) 3087 return cnt_expected_entries; 3088 3089 /* port_id checked in eth_dev_get_xstats_count() */ 3090 dev = &rte_eth_devices[port_id]; 3091 3092 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3093 3094 if (dev->dev_ops->xstats_get_names != NULL) { 3095 /* If there are any driver-specific xstats, append them 3096 * to end of list. 
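 * Their entries therefore start right after the generic statistics,
 * i.e. at the index returned by eth_dev_get_xstats_basic_count().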
3097 */ 3098 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3099 dev, 3100 xstats_names + cnt_used_entries, 3101 size - cnt_used_entries); 3102 if (cnt_driver_entries < 0) 3103 return eth_err(port_id, cnt_driver_entries); 3104 cnt_used_entries += cnt_driver_entries; 3105 } 3106 3107 return cnt_used_entries; 3108 } 3109 3110 3111 static int 3112 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3113 { 3114 struct rte_eth_dev *dev; 3115 struct rte_eth_stats eth_stats; 3116 unsigned int count = 0, i, q; 3117 uint64_t val, *stats_ptr; 3118 uint16_t nb_rxqs, nb_txqs; 3119 int ret; 3120 3121 ret = rte_eth_stats_get(port_id, &eth_stats); 3122 if (ret < 0) 3123 return ret; 3124 3125 dev = &rte_eth_devices[port_id]; 3126 3127 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3128 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3129 3130 /* global stats */ 3131 for (i = 0; i < RTE_NB_STATS; i++) { 3132 stats_ptr = RTE_PTR_ADD(&eth_stats, 3133 eth_dev_stats_strings[i].offset); 3134 val = *stats_ptr; 3135 xstats[count++].value = val; 3136 } 3137 3138 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3139 return count; 3140 3141 /* per-rxq stats */ 3142 for (q = 0; q < nb_rxqs; q++) { 3143 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3144 stats_ptr = RTE_PTR_ADD(&eth_stats, 3145 eth_dev_rxq_stats_strings[i].offset + 3146 q * sizeof(uint64_t)); 3147 val = *stats_ptr; 3148 xstats[count++].value = val; 3149 } 3150 } 3151 3152 /* per-txq stats */ 3153 for (q = 0; q < nb_txqs; q++) { 3154 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3155 stats_ptr = RTE_PTR_ADD(&eth_stats, 3156 eth_dev_txq_stats_strings[i].offset + 3157 q * sizeof(uint64_t)); 3158 val = *stats_ptr; 3159 xstats[count++].value = val; 3160 } 3161 } 3162 return count; 3163 } 3164 3165 /* retrieve ethdev extended statistics */ 3166 int 3167 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3168 uint64_t *values, unsigned int size) 3169 { 3170 unsigned int no_basic_stat_requested = 1; 3171 unsigned int no_ext_stat_requested = 1; 3172 unsigned int num_xstats_filled; 3173 unsigned int basic_count; 3174 uint16_t expected_entries; 3175 struct rte_eth_dev *dev; 3176 unsigned int i; 3177 int ret; 3178 3179 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3180 dev = &rte_eth_devices[port_id]; 3181 3182 ret = eth_dev_get_xstats_count(port_id); 3183 if (ret < 0) 3184 return ret; 3185 expected_entries = (uint16_t)ret; 3186 struct rte_eth_xstat xstats[expected_entries]; 3187 basic_count = eth_dev_get_xstats_basic_count(dev); 3188 3189 /* Return max number of stats if no ids given */ 3190 if (!ids) { 3191 if (!values) 3192 return expected_entries; 3193 else if (values && size < expected_entries) 3194 return expected_entries; 3195 } 3196 3197 if (ids && !values) 3198 return -EINVAL; 3199 3200 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3201 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3202 uint64_t ids_copy[size]; 3203 3204 for (i = 0; i < size; i++) { 3205 if (ids[i] < basic_count) { 3206 no_basic_stat_requested = 0; 3207 break; 3208 } 3209 3210 /* 3211 * Convert ids to xstats ids that PMD knows. 3212 * ids known by user are basic + extended stats.
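 * For example (illustrative values only): with 8 basic statistics
 * reported by eth_dev_get_xstats_basic_count(), a caller-visible id of
 * 10 maps to driver xstat id 2.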
3213 */ 3214 ids_copy[i] = ids[i] - basic_count; 3215 } 3216 3217 if (no_basic_stat_requested) 3218 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3219 values, size); 3220 } 3221 3222 if (ids) { 3223 for (i = 0; i < size; i++) { 3224 if (ids[i] >= basic_count) { 3225 no_ext_stat_requested = 0; 3226 break; 3227 } 3228 } 3229 } 3230 3231 /* Fill the xstats structure */ 3232 if (ids && no_ext_stat_requested) 3233 ret = eth_basic_stats_get(port_id, xstats); 3234 else 3235 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3236 3237 if (ret < 0) 3238 return ret; 3239 num_xstats_filled = (unsigned int)ret; 3240 3241 /* Return all stats */ 3242 if (!ids) { 3243 for (i = 0; i < num_xstats_filled; i++) 3244 values[i] = xstats[i].value; 3245 return expected_entries; 3246 } 3247 3248 /* Filter stats */ 3249 for (i = 0; i < size; i++) { 3250 if (ids[i] >= expected_entries) { 3251 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3252 return -1; 3253 } 3254 values[i] = xstats[ids[i]].value; 3255 } 3256 return size; 3257 } 3258 3259 int 3260 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3261 unsigned int n) 3262 { 3263 struct rte_eth_dev *dev; 3264 unsigned int count = 0, i; 3265 signed int xcount = 0; 3266 uint16_t nb_rxqs, nb_txqs; 3267 int ret; 3268 3269 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3270 dev = &rte_eth_devices[port_id]; 3271 3272 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3273 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3274 3275 /* Return generic statistics */ 3276 count = RTE_NB_STATS; 3277 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3278 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3279 3280 /* implemented by the driver */ 3281 if (dev->dev_ops->xstats_get != NULL) { 3282 /* Retrieve the xstats from the driver at the end of the 3283 * xstats struct. 3284 */ 3285 xcount = (*dev->dev_ops->xstats_get)(dev, 3286 xstats ? xstats + count : NULL, 3287 (n > count) ? 
n - count : 0); 3288 3289 if (xcount < 0) 3290 return eth_err(port_id, xcount); 3291 } 3292 3293 if (n < count + xcount || xstats == NULL) 3294 return count + xcount; 3295 3296 /* now fill the xstats structure */ 3297 ret = eth_basic_stats_get(port_id, xstats); 3298 if (ret < 0) 3299 return ret; 3300 count = ret; 3301 3302 for (i = 0; i < count; i++) 3303 xstats[i].id = i; 3304 /* add an offset to driver-specific stats */ 3305 for ( ; i < count + xcount; i++) 3306 xstats[i].id += count; 3307 3308 return count + xcount; 3309 } 3310 3311 /* reset ethdev extended statistics */ 3312 int 3313 rte_eth_xstats_reset(uint16_t port_id) 3314 { 3315 struct rte_eth_dev *dev; 3316 3317 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3318 dev = &rte_eth_devices[port_id]; 3319 3320 /* implemented by the driver */ 3321 if (dev->dev_ops->xstats_reset != NULL) 3322 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3323 3324 /* fallback to default */ 3325 return rte_eth_stats_reset(port_id); 3326 } 3327 3328 static int 3329 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3330 uint8_t stat_idx, uint8_t is_rx) 3331 { 3332 struct rte_eth_dev *dev; 3333 3334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3335 dev = &rte_eth_devices[port_id]; 3336 3337 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3338 return -EINVAL; 3339 3340 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3341 return -EINVAL; 3342 3343 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3344 return -EINVAL; 3345 3346 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3347 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3348 } 3349 3350 int 3351 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3352 uint8_t stat_idx) 3353 { 3354 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3355 tx_queue_id, 3356 stat_idx, STAT_QMAP_TX)); 3357 } 3358 3359 int 3360 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3361 uint8_t stat_idx) 3362 { 3363 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3364 rx_queue_id, 3365 stat_idx, STAT_QMAP_RX)); 3366 } 3367 3368 int 3369 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3370 { 3371 struct rte_eth_dev *dev; 3372 3373 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3374 dev = &rte_eth_devices[port_id]; 3375 3376 if (fw_version == NULL && fw_size > 0) { 3377 RTE_ETHDEV_LOG(ERR, 3378 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3379 port_id); 3380 return -EINVAL; 3381 } 3382 3383 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3384 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3385 fw_version, fw_size)); 3386 } 3387 3388 int 3389 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3390 { 3391 struct rte_eth_dev *dev; 3392 const struct rte_eth_desc_lim lim = { 3393 .nb_max = UINT16_MAX, 3394 .nb_min = 0, 3395 .nb_align = 1, 3396 .nb_seg_max = UINT16_MAX, 3397 .nb_mtu_seg_max = UINT16_MAX, 3398 }; 3399 int diag; 3400 3401 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3402 dev = &rte_eth_devices[port_id]; 3403 3404 if (dev_info == NULL) { 3405 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3406 port_id); 3407 return -EINVAL; 3408 } 3409 3410 /* 3411 * Init dev_info before port_id check since caller does not have 3412 * return status and does not know if get is successful or not. 
3413 */ 3414 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3415 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3416 3417 dev_info->rx_desc_lim = lim; 3418 dev_info->tx_desc_lim = lim; 3419 dev_info->device = dev->device; 3420 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3421 dev_info->max_mtu = UINT16_MAX; 3422 3423 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3424 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3425 if (diag != 0) { 3426 /* Cleanup already filled in device information */ 3427 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3428 return eth_err(port_id, diag); 3429 } 3430 3431 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3432 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3433 RTE_MAX_QUEUES_PER_PORT); 3434 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3435 RTE_MAX_QUEUES_PER_PORT); 3436 3437 dev_info->driver_name = dev->device->driver->name; 3438 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3439 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3440 3441 dev_info->dev_flags = &dev->data->dev_flags; 3442 3443 return 0; 3444 } 3445 3446 int 3447 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3448 uint32_t *ptypes, int num) 3449 { 3450 int i, j; 3451 struct rte_eth_dev *dev; 3452 const uint32_t *all_ptypes; 3453 3454 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3455 dev = &rte_eth_devices[port_id]; 3456 3457 if (ptypes == NULL && num > 0) { 3458 RTE_ETHDEV_LOG(ERR, 3459 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3460 port_id); 3461 return -EINVAL; 3462 } 3463 3464 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3465 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3466 3467 if (!all_ptypes) 3468 return 0; 3469 3470 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3471 if (all_ptypes[i] & ptype_mask) { 3472 if (j < num) 3473 ptypes[j] = all_ptypes[i]; 3474 j++; 3475 } 3476 3477 return j; 3478 } 3479 3480 int 3481 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3482 uint32_t *set_ptypes, unsigned int num) 3483 { 3484 const uint32_t valid_ptype_masks[] = { 3485 RTE_PTYPE_L2_MASK, 3486 RTE_PTYPE_L3_MASK, 3487 RTE_PTYPE_L4_MASK, 3488 RTE_PTYPE_TUNNEL_MASK, 3489 RTE_PTYPE_INNER_L2_MASK, 3490 RTE_PTYPE_INNER_L3_MASK, 3491 RTE_PTYPE_INNER_L4_MASK, 3492 }; 3493 const uint32_t *all_ptypes; 3494 struct rte_eth_dev *dev; 3495 uint32_t unused_mask; 3496 unsigned int i, j; 3497 int ret; 3498 3499 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3500 dev = &rte_eth_devices[port_id]; 3501 3502 if (num > 0 && set_ptypes == NULL) { 3503 RTE_ETHDEV_LOG(ERR, 3504 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3505 port_id); 3506 return -EINVAL; 3507 } 3508 3509 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3510 *dev->dev_ops->dev_ptypes_set == NULL) { 3511 ret = 0; 3512 goto ptype_unknown; 3513 } 3514 3515 if (ptype_mask == 0) { 3516 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3517 ptype_mask); 3518 goto ptype_unknown; 3519 } 3520 3521 unused_mask = ptype_mask; 3522 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3523 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3524 if (mask && mask != valid_ptype_masks[i]) { 3525 ret = -EINVAL; 3526 goto ptype_unknown; 3527 } 3528 unused_mask &= ~valid_ptype_masks[i]; 3529 } 3530 3531 if (unused_mask) { 3532 ret = -EINVAL; 3533 goto ptype_unknown; 3534 } 3535 
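/*
 * Only whole layer masks pass the validation above: for instance,
 * RTE_PTYPE_L4_MASK (all L4 types) is accepted, whereas a single type
 * such as RTE_PTYPE_L4_UDP leaves a partial mask and is rejected with
 * -EINVAL.
 */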
3536 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3537 if (all_ptypes == NULL) { 3538 ret = 0; 3539 goto ptype_unknown; 3540 } 3541 3542 /* 3543 * Accommodate as many set_ptypes as possible. If the supplied 3544 * set_ptypes array is insufficient fill it partially. 3545 */ 3546 for (i = 0, j = 0; set_ptypes != NULL && 3547 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3548 if (ptype_mask & all_ptypes[i]) { 3549 if (j < num - 1) { 3550 set_ptypes[j] = all_ptypes[i]; 3551 j++; 3552 continue; 3553 } 3554 break; 3555 } 3556 } 3557 3558 if (set_ptypes != NULL && j < num) 3559 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3560 3561 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3562 3563 ptype_unknown: 3564 if (num > 0) 3565 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3566 3567 return ret; 3568 } 3569 3570 int 3571 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3572 { 3573 struct rte_eth_dev *dev; 3574 3575 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3576 dev = &rte_eth_devices[port_id]; 3577 3578 if (mac_addr == NULL) { 3579 RTE_ETHDEV_LOG(ERR, 3580 "Cannot get ethdev port %u MAC address to NULL\n", 3581 port_id); 3582 return -EINVAL; 3583 } 3584 3585 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3586 3587 return 0; 3588 } 3589 3590 int 3591 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3592 { 3593 struct rte_eth_dev *dev; 3594 3595 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3596 dev = &rte_eth_devices[port_id]; 3597 3598 if (mtu == NULL) { 3599 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3600 port_id); 3601 return -EINVAL; 3602 } 3603 3604 *mtu = dev->data->mtu; 3605 return 0; 3606 } 3607 3608 int 3609 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3610 { 3611 int ret; 3612 struct rte_eth_dev_info dev_info; 3613 struct rte_eth_dev *dev; 3614 3615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3616 dev = &rte_eth_devices[port_id]; 3617 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3618 3619 /* 3620 * Check if the device supports dev_infos_get, if it does not 3621 * skip min_mtu/max_mtu validation here as this requires values 3622 * that are populated within the call to rte_eth_dev_info_get() 3623 * which relies on dev->dev_ops->dev_infos_get. 
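 * In that case the range check is skipped and validation is left to
 * the PMD's mtu_set callback invoked below.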
3624 */ 3625 if (*dev->dev_ops->dev_infos_get != NULL) { 3626 ret = rte_eth_dev_info_get(port_id, &dev_info); 3627 if (ret != 0) 3628 return ret; 3629 3630 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) 3631 return -EINVAL; 3632 } 3633 3634 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3635 if (!ret) 3636 dev->data->mtu = mtu; 3637 3638 return eth_err(port_id, ret); 3639 } 3640 3641 int 3642 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3643 { 3644 struct rte_eth_dev *dev; 3645 int ret; 3646 3647 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3648 dev = &rte_eth_devices[port_id]; 3649 3650 if (!(dev->data->dev_conf.rxmode.offloads & 3651 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3652 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3653 port_id); 3654 return -ENOSYS; 3655 } 3656 3657 if (vlan_id > 4095) { 3658 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3659 port_id, vlan_id); 3660 return -EINVAL; 3661 } 3662 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3663 3664 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3665 if (ret == 0) { 3666 struct rte_vlan_filter_conf *vfc; 3667 int vidx; 3668 int vbit; 3669 3670 vfc = &dev->data->vlan_filter_conf; 3671 vidx = vlan_id / 64; 3672 vbit = vlan_id % 64; 3673 3674 if (on) 3675 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3676 else 3677 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3678 } 3679 3680 return eth_err(port_id, ret); 3681 } 3682 3683 int 3684 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3685 int on) 3686 { 3687 struct rte_eth_dev *dev; 3688 3689 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3690 dev = &rte_eth_devices[port_id]; 3691 3692 if (rx_queue_id >= dev->data->nb_rx_queues) { 3693 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3694 return -EINVAL; 3695 } 3696 3697 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3698 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3699 3700 return 0; 3701 } 3702 3703 int 3704 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3705 enum rte_vlan_type vlan_type, 3706 uint16_t tpid) 3707 { 3708 struct rte_eth_dev *dev; 3709 3710 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3711 dev = &rte_eth_devices[port_id]; 3712 3713 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3714 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3715 tpid)); 3716 } 3717 3718 int 3719 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3720 { 3721 struct rte_eth_dev_info dev_info; 3722 struct rte_eth_dev *dev; 3723 int ret = 0; 3724 int mask = 0; 3725 int cur, org = 0; 3726 uint64_t orig_offloads; 3727 uint64_t dev_offloads; 3728 uint64_t new_offloads; 3729 3730 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3731 dev = &rte_eth_devices[port_id]; 3732 3733 /* save original values in case of failure */ 3734 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3735 dev_offloads = orig_offloads; 3736 3737 /* check which option changed by application */ 3738 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3739 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3740 if (cur != org) { 3741 if (cur) 3742 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3743 else 3744 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3745 mask |= ETH_VLAN_STRIP_MASK; 3746 } 3747 3748 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3749 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3750 if (cur != org) { 3751 if (cur) 3752 dev_offloads |= 
DEV_RX_OFFLOAD_VLAN_FILTER; 3753 else 3754 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3755 mask |= ETH_VLAN_FILTER_MASK; 3756 } 3757 3758 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3759 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3760 if (cur != org) { 3761 if (cur) 3762 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3763 else 3764 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3765 mask |= ETH_VLAN_EXTEND_MASK; 3766 } 3767 3768 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3769 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3770 if (cur != org) { 3771 if (cur) 3772 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3773 else 3774 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3775 mask |= ETH_QINQ_STRIP_MASK; 3776 } 3777 3778 /*no change*/ 3779 if (mask == 0) 3780 return ret; 3781 3782 ret = rte_eth_dev_info_get(port_id, &dev_info); 3783 if (ret != 0) 3784 return ret; 3785 3786 /* Rx VLAN offloading must be within its device capabilities */ 3787 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3788 new_offloads = dev_offloads & ~orig_offloads; 3789 RTE_ETHDEV_LOG(ERR, 3790 "Ethdev port_id=%u requested new added VLAN offloads " 3791 "0x%" PRIx64 " must be within Rx offloads capabilities " 3792 "0x%" PRIx64 " in %s()\n", 3793 port_id, new_offloads, dev_info.rx_offload_capa, 3794 __func__); 3795 return -EINVAL; 3796 } 3797 3798 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3799 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3800 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3801 if (ret) { 3802 /* hit an error restore original values */ 3803 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3804 } 3805 3806 return eth_err(port_id, ret); 3807 } 3808 3809 int 3810 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3811 { 3812 struct rte_eth_dev *dev; 3813 uint64_t *dev_offloads; 3814 int ret = 0; 3815 3816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3817 dev = &rte_eth_devices[port_id]; 3818 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3819 3820 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3821 ret |= ETH_VLAN_STRIP_OFFLOAD; 3822 3823 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3824 ret |= ETH_VLAN_FILTER_OFFLOAD; 3825 3826 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3827 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3828 3829 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3830 ret |= ETH_QINQ_STRIP_OFFLOAD; 3831 3832 return ret; 3833 } 3834 3835 int 3836 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3837 { 3838 struct rte_eth_dev *dev; 3839 3840 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3841 dev = &rte_eth_devices[port_id]; 3842 3843 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3844 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3845 } 3846 3847 int 3848 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3849 { 3850 struct rte_eth_dev *dev; 3851 3852 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3853 dev = &rte_eth_devices[port_id]; 3854 3855 if (fc_conf == NULL) { 3856 RTE_ETHDEV_LOG(ERR, 3857 "Cannot get ethdev port %u flow control config to NULL\n", 3858 port_id); 3859 return -EINVAL; 3860 } 3861 3862 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3863 memset(fc_conf, 0, sizeof(*fc_conf)); 3864 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3865 } 3866 3867 int 3868 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3869 { 3870 struct rte_eth_dev 
*dev; 3871 3872 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3873 dev = &rte_eth_devices[port_id]; 3874 3875 if (fc_conf == NULL) { 3876 RTE_ETHDEV_LOG(ERR, 3877 "Cannot set ethdev port %u flow control from NULL config\n", 3878 port_id); 3879 return -EINVAL; 3880 } 3881 3882 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3883 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3884 return -EINVAL; 3885 } 3886 3887 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3888 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3889 } 3890 3891 int 3892 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3893 struct rte_eth_pfc_conf *pfc_conf) 3894 { 3895 struct rte_eth_dev *dev; 3896 3897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3898 dev = &rte_eth_devices[port_id]; 3899 3900 if (pfc_conf == NULL) { 3901 RTE_ETHDEV_LOG(ERR, 3902 "Cannot set ethdev port %u priority flow control from NULL config\n", 3903 port_id); 3904 return -EINVAL; 3905 } 3906 3907 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3908 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3909 return -EINVAL; 3910 } 3911 3912 /* High water, low water validation are device specific */ 3913 if (*dev->dev_ops->priority_flow_ctrl_set) 3914 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3915 (dev, pfc_conf)); 3916 return -ENOTSUP; 3917 } 3918 3919 static int 3920 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3921 uint16_t reta_size) 3922 { 3923 uint16_t i, num; 3924 3925 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3926 for (i = 0; i < num; i++) { 3927 if (reta_conf[i].mask) 3928 return 0; 3929 } 3930 3931 return -EINVAL; 3932 } 3933 3934 static int 3935 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3936 uint16_t reta_size, 3937 uint16_t max_rxq) 3938 { 3939 uint16_t i, idx, shift; 3940 3941 if (max_rxq == 0) { 3942 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3943 return -EINVAL; 3944 } 3945 3946 for (i = 0; i < reta_size; i++) { 3947 idx = i / RTE_RETA_GROUP_SIZE; 3948 shift = i % RTE_RETA_GROUP_SIZE; 3949 if ((reta_conf[idx].mask & (1ULL << shift)) && 3950 (reta_conf[idx].reta[shift] >= max_rxq)) { 3951 RTE_ETHDEV_LOG(ERR, 3952 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3953 idx, shift, 3954 reta_conf[idx].reta[shift], max_rxq); 3955 return -EINVAL; 3956 } 3957 } 3958 3959 return 0; 3960 } 3961 3962 int 3963 rte_eth_dev_rss_reta_update(uint16_t port_id, 3964 struct rte_eth_rss_reta_entry64 *reta_conf, 3965 uint16_t reta_size) 3966 { 3967 struct rte_eth_dev *dev; 3968 int ret; 3969 3970 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3971 dev = &rte_eth_devices[port_id]; 3972 3973 if (reta_conf == NULL) { 3974 RTE_ETHDEV_LOG(ERR, 3975 "Cannot update ethdev port %u RSS RETA to NULL\n", 3976 port_id); 3977 return -EINVAL; 3978 } 3979 3980 if (reta_size == 0) { 3981 RTE_ETHDEV_LOG(ERR, 3982 "Cannot update ethdev port %u RSS RETA with zero size\n", 3983 port_id); 3984 return -EINVAL; 3985 } 3986 3987 /* Check mask bits */ 3988 ret = eth_check_reta_mask(reta_conf, reta_size); 3989 if (ret < 0) 3990 return ret; 3991 3992 /* Check entry value */ 3993 ret = eth_check_reta_entry(reta_conf, reta_size, 3994 dev->data->nb_rx_queues); 3995 if (ret < 0) 3996 return ret; 3997 3998 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3999 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4000 reta_size)); 4001 } 4002 
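/*
 * Illustrative usage sketch (not part of the library): how an application
 * might fill the table consumed by rte_eth_dev_rss_reta_update(). The
 * port_id variable, the 4 Rx queues and the RETA size of 128 are assumed
 * example values; the loop spreads the entries round-robin over queues
 * 0-3, which satisfies the mask and entry checks performed above.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *	int ret;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= UINT64_C(1) << shift;
 *		reta_conf[idx].reta[shift] = i % 4;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */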
4003 int 4004 rte_eth_dev_rss_reta_query(uint16_t port_id, 4005 struct rte_eth_rss_reta_entry64 *reta_conf, 4006 uint16_t reta_size) 4007 { 4008 struct rte_eth_dev *dev; 4009 int ret; 4010 4011 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4012 dev = &rte_eth_devices[port_id]; 4013 4014 if (reta_conf == NULL) { 4015 RTE_ETHDEV_LOG(ERR, 4016 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4017 port_id); 4018 return -EINVAL; 4019 } 4020 4021 /* Check mask bits */ 4022 ret = eth_check_reta_mask(reta_conf, reta_size); 4023 if (ret < 0) 4024 return ret; 4025 4026 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4027 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4028 reta_size)); 4029 } 4030 4031 int 4032 rte_eth_dev_rss_hash_update(uint16_t port_id, 4033 struct rte_eth_rss_conf *rss_conf) 4034 { 4035 struct rte_eth_dev *dev; 4036 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4037 int ret; 4038 4039 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4040 dev = &rte_eth_devices[port_id]; 4041 4042 if (rss_conf == NULL) { 4043 RTE_ETHDEV_LOG(ERR, 4044 "Cannot update ethdev port %u RSS hash from NULL config\n", 4045 port_id); 4046 return -EINVAL; 4047 } 4048 4049 ret = rte_eth_dev_info_get(port_id, &dev_info); 4050 if (ret != 0) 4051 return ret; 4052 4053 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4054 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4055 dev_info.flow_type_rss_offloads) { 4056 RTE_ETHDEV_LOG(ERR, 4057 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4058 port_id, rss_conf->rss_hf, 4059 dev_info.flow_type_rss_offloads); 4060 return -EINVAL; 4061 } 4062 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4063 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4064 rss_conf)); 4065 } 4066 4067 int 4068 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4069 struct rte_eth_rss_conf *rss_conf) 4070 { 4071 struct rte_eth_dev *dev; 4072 4073 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4074 dev = &rte_eth_devices[port_id]; 4075 4076 if (rss_conf == NULL) { 4077 RTE_ETHDEV_LOG(ERR, 4078 "Cannot get ethdev port %u RSS hash config to NULL\n", 4079 port_id); 4080 return -EINVAL; 4081 } 4082 4083 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4084 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4085 rss_conf)); 4086 } 4087 4088 int 4089 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4090 struct rte_eth_udp_tunnel *udp_tunnel) 4091 { 4092 struct rte_eth_dev *dev; 4093 4094 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4095 dev = &rte_eth_devices[port_id]; 4096 4097 if (udp_tunnel == NULL) { 4098 RTE_ETHDEV_LOG(ERR, 4099 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4100 port_id); 4101 return -EINVAL; 4102 } 4103 4104 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4105 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4106 return -EINVAL; 4107 } 4108 4109 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4110 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4111 udp_tunnel)); 4112 } 4113 4114 int 4115 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4116 struct rte_eth_udp_tunnel *udp_tunnel) 4117 { 4118 struct rte_eth_dev *dev; 4119 4120 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4121 dev = &rte_eth_devices[port_id]; 4122 4123 if (udp_tunnel == NULL) { 4124 RTE_ETHDEV_LOG(ERR, 4125 "Cannot delete ethdev port %u UDP 
tunnel port from NULL UDP tunnel\n", 4126 port_id); 4127 return -EINVAL; 4128 } 4129 4130 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4131 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4132 return -EINVAL; 4133 } 4134 4135 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4136 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4137 udp_tunnel)); 4138 } 4139 4140 int 4141 rte_eth_led_on(uint16_t port_id) 4142 { 4143 struct rte_eth_dev *dev; 4144 4145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4146 dev = &rte_eth_devices[port_id]; 4147 4148 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4149 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4150 } 4151 4152 int 4153 rte_eth_led_off(uint16_t port_id) 4154 { 4155 struct rte_eth_dev *dev; 4156 4157 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4158 dev = &rte_eth_devices[port_id]; 4159 4160 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4161 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4162 } 4163 4164 int 4165 rte_eth_fec_get_capability(uint16_t port_id, 4166 struct rte_eth_fec_capa *speed_fec_capa, 4167 unsigned int num) 4168 { 4169 struct rte_eth_dev *dev; 4170 int ret; 4171 4172 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4173 dev = &rte_eth_devices[port_id]; 4174 4175 if (speed_fec_capa == NULL && num > 0) { 4176 RTE_ETHDEV_LOG(ERR, 4177 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4178 port_id); 4179 return -EINVAL; 4180 } 4181 4182 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4183 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4184 4185 return ret; 4186 } 4187 4188 int 4189 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4190 { 4191 struct rte_eth_dev *dev; 4192 4193 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4194 dev = &rte_eth_devices[port_id]; 4195 4196 if (fec_capa == NULL) { 4197 RTE_ETHDEV_LOG(ERR, 4198 "Cannot get ethdev port %u current FEC mode to NULL\n", 4199 port_id); 4200 return -EINVAL; 4201 } 4202 4203 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4204 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4205 } 4206 4207 int 4208 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4209 { 4210 struct rte_eth_dev *dev; 4211 4212 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4213 dev = &rte_eth_devices[port_id]; 4214 4215 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4216 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4217 } 4218 4219 /* 4220 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4221 * an empty spot. 
4222 */ 4223 static int 4224 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4225 { 4226 struct rte_eth_dev_info dev_info; 4227 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4228 unsigned i; 4229 int ret; 4230 4231 ret = rte_eth_dev_info_get(port_id, &dev_info); 4232 if (ret != 0) 4233 return -1; 4234 4235 for (i = 0; i < dev_info.max_mac_addrs; i++) 4236 if (memcmp(addr, &dev->data->mac_addrs[i], 4237 RTE_ETHER_ADDR_LEN) == 0) 4238 return i; 4239 4240 return -1; 4241 } 4242 4243 static const struct rte_ether_addr null_mac_addr; 4244 4245 int 4246 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4247 uint32_t pool) 4248 { 4249 struct rte_eth_dev *dev; 4250 int index; 4251 uint64_t pool_mask; 4252 int ret; 4253 4254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4255 dev = &rte_eth_devices[port_id]; 4256 4257 if (addr == NULL) { 4258 RTE_ETHDEV_LOG(ERR, 4259 "Cannot add ethdev port %u MAC address from NULL address\n", 4260 port_id); 4261 return -EINVAL; 4262 } 4263 4264 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4265 4266 if (rte_is_zero_ether_addr(addr)) { 4267 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4268 port_id); 4269 return -EINVAL; 4270 } 4271 if (pool >= ETH_64_POOLS) { 4272 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4273 return -EINVAL; 4274 } 4275 4276 index = eth_dev_get_mac_addr_index(port_id, addr); 4277 if (index < 0) { 4278 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4279 if (index < 0) { 4280 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4281 port_id); 4282 return -ENOSPC; 4283 } 4284 } else { 4285 pool_mask = dev->data->mac_pool_sel[index]; 4286 4287 /* Check if both MAC address and pool is already there, and do nothing */ 4288 if (pool_mask & (1ULL << pool)) 4289 return 0; 4290 } 4291 4292 /* Update NIC */ 4293 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4294 4295 if (ret == 0) { 4296 /* Update address in NIC data structure */ 4297 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4298 4299 /* Update pool bitmap in NIC data structure */ 4300 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4301 } 4302 4303 return eth_err(port_id, ret); 4304 } 4305 4306 int 4307 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4308 { 4309 struct rte_eth_dev *dev; 4310 int index; 4311 4312 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4313 dev = &rte_eth_devices[port_id]; 4314 4315 if (addr == NULL) { 4316 RTE_ETHDEV_LOG(ERR, 4317 "Cannot remove ethdev port %u MAC address from NULL address\n", 4318 port_id); 4319 return -EINVAL; 4320 } 4321 4322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4323 4324 index = eth_dev_get_mac_addr_index(port_id, addr); 4325 if (index == 0) { 4326 RTE_ETHDEV_LOG(ERR, 4327 "Port %u: Cannot remove default MAC address\n", 4328 port_id); 4329 return -EADDRINUSE; 4330 } else if (index < 0) 4331 return 0; /* Do nothing if address wasn't found */ 4332 4333 /* Update NIC */ 4334 (*dev->dev_ops->mac_addr_remove)(dev, index); 4335 4336 /* Update address in NIC data structure */ 4337 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4338 4339 /* reset pool bitmap */ 4340 dev->data->mac_pool_sel[index] = 0; 4341 4342 return 0; 4343 } 4344 4345 int 4346 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4347 { 4348 struct rte_eth_dev *dev; 4349 int ret; 4350 4351 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4352 dev = &rte_eth_devices[port_id]; 4353 4354 if (addr == NULL) { 4355 RTE_ETHDEV_LOG(ERR, 4356 "Cannot set ethdev port %u default MAC address from NULL address\n", 4357 port_id); 4358 return -EINVAL; 4359 } 4360 4361 if (!rte_is_valid_assigned_ether_addr(addr)) 4362 return -EINVAL; 4363 4364 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4365 4366 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4367 if (ret < 0) 4368 return ret; 4369 4370 /* Update default address in NIC data structure */ 4371 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4372 4373 return 0; 4374 } 4375 4376 4377 /* 4378 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4379 * an empty spot. 4380 */ 4381 static int 4382 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4383 const struct rte_ether_addr *addr) 4384 { 4385 struct rte_eth_dev_info dev_info; 4386 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4387 unsigned i; 4388 int ret; 4389 4390 ret = rte_eth_dev_info_get(port_id, &dev_info); 4391 if (ret != 0) 4392 return -1; 4393 4394 if (!dev->data->hash_mac_addrs) 4395 return -1; 4396 4397 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4398 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4399 RTE_ETHER_ADDR_LEN) == 0) 4400 return i; 4401 4402 return -1; 4403 } 4404 4405 int 4406 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4407 uint8_t on) 4408 { 4409 int index; 4410 int ret; 4411 struct rte_eth_dev *dev; 4412 4413 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4414 dev = &rte_eth_devices[port_id]; 4415 4416 if (addr == NULL) { 4417 RTE_ETHDEV_LOG(ERR, 4418 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4419 port_id); 4420 return -EINVAL; 4421 } 4422 4423 if (rte_is_zero_ether_addr(addr)) { 4424 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4425 port_id); 4426 return -EINVAL; 4427 } 4428 4429 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4430 /* Check if it's already there, and do nothing */ 4431 if ((index >= 0) && on) 4432 return 0; 4433 4434 if (index < 0) { 4435 if (!on) { 4436 RTE_ETHDEV_LOG(ERR, 4437 "Port %u: the MAC address was not set in UTA\n", 4438 port_id); 4439 return -EINVAL; 4440 } 4441 4442 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4443 if (index < 0) { 4444 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4445 port_id); 4446 return -ENOSPC; 4447 } 4448 } 4449 4450 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4451 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4452 if (ret == 0) { 4453 /* Update address in NIC data structure */ 4454 if (on) 4455 rte_ether_addr_copy(addr, 4456 &dev->data->hash_mac_addrs[index]); 4457 else 4458 rte_ether_addr_copy(&null_mac_addr, 4459 &dev->data->hash_mac_addrs[index]); 4460 } 4461 4462 return eth_err(port_id, ret); 4463 } 4464 4465 int 4466 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4467 { 4468 struct rte_eth_dev *dev; 4469 4470 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4471 dev = &rte_eth_devices[port_id]; 4472 4473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4474 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4475 on)); 4476 } 4477 4478 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4479 uint16_t tx_rate) 4480 { 4481 struct rte_eth_dev *dev; 4482 struct rte_eth_dev_info dev_info; 4483 struct rte_eth_link 
link; 4484 int ret; 4485 4486 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4487 dev = &rte_eth_devices[port_id]; 4488 4489 ret = rte_eth_dev_info_get(port_id, &dev_info); 4490 if (ret != 0) 4491 return ret; 4492 4493 link = dev->data->dev_link; 4494 4495 if (queue_idx > dev_info.max_tx_queues) { 4496 RTE_ETHDEV_LOG(ERR, 4497 "Set queue rate limit:port %u: invalid queue id=%u\n", 4498 port_id, queue_idx); 4499 return -EINVAL; 4500 } 4501 4502 if (tx_rate > link.link_speed) { 4503 RTE_ETHDEV_LOG(ERR, 4504 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4505 tx_rate, link.link_speed); 4506 return -EINVAL; 4507 } 4508 4509 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4510 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4511 queue_idx, tx_rate)); 4512 } 4513 4514 int 4515 rte_eth_mirror_rule_set(uint16_t port_id, 4516 struct rte_eth_mirror_conf *mirror_conf, 4517 uint8_t rule_id, uint8_t on) 4518 { 4519 struct rte_eth_dev *dev; 4520 4521 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4522 dev = &rte_eth_devices[port_id]; 4523 4524 if (mirror_conf == NULL) { 4525 RTE_ETHDEV_LOG(ERR, 4526 "Cannot set ethdev port %u mirror rule from NULL config\n", 4527 port_id); 4528 return -EINVAL; 4529 } 4530 4531 if (mirror_conf->rule_type == 0) { 4532 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n"); 4533 return -EINVAL; 4534 } 4535 4536 if (mirror_conf->dst_pool >= ETH_64_POOLS) { 4537 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n", 4538 ETH_64_POOLS - 1); 4539 return -EINVAL; 4540 } 4541 4542 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP | 4543 ETH_MIRROR_VIRTUAL_POOL_DOWN)) && 4544 (mirror_conf->pool_mask == 0)) { 4545 RTE_ETHDEV_LOG(ERR, 4546 "Invalid mirror pool, pool mask can not be 0\n"); 4547 return -EINVAL; 4548 } 4549 4550 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) && 4551 mirror_conf->vlan.vlan_mask == 0) { 4552 RTE_ETHDEV_LOG(ERR, 4553 "Invalid vlan mask, vlan mask can not be 0\n"); 4554 return -EINVAL; 4555 } 4556 4557 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); 4558 4559 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, 4560 mirror_conf, rule_id, on)); 4561 } 4562 4563 int 4564 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id) 4565 { 4566 struct rte_eth_dev *dev; 4567 4568 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4569 dev = &rte_eth_devices[port_id]; 4570 4571 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); 4572 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id)); 4573 } 4574 4575 RTE_INIT(eth_dev_init_cb_lists) 4576 { 4577 uint16_t i; 4578 4579 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4580 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4581 } 4582 4583 int 4584 rte_eth_dev_callback_register(uint16_t port_id, 4585 enum rte_eth_event_type event, 4586 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4587 { 4588 struct rte_eth_dev *dev; 4589 struct rte_eth_dev_callback *user_cb; 4590 uint16_t next_port; 4591 uint16_t last_port; 4592 4593 if (cb_fn == NULL) { 4594 RTE_ETHDEV_LOG(ERR, 4595 "Cannot register ethdev port %u callback from NULL\n", 4596 port_id); 4597 return -EINVAL; 4598 } 4599 4600 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4601 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4602 return -EINVAL; 4603 } 4604 4605 if (port_id == RTE_ETH_ALL) { 4606 next_port = 0; 4607 last_port = RTE_MAX_ETHPORTS - 1; 4608 } else { 4609 next_port = last_port = port_id; 
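		/*
		 * A specific port is visited exactly once; RTE_ETH_ALL
		 * (handled above) walks every possible slot so the callback
		 * also applies to ports that are probed later.
		 */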
4610 } 4611 4612 rte_spinlock_lock(&eth_dev_cb_lock); 4613 4614 do { 4615 dev = &rte_eth_devices[next_port]; 4616 4617 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4618 if (user_cb->cb_fn == cb_fn && 4619 user_cb->cb_arg == cb_arg && 4620 user_cb->event == event) { 4621 break; 4622 } 4623 } 4624 4625 /* create a new callback. */ 4626 if (user_cb == NULL) { 4627 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4628 sizeof(struct rte_eth_dev_callback), 0); 4629 if (user_cb != NULL) { 4630 user_cb->cb_fn = cb_fn; 4631 user_cb->cb_arg = cb_arg; 4632 user_cb->event = event; 4633 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4634 user_cb, next); 4635 } else { 4636 rte_spinlock_unlock(&eth_dev_cb_lock); 4637 rte_eth_dev_callback_unregister(port_id, event, 4638 cb_fn, cb_arg); 4639 return -ENOMEM; 4640 } 4641 4642 } 4643 } while (++next_port <= last_port); 4644 4645 rte_spinlock_unlock(&eth_dev_cb_lock); 4646 return 0; 4647 } 4648 4649 int 4650 rte_eth_dev_callback_unregister(uint16_t port_id, 4651 enum rte_eth_event_type event, 4652 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4653 { 4654 int ret; 4655 struct rte_eth_dev *dev; 4656 struct rte_eth_dev_callback *cb, *next; 4657 uint16_t next_port; 4658 uint16_t last_port; 4659 4660 if (cb_fn == NULL) { 4661 RTE_ETHDEV_LOG(ERR, 4662 "Cannot unregister ethdev port %u callback from NULL\n", 4663 port_id); 4664 return -EINVAL; 4665 } 4666 4667 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4668 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4669 return -EINVAL; 4670 } 4671 4672 if (port_id == RTE_ETH_ALL) { 4673 next_port = 0; 4674 last_port = RTE_MAX_ETHPORTS - 1; 4675 } else { 4676 next_port = last_port = port_id; 4677 } 4678 4679 rte_spinlock_lock(&eth_dev_cb_lock); 4680 4681 do { 4682 dev = &rte_eth_devices[next_port]; 4683 ret = 0; 4684 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4685 cb = next) { 4686 4687 next = TAILQ_NEXT(cb, next); 4688 4689 if (cb->cb_fn != cb_fn || cb->event != event || 4690 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4691 continue; 4692 4693 /* 4694 * if this callback is not executing right now, 4695 * then remove it.
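 * Otherwise leave it on the list and report -EAGAIN: the callback is
 * currently being run by rte_eth_dev_callback_process() on another
 * thread, so the caller has to retry the unregister once it returns.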
4696 */ 4697 if (cb->active == 0) { 4698 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4699 rte_free(cb); 4700 } else { 4701 ret = -EAGAIN; 4702 } 4703 } 4704 } while (++next_port <= last_port); 4705 4706 rte_spinlock_unlock(&eth_dev_cb_lock); 4707 return ret; 4708 } 4709 4710 int 4711 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4712 enum rte_eth_event_type event, void *ret_param) 4713 { 4714 struct rte_eth_dev_callback *cb_lst; 4715 struct rte_eth_dev_callback dev_cb; 4716 int rc = 0; 4717 4718 rte_spinlock_lock(&eth_dev_cb_lock); 4719 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4720 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4721 continue; 4722 dev_cb = *cb_lst; 4723 cb_lst->active = 1; 4724 if (ret_param != NULL) 4725 dev_cb.ret_param = ret_param; 4726 4727 rte_spinlock_unlock(&eth_dev_cb_lock); 4728 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4729 dev_cb.cb_arg, dev_cb.ret_param); 4730 rte_spinlock_lock(&eth_dev_cb_lock); 4731 cb_lst->active = 0; 4732 } 4733 rte_spinlock_unlock(&eth_dev_cb_lock); 4734 return rc; 4735 } 4736 4737 void 4738 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4739 { 4740 if (dev == NULL) 4741 return; 4742 4743 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4744 4745 dev->state = RTE_ETH_DEV_ATTACHED; 4746 } 4747 4748 int 4749 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4750 { 4751 uint32_t vec; 4752 struct rte_eth_dev *dev; 4753 struct rte_intr_handle *intr_handle; 4754 uint16_t qid; 4755 int rc; 4756 4757 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4758 dev = &rte_eth_devices[port_id]; 4759 4760 if (!dev->intr_handle) { 4761 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4762 return -ENOTSUP; 4763 } 4764 4765 intr_handle = dev->intr_handle; 4766 if (!intr_handle->intr_vec) { 4767 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4768 return -EPERM; 4769 } 4770 4771 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4772 vec = intr_handle->intr_vec[qid]; 4773 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4774 if (rc && rc != -EEXIST) { 4775 RTE_ETHDEV_LOG(ERR, 4776 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4777 port_id, qid, op, epfd, vec); 4778 } 4779 } 4780 4781 return 0; 4782 } 4783 4784 int 4785 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4786 { 4787 struct rte_intr_handle *intr_handle; 4788 struct rte_eth_dev *dev; 4789 unsigned int efd_idx; 4790 uint32_t vec; 4791 int fd; 4792 4793 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4794 dev = &rte_eth_devices[port_id]; 4795 4796 if (queue_id >= dev->data->nb_rx_queues) { 4797 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4798 return -1; 4799 } 4800 4801 if (!dev->intr_handle) { 4802 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4803 return -1; 4804 } 4805 4806 intr_handle = dev->intr_handle; 4807 if (!intr_handle->intr_vec) { 4808 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4809 return -1; 4810 } 4811 4812 vec = intr_handle->intr_vec[queue_id]; 4813 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4814 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4815 fd = intr_handle->efds[efd_idx]; 4816 4817 return fd; 4818 } 4819 4820 static inline int 4821 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4822 const char *ring_name) 4823 { 4824 return snprintf(name, len, "eth_p%d_q%d_%s", 4825 port_id, queue_id, ring_name); 4826 } 4827 4828 const struct rte_memzone * 4829 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4830 uint16_t queue_id, size_t size, unsigned align, 4831 int socket_id) 4832 { 4833 char z_name[RTE_MEMZONE_NAMESIZE]; 4834 const struct rte_memzone *mz; 4835 int rc; 4836 4837 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4838 queue_id, ring_name); 4839 if (rc >= RTE_MEMZONE_NAMESIZE) { 4840 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4841 rte_errno = ENAMETOOLONG; 4842 return NULL; 4843 } 4844 4845 mz = rte_memzone_lookup(z_name); 4846 if (mz) { 4847 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4848 size > mz->len || 4849 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4850 RTE_ETHDEV_LOG(ERR, 4851 "memzone %s does not justify the requested attributes\n", 4852 mz->name); 4853 return NULL; 4854 } 4855 4856 return mz; 4857 } 4858 4859 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4860 RTE_MEMZONE_IOVA_CONTIG, align); 4861 } 4862 4863 int 4864 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4865 uint16_t queue_id) 4866 { 4867 char z_name[RTE_MEMZONE_NAMESIZE]; 4868 const struct rte_memzone *mz; 4869 int rc = 0; 4870 4871 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4872 queue_id, ring_name); 4873 if (rc >= RTE_MEMZONE_NAMESIZE) { 4874 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4875 return -ENAMETOOLONG; 4876 } 4877 4878 mz = rte_memzone_lookup(z_name); 4879 if (mz) 4880 rc = rte_memzone_free(mz); 4881 else 4882 rc = -ENOENT; 4883 4884 return rc; 4885 } 4886 4887 int 4888 rte_eth_dev_create(struct rte_device *device, const char *name, 4889 size_t priv_data_size, 4890 ethdev_bus_specific_init ethdev_bus_specific_init, 4891 void *bus_init_params, 4892 ethdev_init_t ethdev_init, void *init_params) 4893 { 4894 struct rte_eth_dev *ethdev; 4895 int retval; 4896 4897 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4898 4899 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4900 ethdev = rte_eth_dev_allocate(name); 4901 if (!ethdev) 4902 return -ENODEV; 4903 4904 if (priv_data_size) { 4905 ethdev->data->dev_private = rte_zmalloc_socket( 4906 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4907 device->numa_node); 4908 4909 if (!ethdev->data->dev_private) { 4910 RTE_ETHDEV_LOG(ERR, 4911 "failed to allocate private data\n"); 4912 retval = -ENOMEM; 4913 goto probe_failed; 4914 } 4915 } 4916 } else { 4917 ethdev = rte_eth_dev_attach_secondary(name); 4918 if (!ethdev) { 4919 RTE_ETHDEV_LOG(ERR, 4920 "secondary process attach failed, ethdev doesn't exist\n"); 4921 return -ENODEV; 4922 } 4923 } 4924 4925 ethdev->device = device; 4926 4927 if (ethdev_bus_specific_init) { 4928 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4929 if (retval) { 4930 RTE_ETHDEV_LOG(ERR, 4931 "ethdev bus specific initialisation failed\n"); 4932 goto probe_failed; 4933 } 4934 } 4935 4936 retval = ethdev_init(ethdev, init_params); 4937 if (retval) { 4938 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4939 goto probe_failed; 4940 } 4941 4942 rte_eth_dev_probing_finish(ethdev); 4943 4944 return retval; 4945 4946 probe_failed: 4947 
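	/* Error path: release the port (and, in the primary process, the
	 * private data allocated above) so a failed probe leaves no
	 * half-initialised ethdev behind.
	 */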
rte_eth_dev_release_port(ethdev); 4948 return retval; 4949 } 4950 4951 int 4952 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4953 ethdev_uninit_t ethdev_uninit) 4954 { 4955 int ret; 4956 4957 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4958 if (!ethdev) 4959 return -ENODEV; 4960 4961 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4962 4963 ret = ethdev_uninit(ethdev); 4964 if (ret) 4965 return ret; 4966 4967 return rte_eth_dev_release_port(ethdev); 4968 } 4969 4970 int 4971 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4972 int epfd, int op, void *data) 4973 { 4974 uint32_t vec; 4975 struct rte_eth_dev *dev; 4976 struct rte_intr_handle *intr_handle; 4977 int rc; 4978 4979 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4980 dev = &rte_eth_devices[port_id]; 4981 4982 if (queue_id >= dev->data->nb_rx_queues) { 4983 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4984 return -EINVAL; 4985 } 4986 4987 if (!dev->intr_handle) { 4988 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4989 return -ENOTSUP; 4990 } 4991 4992 intr_handle = dev->intr_handle; 4993 if (!intr_handle->intr_vec) { 4994 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4995 return -EPERM; 4996 } 4997 4998 vec = intr_handle->intr_vec[queue_id]; 4999 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5000 if (rc && rc != -EEXIST) { 5001 RTE_ETHDEV_LOG(ERR, 5002 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 5003 port_id, queue_id, op, epfd, vec); 5004 return rc; 5005 } 5006 5007 return 0; 5008 } 5009 5010 int 5011 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5012 uint16_t queue_id) 5013 { 5014 struct rte_eth_dev *dev; 5015 int ret; 5016 5017 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5018 dev = &rte_eth_devices[port_id]; 5019 5020 ret = eth_dev_validate_rx_queue(dev, queue_id); 5021 if (ret != 0) 5022 return ret; 5023 5024 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5025 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5026 } 5027 5028 int 5029 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5030 uint16_t queue_id) 5031 { 5032 struct rte_eth_dev *dev; 5033 int ret; 5034 5035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5036 dev = &rte_eth_devices[port_id]; 5037 5038 ret = eth_dev_validate_rx_queue(dev, queue_id); 5039 if (ret != 0) 5040 return ret; 5041 5042 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5043 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5044 } 5045 5046 5047 const struct rte_eth_rxtx_callback * 5048 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5049 rte_rx_callback_fn fn, void *user_param) 5050 { 5051 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5052 rte_errno = ENOTSUP; 5053 return NULL; 5054 #endif 5055 struct rte_eth_dev *dev; 5056 5057 /* check input parameters */ 5058 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5059 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5060 rte_errno = EINVAL; 5061 return NULL; 5062 } 5063 dev = &rte_eth_devices[port_id]; 5064 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5065 rte_errno = EINVAL; 5066 return NULL; 5067 } 5068 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5069 5070 if (cb == NULL) { 5071 rte_errno = ENOMEM; 5072 return NULL; 5073 } 5074 5075 cb->fn.rx = fn; 5076 cb->param = user_param; 5077 5078 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5079 /* Add the callbacks in fifo order.
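 * Data-path threads traverse this list without taking the lock, which
 * is why a new callback is published with a release store below.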
*/ 5080 struct rte_eth_rxtx_callback *tail = 5081 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5082 5083 if (!tail) { 5084 /* Stores to cb->fn and cb->param should complete before 5085 * cb is visible to data plane. 5086 */ 5087 __atomic_store_n( 5088 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5089 cb, __ATOMIC_RELEASE); 5090 5091 } else { 5092 while (tail->next) 5093 tail = tail->next; 5094 /* Stores to cb->fn and cb->param should complete before 5095 * cb is visible to data plane. 5096 */ 5097 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5098 } 5099 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5100 5101 return cb; 5102 } 5103 5104 const struct rte_eth_rxtx_callback * 5105 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5106 rte_rx_callback_fn fn, void *user_param) 5107 { 5108 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5109 rte_errno = ENOTSUP; 5110 return NULL; 5111 #endif 5112 /* check input parameters */ 5113 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5114 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5115 rte_errno = EINVAL; 5116 return NULL; 5117 } 5118 5119 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5120 5121 if (cb == NULL) { 5122 rte_errno = ENOMEM; 5123 return NULL; 5124 } 5125 5126 cb->fn.rx = fn; 5127 cb->param = user_param; 5128 5129 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5130 /* Add the callbacks at first position */ 5131 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5132 /* Stores to cb->fn, cb->param and cb->next should complete before 5133 * cb is visible to data plane threads. 5134 */ 5135 __atomic_store_n( 5136 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5137 cb, __ATOMIC_RELEASE); 5138 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5139 5140 return cb; 5141 } 5142 5143 const struct rte_eth_rxtx_callback * 5144 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5145 rte_tx_callback_fn fn, void *user_param) 5146 { 5147 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5148 rte_errno = ENOTSUP; 5149 return NULL; 5150 #endif 5151 struct rte_eth_dev *dev; 5152 5153 /* check input parameters */ 5154 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5155 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5156 rte_errno = EINVAL; 5157 return NULL; 5158 } 5159 5160 dev = &rte_eth_devices[port_id]; 5161 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5162 rte_errno = EINVAL; 5163 return NULL; 5164 } 5165 5166 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5167 5168 if (cb == NULL) { 5169 rte_errno = ENOMEM; 5170 return NULL; 5171 } 5172 5173 cb->fn.tx = fn; 5174 cb->param = user_param; 5175 5176 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5177 /* Add the callbacks in fifo order. */ 5178 struct rte_eth_rxtx_callback *tail = 5179 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5180 5181 if (!tail) { 5182 /* Stores to cb->fn and cb->param should complete before 5183 * cb is visible to data plane. 5184 */ 5185 __atomic_store_n( 5186 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5187 cb, __ATOMIC_RELEASE); 5188 5189 } else { 5190 while (tail->next) 5191 tail = tail->next; 5192 /* Stores to cb->fn and cb->param should complete before 5193 * cb is visible to data plane.
5194 */ 5195 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5196 } 5197 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5198 5199 return cb; 5200 } 5201 5202 int 5203 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5204 const struct rte_eth_rxtx_callback *user_cb) 5205 { 5206 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5207 return -ENOTSUP; 5208 #endif 5209 /* Check input parameters. */ 5210 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5211 if (user_cb == NULL || 5212 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5213 return -EINVAL; 5214 5215 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5216 struct rte_eth_rxtx_callback *cb; 5217 struct rte_eth_rxtx_callback **prev_cb; 5218 int ret = -EINVAL; 5219 5220 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5221 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5222 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5223 cb = *prev_cb; 5224 if (cb == user_cb) { 5225 /* Remove the user cb from the callback list. */ 5226 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5227 ret = 0; 5228 break; 5229 } 5230 } 5231 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5232 5233 return ret; 5234 } 5235 5236 int 5237 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5238 const struct rte_eth_rxtx_callback *user_cb) 5239 { 5240 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5241 return -ENOTSUP; 5242 #endif 5243 /* Check input parameters. */ 5244 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5245 if (user_cb == NULL || 5246 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5247 return -EINVAL; 5248 5249 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5250 int ret = -EINVAL; 5251 struct rte_eth_rxtx_callback *cb; 5252 struct rte_eth_rxtx_callback **prev_cb; 5253 5254 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5255 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5256 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5257 cb = *prev_cb; 5258 if (cb == user_cb) { 5259 /* Remove the user cb from the callback list.
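 * The callback memory is not freed here: a data-path thread may still
 * be executing it, so the caller must only free it once no
 * rte_eth_tx_burst() can still reference it.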
*/ 5260 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5261 ret = 0; 5262 break; 5263 } 5264 } 5265 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5266 5267 return ret; 5268 } 5269 5270 int 5271 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5272 struct rte_eth_rxq_info *qinfo) 5273 { 5274 struct rte_eth_dev *dev; 5275 5276 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5277 dev = &rte_eth_devices[port_id]; 5278 5279 if (queue_id >= dev->data->nb_rx_queues) { 5280 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5281 return -EINVAL; 5282 } 5283 5284 if (qinfo == NULL) { 5285 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5286 port_id, queue_id); 5287 return -EINVAL; 5288 } 5289 5290 if (dev->data->rx_queues == NULL || 5291 dev->data->rx_queues[queue_id] == NULL) { 5292 RTE_ETHDEV_LOG(ERR, 5293 "Rx queue %"PRIu16" of device with port_id=%" 5294 PRIu16" has not been setup\n", 5295 queue_id, port_id); 5296 return -EINVAL; 5297 } 5298 5299 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5300 RTE_ETHDEV_LOG(INFO, 5301 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5302 queue_id, port_id); 5303 return -EINVAL; 5304 } 5305 5306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5307 5308 memset(qinfo, 0, sizeof(*qinfo)); 5309 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5310 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5311 5312 return 0; 5313 } 5314 5315 int 5316 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5317 struct rte_eth_txq_info *qinfo) 5318 { 5319 struct rte_eth_dev *dev; 5320 5321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5322 dev = &rte_eth_devices[port_id]; 5323 5324 if (queue_id >= dev->data->nb_tx_queues) { 5325 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5326 return -EINVAL; 5327 } 5328 5329 if (qinfo == NULL) { 5330 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5331 port_id, queue_id); 5332 return -EINVAL; 5333 } 5334 5335 if (dev->data->tx_queues == NULL || 5336 dev->data->tx_queues[queue_id] == NULL) { 5337 RTE_ETHDEV_LOG(ERR, 5338 "Tx queue %"PRIu16" of device with port_id=%" 5339 PRIu16" has not been setup\n", 5340 queue_id, port_id); 5341 return -EINVAL; 5342 } 5343 5344 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5345 RTE_ETHDEV_LOG(INFO, 5346 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5347 queue_id, port_id); 5348 return -EINVAL; 5349 } 5350 5351 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5352 5353 memset(qinfo, 0, sizeof(*qinfo)); 5354 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5355 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5356 5357 return 0; 5358 } 5359 5360 int 5361 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5362 struct rte_eth_burst_mode *mode) 5363 { 5364 struct rte_eth_dev *dev; 5365 5366 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5367 dev = &rte_eth_devices[port_id]; 5368 5369 if (queue_id >= dev->data->nb_rx_queues) { 5370 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5371 return -EINVAL; 5372 } 5373 5374 if (mode == NULL) { 5375 RTE_ETHDEV_LOG(ERR, 5376 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5377 port_id, queue_id); 5378 return -EINVAL; 5379 } 5380 5381 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5382 memset(mode, 0, sizeof(*mode)); 5383 return eth_err(port_id, 5384
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5385 } 5386 5387 int 5388 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5389 struct rte_eth_burst_mode *mode) 5390 { 5391 struct rte_eth_dev *dev; 5392 5393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5394 dev = &rte_eth_devices[port_id]; 5395 5396 if (queue_id >= dev->data->nb_tx_queues) { 5397 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5398 return -EINVAL; 5399 } 5400 5401 if (mode == NULL) { 5402 RTE_ETHDEV_LOG(ERR, 5403 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5404 port_id, queue_id); 5405 return -EINVAL; 5406 } 5407 5408 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5409 memset(mode, 0, sizeof(*mode)); 5410 return eth_err(port_id, 5411 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5412 } 5413 5414 int 5415 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5416 struct rte_power_monitor_cond *pmc) 5417 { 5418 struct rte_eth_dev *dev; 5419 5420 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5421 dev = &rte_eth_devices[port_id]; 5422 5423 if (queue_id >= dev->data->nb_rx_queues) { 5424 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5425 return -EINVAL; 5426 } 5427 5428 if (pmc == NULL) { 5429 RTE_ETHDEV_LOG(ERR, 5430 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5431 port_id, queue_id); 5432 return -EINVAL; 5433 } 5434 5435 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5436 return eth_err(port_id, 5437 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5438 } 5439 5440 int 5441 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5442 struct rte_ether_addr *mc_addr_set, 5443 uint32_t nb_mc_addr) 5444 { 5445 struct rte_eth_dev *dev; 5446 5447 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5448 dev = &rte_eth_devices[port_id]; 5449 5450 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5451 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5452 mc_addr_set, nb_mc_addr)); 5453 } 5454 5455 int 5456 rte_eth_timesync_enable(uint16_t port_id) 5457 { 5458 struct rte_eth_dev *dev; 5459 5460 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5461 dev = &rte_eth_devices[port_id]; 5462 5463 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5464 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5465 } 5466 5467 int 5468 rte_eth_timesync_disable(uint16_t port_id) 5469 { 5470 struct rte_eth_dev *dev; 5471 5472 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5473 dev = &rte_eth_devices[port_id]; 5474 5475 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5476 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5477 } 5478 5479 int 5480 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5481 uint32_t flags) 5482 { 5483 struct rte_eth_dev *dev; 5484 5485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5486 dev = &rte_eth_devices[port_id]; 5487 5488 if (timestamp == NULL) { 5489 RTE_ETHDEV_LOG(ERR, 5490 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5491 port_id); 5492 return -EINVAL; 5493 } 5494 5495 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5496 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5497 (dev, timestamp, flags)); 5498 } 5499 5500 int 5501 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5502 struct timespec *timestamp) 5503 { 5504 struct rte_eth_dev *dev; 5505 5506 
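	/*
	 * Typical PTP flow (illustrative): after rte_eth_timesync_enable(),
	 * transmit the event frame and poll this function until the driver
	 * reports the latched Tx timestamp, then feed the result into
	 * rte_eth_timesync_adjust_time() or the application clock servo.
	 */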
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5507 dev = &rte_eth_devices[port_id]; 5508 5509 if (timestamp == NULL) { 5510 RTE_ETHDEV_LOG(ERR, 5511 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5512 port_id); 5513 return -EINVAL; 5514 } 5515 5516 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5517 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5518 (dev, timestamp)); 5519 } 5520 5521 int 5522 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5523 { 5524 struct rte_eth_dev *dev; 5525 5526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5527 dev = &rte_eth_devices[port_id]; 5528 5529 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5530 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5531 } 5532 5533 int 5534 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5535 { 5536 struct rte_eth_dev *dev; 5537 5538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5539 dev = &rte_eth_devices[port_id]; 5540 5541 if (timestamp == NULL) { 5542 RTE_ETHDEV_LOG(ERR, 5543 "Cannot read ethdev port %u timesync time to NULL\n", 5544 port_id); 5545 return -EINVAL; 5546 } 5547 5548 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5549 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5550 timestamp)); 5551 } 5552 5553 int 5554 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5555 { 5556 struct rte_eth_dev *dev; 5557 5558 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5559 dev = &rte_eth_devices[port_id]; 5560 5561 if (timestamp == NULL) { 5562 RTE_ETHDEV_LOG(ERR, 5563 "Cannot write ethdev port %u timesync from NULL time\n", 5564 port_id); 5565 return -EINVAL; 5566 } 5567 5568 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5569 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5570 timestamp)); 5571 } 5572 5573 int 5574 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5575 { 5576 struct rte_eth_dev *dev; 5577 5578 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5579 dev = &rte_eth_devices[port_id]; 5580 5581 if (clock == NULL) { 5582 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5583 port_id); 5584 return -EINVAL; 5585 } 5586 5587 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5588 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5589 } 5590 5591 int 5592 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5593 { 5594 struct rte_eth_dev *dev; 5595 5596 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5597 dev = &rte_eth_devices[port_id]; 5598 5599 if (info == NULL) { 5600 RTE_ETHDEV_LOG(ERR, 5601 "Cannot get ethdev port %u register info to NULL\n", 5602 port_id); 5603 return -EINVAL; 5604 } 5605 5606 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5607 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5608 } 5609 5610 int 5611 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5612 { 5613 struct rte_eth_dev *dev; 5614 5615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5616 dev = &rte_eth_devices[port_id]; 5617 5618 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5619 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5620 } 5621 5622 int 5623 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5624 { 5625 struct rte_eth_dev *dev; 5626 5627 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5628 dev = &rte_eth_devices[port_id]; 5629 5630 if (info == NULL) { 5631 RTE_ETHDEV_LOG(ERR, 5632 "Cannot get ethdev port %u EEPROM info to NULL\n", 5633 port_id); 5634 return -EINVAL; 5635 } 5636 5637 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5638 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5639 } 5640 5641 int 5642 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5643 { 5644 struct rte_eth_dev *dev; 5645 5646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5647 dev = &rte_eth_devices[port_id]; 5648 5649 if (info == NULL) { 5650 RTE_ETHDEV_LOG(ERR, 5651 "Cannot set ethdev port %u EEPROM from NULL info\n", 5652 port_id); 5653 return -EINVAL; 5654 } 5655 5656 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5657 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5658 } 5659 5660 int 5661 rte_eth_dev_get_module_info(uint16_t port_id, 5662 struct rte_eth_dev_module_info *modinfo) 5663 { 5664 struct rte_eth_dev *dev; 5665 5666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5667 dev = &rte_eth_devices[port_id]; 5668 5669 if (modinfo == NULL) { 5670 RTE_ETHDEV_LOG(ERR, 5671 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5672 port_id); 5673 return -EINVAL; 5674 } 5675 5676 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5677 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5678 } 5679 5680 int 5681 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5682 struct rte_dev_eeprom_info *info) 5683 { 5684 struct rte_eth_dev *dev; 5685 5686 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5687 dev = &rte_eth_devices[port_id]; 5688 5689 if (info == NULL) { 5690 RTE_ETHDEV_LOG(ERR, 5691 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5692 port_id); 5693 return -EINVAL; 5694 } 5695 5696 if (info->data == NULL) { 5697 RTE_ETHDEV_LOG(ERR, 5698 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5699 port_id); 5700 return -EINVAL; 5701 } 5702 5703 if (info->length == 0) { 5704 RTE_ETHDEV_LOG(ERR, 5705 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5706 port_id); 5707 return -EINVAL; 5708 } 5709 5710 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5711 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5712 } 5713 5714 int 5715 rte_eth_dev_get_dcb_info(uint16_t port_id, 5716 struct rte_eth_dcb_info *dcb_info) 5717 { 5718 struct rte_eth_dev *dev; 5719 5720 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5721 dev = &rte_eth_devices[port_id]; 5722 5723 if (dcb_info == NULL) { 5724 RTE_ETHDEV_LOG(ERR, 5725 "Cannot get ethdev port %u DCB info to NULL\n", 5726 port_id); 5727 return -EINVAL; 5728 } 5729 5730 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5731 5732 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5733 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5734 } 5735 5736 static void 5737 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5738 const struct rte_eth_desc_lim *desc_lim) 5739 { 5740 if (desc_lim->nb_align != 0) 5741 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5742 5743 if (desc_lim->nb_max != 0) 5744 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5745 5746 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5747 } 5748 5749 int 5750 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5751 uint16_t *nb_rx_desc, 5752 uint16_t *nb_tx_desc) 5753 { 5754 struct rte_eth_dev_info dev_info; 5755 int ret; 5756 5757 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
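	/*
	 * Worked example (illustrative limits): with nb_align = 32,
	 * nb_min = 64 and nb_max = 4096, a requested ring size of 1000 is
	 * rounded up to 1024 and then clamped into [64, 4096], so 1024 is
	 * written back. Applications typically call this just before
	 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
	 */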
5758 5759 ret = rte_eth_dev_info_get(port_id, &dev_info); 5760 if (ret != 0) 5761 return ret; 5762 5763 if (nb_rx_desc != NULL) 5764 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5765 5766 if (nb_tx_desc != NULL) 5767 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5768 5769 return 0; 5770 } 5771 5772 int 5773 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5774 struct rte_eth_hairpin_cap *cap) 5775 { 5776 struct rte_eth_dev *dev; 5777 5778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5779 dev = &rte_eth_devices[port_id]; 5780 5781 if (cap == NULL) { 5782 RTE_ETHDEV_LOG(ERR, 5783 "Cannot get ethdev port %u hairpin capability to NULL\n", 5784 port_id); 5785 return -EINVAL; 5786 } 5787 5788 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5789 memset(cap, 0, sizeof(*cap)); 5790 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5791 } 5792 5793 int 5794 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5795 { 5796 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5797 return 1; 5798 return 0; 5799 } 5800 5801 int 5802 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5803 { 5804 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5805 return 1; 5806 return 0; 5807 } 5808 5809 int 5810 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5811 { 5812 struct rte_eth_dev *dev; 5813 5814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5815 dev = &rte_eth_devices[port_id]; 5816 5817 if (pool == NULL) { 5818 RTE_ETHDEV_LOG(ERR, 5819 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5820 port_id); 5821 return -EINVAL; 5822 } 5823 5824 if (*dev->dev_ops->pool_ops_supported == NULL) 5825 return 1; /* all pools are supported */ 5826 5827 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5828 } 5829 5830 /** 5831 * A set of values to describe the possible states of a switch domain. 5832 */ 5833 enum rte_eth_switch_domain_state { 5834 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5835 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5836 }; 5837 5838 /** 5839 * Array of switch domains available for allocation. Array is sized to 5840 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5841 * ethdev ports in a single process. 
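 * A PMD allocates one domain per physical switch and reports it through
 * rte_eth_dev_info.switch_info.domain_id, so ports that share a switch
 * (for example a PF and its representors) can be grouped by applications.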
5842 */ 5843 static struct rte_eth_dev_switch { 5844 enum rte_eth_switch_domain_state state; 5845 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5846 5847 int 5848 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5849 { 5850 uint16_t i; 5851 5852 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5853 5854 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5855 if (eth_dev_switch_domains[i].state == 5856 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5857 eth_dev_switch_domains[i].state = 5858 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5859 *domain_id = i; 5860 return 0; 5861 } 5862 } 5863 5864 return -ENOSPC; 5865 } 5866 5867 int 5868 rte_eth_switch_domain_free(uint16_t domain_id) 5869 { 5870 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5871 domain_id >= RTE_MAX_ETHPORTS) 5872 return -EINVAL; 5873 5874 if (eth_dev_switch_domains[domain_id].state != 5875 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5876 return -EINVAL; 5877 5878 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5879 5880 return 0; 5881 } 5882 5883 static int 5884 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5885 { 5886 int state; 5887 struct rte_kvargs_pair *pair; 5888 char *letter; 5889 5890 arglist->str = strdup(str_in); 5891 if (arglist->str == NULL) 5892 return -ENOMEM; 5893 5894 letter = arglist->str; 5895 state = 0; 5896 arglist->count = 0; 5897 pair = &arglist->pairs[0]; 5898 while (1) { 5899 switch (state) { 5900 case 0: /* Initial */ 5901 if (*letter == '=') 5902 return -EINVAL; 5903 else if (*letter == '\0') 5904 return 0; 5905 5906 state = 1; 5907 pair->key = letter; 5908 /* fall-thru */ 5909 5910 case 1: /* Parsing key */ 5911 if (*letter == '=') { 5912 *letter = '\0'; 5913 pair->value = letter + 1; 5914 state = 2; 5915 } else if (*letter == ',' || *letter == '\0') 5916 return -EINVAL; 5917 break; 5918 5919 5920 case 2: /* Parsing value */ 5921 if (*letter == '[') 5922 state = 3; 5923 else if (*letter == ',') { 5924 *letter = '\0'; 5925 arglist->count++; 5926 pair = &arglist->pairs[arglist->count]; 5927 state = 0; 5928 } else if (*letter == '\0') { 5929 letter--; 5930 arglist->count++; 5931 pair = &arglist->pairs[arglist->count]; 5932 state = 0; 5933 } 5934 break; 5935 5936 case 3: /* Parsing list */ 5937 if (*letter == ']') 5938 state = 2; 5939 else if (*letter == '\0') 5940 return -EINVAL; 5941 break; 5942 } 5943 letter++; 5944 } 5945 } 5946 5947 int 5948 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5949 { 5950 struct rte_kvargs args; 5951 struct rte_kvargs_pair *pair; 5952 unsigned int i; 5953 int result = 0; 5954 5955 memset(eth_da, 0, sizeof(*eth_da)); 5956 5957 result = eth_dev_devargs_tokenise(&args, dargs); 5958 if (result < 0) 5959 goto parse_cleanup; 5960 5961 for (i = 0; i < args.count; i++) { 5962 pair = &args.pairs[i]; 5963 if (strcmp("representor", pair->key) == 0) { 5964 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5965 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5966 dargs); 5967 result = -1; 5968 goto parse_cleanup; 5969 } 5970 result = rte_eth_devargs_parse_representor_ports( 5971 pair->value, eth_da); 5972 if (result < 0) 5973 goto parse_cleanup; 5974 } 5975 } 5976 5977 parse_cleanup: 5978 if (args.str) 5979 free(args.str); 5980 5981 return result; 5982 } 5983 5984 int 5985 rte_eth_representor_id_get(const struct rte_eth_dev *ethdev, 5986 enum rte_eth_representor_type type, 5987 int controller, int pf, int representor_port, 5988 uint16_t *repr_id) 5989 { 5990 int ret, n, i, count; 5991 struct rte_eth_representor_info *info = NULL; 5992 
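	/*
	 * Map a devargs representor tuple (controller, pf, vf/sf index) to
	 * the representor ID of this port by scanning the ranges the PMD
	 * reports via rte_eth_representor_info_get(); legacy VF representors
	 * without range info fall back to a direct index mapping below.
	 */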
size_t size; 5993 5994 if (type == RTE_ETH_REPRESENTOR_NONE) 5995 return 0; 5996 if (repr_id == NULL) 5997 return -EINVAL; 5998 5999 /* Get PMD representor range info. */ 6000 ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL); 6001 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 6002 controller == -1 && pf == -1) { 6003 /* Direct mapping for legacy VF representor. */ 6004 *repr_id = representor_port; 6005 return 0; 6006 } else if (ret < 0) { 6007 return ret; 6008 } 6009 n = ret; 6010 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6011 info = calloc(1, size); 6012 if (info == NULL) 6013 return -ENOMEM; 6014 ret = rte_eth_representor_info_get(ethdev->data->port_id, info); 6015 if (ret < 0) 6016 goto out; 6017 6018 /* Default controller and pf to caller. */ 6019 if (controller == -1) 6020 controller = info->controller; 6021 if (pf == -1) 6022 pf = info->pf; 6023 6024 /* Locate representor ID. */ 6025 ret = -ENOENT; 6026 for (i = 0; i < n; ++i) { 6027 if (info->ranges[i].type != type) 6028 continue; 6029 if (info->ranges[i].controller != controller) 6030 continue; 6031 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6032 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6033 ethdev->data->port_id, info->ranges[i].id_base, 6034 info->ranges[i].id_end, i); 6035 continue; 6036 6037 } 6038 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6039 switch (info->ranges[i].type) { 6040 case RTE_ETH_REPRESENTOR_PF: 6041 if (pf < info->ranges[i].pf || 6042 pf >= info->ranges[i].pf + count) 6043 continue; 6044 *repr_id = info->ranges[i].id_base + 6045 (pf - info->ranges[i].pf); 6046 ret = 0; 6047 goto out; 6048 case RTE_ETH_REPRESENTOR_VF: 6049 if (info->ranges[i].pf != pf) 6050 continue; 6051 if (representor_port < info->ranges[i].vf || 6052 representor_port >= info->ranges[i].vf + count) 6053 continue; 6054 *repr_id = info->ranges[i].id_base + 6055 (representor_port - info->ranges[i].vf); 6056 ret = 0; 6057 goto out; 6058 case RTE_ETH_REPRESENTOR_SF: 6059 if (info->ranges[i].pf != pf) 6060 continue; 6061 if (representor_port < info->ranges[i].sf || 6062 representor_port >= info->ranges[i].sf + count) 6063 continue; 6064 *repr_id = info->ranges[i].id_base + 6065 (representor_port - info->ranges[i].sf); 6066 ret = 0; 6067 goto out; 6068 default: 6069 break; 6070 } 6071 } 6072 out: 6073 free(info); 6074 return ret; 6075 } 6076 6077 static int 6078 eth_dev_handle_port_list(const char *cmd __rte_unused, 6079 const char *params __rte_unused, 6080 struct rte_tel_data *d) 6081 { 6082 int port_id; 6083 6084 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6085 RTE_ETH_FOREACH_DEV(port_id) 6086 rte_tel_data_add_array_int(d, port_id); 6087 return 0; 6088 } 6089 6090 static void 6091 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6092 const char *stat_name) 6093 { 6094 int q; 6095 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6096 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6097 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6098 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6099 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6100 } 6101 6102 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6103 6104 static int 6105 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6106 const char *params, 6107 struct rte_tel_data *d) 6108 { 6109 struct rte_eth_stats stats; 6110 int port_id, ret; 6111 6112 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
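/*
 * Note on eth_dev_handle_port_xstats() above: the single allocation holds the
 * value array followed by the name array, i.e. the buffer is laid out as
 *
 *	[ struct rte_eth_xstat      x num_xstats ]  <- eth_xstats
 *	[ struct rte_eth_xstat_name x num_xstats ]  <- xstat_names
 *
 * so one free(eth_xstats) releases both on every exit path.
 */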
6225 "full-duplex" : "half-duplex"); 6226 return 0; 6227 } 6228 6229 int 6230 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue, 6231 struct rte_hairpin_peer_info *cur_info, 6232 struct rte_hairpin_peer_info *peer_info, 6233 uint32_t direction) 6234 { 6235 struct rte_eth_dev *dev; 6236 6237 /* Current queue information is not mandatory. */ 6238 if (peer_info == NULL) 6239 return -EINVAL; 6240 6241 /* No need to check the validity again. */ 6242 dev = &rte_eth_devices[peer_port]; 6243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update, 6244 -ENOTSUP); 6245 6246 return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue, 6247 cur_info, peer_info, direction); 6248 } 6249 6250 int 6251 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue, 6252 struct rte_hairpin_peer_info *peer_info, 6253 uint32_t direction) 6254 { 6255 struct rte_eth_dev *dev; 6256 6257 if (peer_info == NULL) 6258 return -EINVAL; 6259 6260 /* No need to check the validity again. */ 6261 dev = &rte_eth_devices[cur_port]; 6262 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind, 6263 -ENOTSUP); 6264 6265 return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue, 6266 peer_info, direction); 6267 } 6268 6269 int 6270 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue, 6271 uint32_t direction) 6272 { 6273 struct rte_eth_dev *dev; 6274 6275 /* No need to check the validity again. */ 6276 dev = &rte_eth_devices[cur_port]; 6277 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind, 6278 -ENOTSUP); 6279 6280 return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue, 6281 direction); 6282 } 6283 6284 int 6285 rte_eth_representor_info_get(uint16_t port_id, 6286 struct rte_eth_representor_info *info) 6287 { 6288 struct rte_eth_dev *dev; 6289 6290 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6291 dev = &rte_eth_devices[port_id]; 6292 6293 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); 6294 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6295 } 6296 6297 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 6298 6299 RTE_INIT(ethdev_init_telemetry) 6300 { 6301 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 6302 "Returns list of available ethdev ports. Takes no parameters"); 6303 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 6304 "Returns the common stats for a port. Parameters: int port_id"); 6305 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 6306 "Returns the extended stats for a port. Parameters: int port_id"); 6307 rte_telemetry_register_cmd("/ethdev/link_status", 6308 eth_dev_handle_port_link_status, 6309 "Returns the link status for a port. Parameters: int port_id"); 6310 } 6311