/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
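/*
 * Illustrative sketch (not part of the library): the name/offset pairs kept
 * in the eth_dev_*_stats_strings tables below let the basic counters be
 * exposed generically as xstats, by reading struct rte_eth_stats at the
 * recorded offset:
 *
 *	struct rte_eth_stats stats;
 *	uint64_t value;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	value = *(uint64_t *)((char *)&stats + eth_dev_stats_strings[0].offset);
 */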
/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
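	/*
	 * For example (illustrative): a PCI device named "0000:06:00.0"
	 * becomes the bus filter string "addr=0000:06:00.0", while a vdev
	 * named "net_tap0" becomes "name=net_tap0".
	 */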
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* A device matches the bus part, need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate over. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
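/*
 * Usage sketch (illustrative, not part of this file): an application walks
 * the ports matching a devargs filter with the iterator API above; the
 * RTE_ETH_FOREACH_MATCHING_DEV() helper in rte_ethdev.h wraps the same loop.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iter);
 *	}
 *
 * The final cleanup is only strictly needed if the walk stops early; it is a
 * no-op once the iterator has been exhausted.
 */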
/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port.
		 */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, so that the
 * same device has the same port ID in both the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release.
*/ 553 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 554 555 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 556 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) 557 break; 558 } 559 if (i == RTE_MAX_ETHPORTS) { 560 RTE_ETHDEV_LOG(ERR, 561 "Device %s is not driven by the primary process\n", 562 name); 563 } else { 564 eth_dev = eth_dev_get(i); 565 RTE_ASSERT(eth_dev->data->port_id == i); 566 } 567 568 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 569 return eth_dev; 570 } 571 572 int 573 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 574 { 575 if (eth_dev == NULL) 576 return -EINVAL; 577 578 eth_dev_shared_data_prepare(); 579 580 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 581 rte_eth_dev_callback_process(eth_dev, 582 RTE_ETH_EVENT_DESTROY, NULL); 583 584 eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); 585 586 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 587 588 eth_dev->state = RTE_ETH_DEV_UNUSED; 589 eth_dev->device = NULL; 590 eth_dev->process_private = NULL; 591 eth_dev->intr_handle = NULL; 592 eth_dev->rx_pkt_burst = NULL; 593 eth_dev->tx_pkt_burst = NULL; 594 eth_dev->tx_pkt_prepare = NULL; 595 eth_dev->rx_queue_count = NULL; 596 eth_dev->rx_descriptor_status = NULL; 597 eth_dev->tx_descriptor_status = NULL; 598 eth_dev->dev_ops = NULL; 599 600 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 601 rte_free(eth_dev->data->rx_queues); 602 rte_free(eth_dev->data->tx_queues); 603 rte_free(eth_dev->data->mac_addrs); 604 rte_free(eth_dev->data->hash_mac_addrs); 605 rte_free(eth_dev->data->dev_private); 606 pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); 607 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 608 } 609 610 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 611 612 return 0; 613 } 614 615 int 616 rte_eth_dev_is_valid_port(uint16_t port_id) 617 { 618 if (port_id >= RTE_MAX_ETHPORTS || 619 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 620 return 0; 621 else 622 return 1; 623 } 624 625 static int 626 eth_is_valid_owner_id(uint64_t owner_id) 627 { 628 if (owner_id == RTE_ETH_DEV_NO_OWNER || 629 eth_dev_shared_data->next_owner_id <= owner_id) 630 return 0; 631 return 1; 632 } 633 634 uint64_t 635 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 636 { 637 port_id = rte_eth_find_next(port_id); 638 while (port_id < RTE_MAX_ETHPORTS && 639 rte_eth_devices[port_id].data->owner.id != owner_id) 640 port_id = rte_eth_find_next(port_id + 1); 641 642 return port_id; 643 } 644 645 int 646 rte_eth_dev_owner_new(uint64_t *owner_id) 647 { 648 if (owner_id == NULL) { 649 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 650 return -EINVAL; 651 } 652 653 eth_dev_shared_data_prepare(); 654 655 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 656 657 *owner_id = eth_dev_shared_data->next_owner_id++; 658 659 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 660 return 0; 661 } 662 663 static int 664 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 665 const struct rte_eth_dev_owner *new_owner) 666 { 667 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 668 struct rte_eth_dev_owner *port_owner; 669 670 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 671 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 672 port_id); 673 return -ENODEV; 674 } 675 676 if (new_owner == NULL) { 677 RTE_ETHDEV_LOG(ERR, 678 "Cannot set ethdev port %u owner from NULL owner\n", 679 port_id); 680 return -EINVAL; 681 } 682 683 if 
(!eth_is_valid_owner_id(new_owner->id) && 684 !eth_is_valid_owner_id(old_owner_id)) { 685 RTE_ETHDEV_LOG(ERR, 686 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 687 old_owner_id, new_owner->id); 688 return -EINVAL; 689 } 690 691 port_owner = &rte_eth_devices[port_id].data->owner; 692 if (port_owner->id != old_owner_id) { 693 RTE_ETHDEV_LOG(ERR, 694 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 695 port_id, port_owner->name, port_owner->id); 696 return -EPERM; 697 } 698 699 /* can not truncate (same structure) */ 700 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 701 702 port_owner->id = new_owner->id; 703 704 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 705 port_id, new_owner->name, new_owner->id); 706 707 return 0; 708 } 709 710 int 711 rte_eth_dev_owner_set(const uint16_t port_id, 712 const struct rte_eth_dev_owner *owner) 713 { 714 int ret; 715 716 eth_dev_shared_data_prepare(); 717 718 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 719 720 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 721 722 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 723 return ret; 724 } 725 726 int 727 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 728 { 729 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 730 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 731 int ret; 732 733 eth_dev_shared_data_prepare(); 734 735 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 736 737 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 738 739 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 740 return ret; 741 } 742 743 int 744 rte_eth_dev_owner_delete(const uint64_t owner_id) 745 { 746 uint16_t port_id; 747 int ret = 0; 748 749 eth_dev_shared_data_prepare(); 750 751 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 752 753 if (eth_is_valid_owner_id(owner_id)) { 754 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) 755 if (rte_eth_devices[port_id].data->owner.id == owner_id) 756 memset(&rte_eth_devices[port_id].data->owner, 0, 757 sizeof(struct rte_eth_dev_owner)); 758 RTE_ETHDEV_LOG(NOTICE, 759 "All port owners owned by %016"PRIx64" identifier have removed\n", 760 owner_id); 761 } else { 762 RTE_ETHDEV_LOG(ERR, 763 "Invalid owner ID=%016"PRIx64"\n", 764 owner_id); 765 ret = -EINVAL; 766 } 767 768 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 769 770 return ret; 771 } 772 773 int 774 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 775 { 776 struct rte_eth_dev *ethdev; 777 778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 779 ethdev = &rte_eth_devices[port_id]; 780 781 if (!eth_dev_is_allocated(ethdev)) { 782 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 783 port_id); 784 return -ENODEV; 785 } 786 787 if (owner == NULL) { 788 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 789 port_id); 790 return -EINVAL; 791 } 792 793 eth_dev_shared_data_prepare(); 794 795 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 796 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 797 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 798 799 return 0; 800 } 801 802 int 803 rte_eth_dev_socket_id(uint16_t port_id) 804 { 805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 806 return rte_eth_devices[port_id].data->numa_node; 807 } 808 809 void * 810 rte_eth_dev_get_sec_ctx(uint16_t port_id) 811 { 812 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 813 return 
rte_eth_devices[port_id].security_ctx; 814 } 815 816 uint16_t 817 rte_eth_dev_count_avail(void) 818 { 819 uint16_t p; 820 uint16_t count; 821 822 count = 0; 823 824 RTE_ETH_FOREACH_DEV(p) 825 count++; 826 827 return count; 828 } 829 830 uint16_t 831 rte_eth_dev_count_total(void) 832 { 833 uint16_t port, count = 0; 834 835 RTE_ETH_FOREACH_VALID_DEV(port) 836 count++; 837 838 return count; 839 } 840 841 int 842 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 843 { 844 char *tmp; 845 846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 847 848 if (name == NULL) { 849 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 850 port_id); 851 return -EINVAL; 852 } 853 854 /* shouldn't check 'rte_eth_devices[i].data', 855 * because it might be overwritten by VDEV PMD */ 856 tmp = eth_dev_shared_data->data[port_id].name; 857 strcpy(name, tmp); 858 return 0; 859 } 860 861 int 862 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 863 { 864 uint16_t pid; 865 866 if (name == NULL) { 867 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 868 return -EINVAL; 869 } 870 871 if (port_id == NULL) { 872 RTE_ETHDEV_LOG(ERR, 873 "Cannot get port ID to NULL for %s\n", name); 874 return -EINVAL; 875 } 876 877 RTE_ETH_FOREACH_VALID_DEV(pid) 878 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 879 *port_id = pid; 880 return 0; 881 } 882 883 return -ENODEV; 884 } 885 886 static int 887 eth_err(uint16_t port_id, int ret) 888 { 889 if (ret == 0) 890 return 0; 891 if (rte_eth_dev_is_removed(port_id)) 892 return -EIO; 893 return ret; 894 } 895 896 static void 897 eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) 898 { 899 void **rxq = dev->data->rx_queues; 900 901 if (rxq[qid] == NULL) 902 return; 903 904 if (dev->dev_ops->rx_queue_release != NULL) 905 (*dev->dev_ops->rx_queue_release)(dev, qid); 906 rxq[qid] = NULL; 907 } 908 909 static void 910 eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) 911 { 912 void **txq = dev->data->tx_queues; 913 914 if (txq[qid] == NULL) 915 return; 916 917 if (dev->dev_ops->tx_queue_release != NULL) 918 (*dev->dev_ops->tx_queue_release)(dev, qid); 919 txq[qid] = NULL; 920 } 921 922 static int 923 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 924 { 925 uint16_t old_nb_queues = dev->data->nb_rx_queues; 926 unsigned i; 927 928 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 929 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 930 sizeof(dev->data->rx_queues[0]) * 931 RTE_MAX_QUEUES_PER_PORT, 932 RTE_CACHE_LINE_SIZE); 933 if (dev->data->rx_queues == NULL) { 934 dev->data->nb_rx_queues = 0; 935 return -(ENOMEM); 936 } 937 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 938 for (i = nb_queues; i < old_nb_queues; i++) 939 eth_dev_rxq_release(dev, i); 940 941 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 942 for (i = nb_queues; i < old_nb_queues; i++) 943 eth_dev_rxq_release(dev, i); 944 945 rte_free(dev->data->rx_queues); 946 dev->data->rx_queues = NULL; 947 } 948 dev->data->nb_rx_queues = nb_queues; 949 return 0; 950 } 951 952 static int 953 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 954 { 955 uint16_t port_id; 956 957 if (rx_queue_id >= dev->data->nb_rx_queues) { 958 port_id = dev->data->port_id; 959 RTE_ETHDEV_LOG(ERR, 960 "Invalid Rx queue_id=%u of device with port_id=%u\n", 961 rx_queue_id, port_id); 962 return -EINVAL; 963 } 964 965 if (dev->data->rx_queues[rx_queue_id] 
== NULL) { 966 port_id = dev->data->port_id; 967 RTE_ETHDEV_LOG(ERR, 968 "Queue %u of device with port_id=%u has not been setup\n", 969 rx_queue_id, port_id); 970 return -EINVAL; 971 } 972 973 return 0; 974 } 975 976 static int 977 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 978 { 979 uint16_t port_id; 980 981 if (tx_queue_id >= dev->data->nb_tx_queues) { 982 port_id = dev->data->port_id; 983 RTE_ETHDEV_LOG(ERR, 984 "Invalid Tx queue_id=%u of device with port_id=%u\n", 985 tx_queue_id, port_id); 986 return -EINVAL; 987 } 988 989 if (dev->data->tx_queues[tx_queue_id] == NULL) { 990 port_id = dev->data->port_id; 991 RTE_ETHDEV_LOG(ERR, 992 "Queue %u of device with port_id=%u has not been setup\n", 993 tx_queue_id, port_id); 994 return -EINVAL; 995 } 996 997 return 0; 998 } 999 1000 int 1001 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 1002 { 1003 struct rte_eth_dev *dev; 1004 int ret; 1005 1006 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1007 dev = &rte_eth_devices[port_id]; 1008 1009 if (!dev->data->dev_started) { 1010 RTE_ETHDEV_LOG(ERR, 1011 "Port %u must be started before start any queue\n", 1012 port_id); 1013 return -EINVAL; 1014 } 1015 1016 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1017 if (ret != 0) 1018 return ret; 1019 1020 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 1021 1022 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1023 RTE_ETHDEV_LOG(INFO, 1024 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1025 rx_queue_id, port_id); 1026 return -EINVAL; 1027 } 1028 1029 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1030 RTE_ETHDEV_LOG(INFO, 1031 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1032 rx_queue_id, port_id); 1033 return 0; 1034 } 1035 1036 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 1037 } 1038 1039 int 1040 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 1041 { 1042 struct rte_eth_dev *dev; 1043 int ret; 1044 1045 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1046 dev = &rte_eth_devices[port_id]; 1047 1048 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1049 if (ret != 0) 1050 return ret; 1051 1052 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 1053 1054 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1055 RTE_ETHDEV_LOG(INFO, 1056 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1057 rx_queue_id, port_id); 1058 return -EINVAL; 1059 } 1060 1061 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1062 RTE_ETHDEV_LOG(INFO, 1063 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1064 rx_queue_id, port_id); 1065 return 0; 1066 } 1067 1068 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 1069 } 1070 1071 int 1072 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 1073 { 1074 struct rte_eth_dev *dev; 1075 int ret; 1076 1077 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1078 dev = &rte_eth_devices[port_id]; 1079 1080 if (!dev->data->dev_started) { 1081 RTE_ETHDEV_LOG(ERR, 1082 "Port %u must be started before start any queue\n", 1083 port_id); 1084 return -EINVAL; 1085 } 1086 1087 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1088 if (ret != 0) 1089 return ret; 1090 1091 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 1092 1093 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) 
{ 1094 RTE_ETHDEV_LOG(INFO, 1095 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1096 tx_queue_id, port_id); 1097 return -EINVAL; 1098 } 1099 1100 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1101 RTE_ETHDEV_LOG(INFO, 1102 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1103 tx_queue_id, port_id); 1104 return 0; 1105 } 1106 1107 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 1108 } 1109 1110 int 1111 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 1112 { 1113 struct rte_eth_dev *dev; 1114 int ret; 1115 1116 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1117 dev = &rte_eth_devices[port_id]; 1118 1119 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1120 if (ret != 0) 1121 return ret; 1122 1123 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1124 1125 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1126 RTE_ETHDEV_LOG(INFO, 1127 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1128 tx_queue_id, port_id); 1129 return -EINVAL; 1130 } 1131 1132 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1133 RTE_ETHDEV_LOG(INFO, 1134 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1135 tx_queue_id, port_id); 1136 return 0; 1137 } 1138 1139 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1140 } 1141 1142 static int 1143 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1144 { 1145 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1146 unsigned i; 1147 1148 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1149 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1150 sizeof(dev->data->tx_queues[0]) * 1151 RTE_MAX_QUEUES_PER_PORT, 1152 RTE_CACHE_LINE_SIZE); 1153 if (dev->data->tx_queues == NULL) { 1154 dev->data->nb_tx_queues = 0; 1155 return -(ENOMEM); 1156 } 1157 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1158 for (i = nb_queues; i < old_nb_queues; i++) 1159 eth_dev_txq_release(dev, i); 1160 1161 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1162 for (i = nb_queues; i < old_nb_queues; i++) 1163 eth_dev_txq_release(dev, i); 1164 1165 rte_free(dev->data->tx_queues); 1166 dev->data->tx_queues = NULL; 1167 } 1168 dev->data->nb_tx_queues = nb_queues; 1169 return 0; 1170 } 1171 1172 uint32_t 1173 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1174 { 1175 switch (speed) { 1176 case ETH_SPEED_NUM_10M: 1177 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD; 1178 case ETH_SPEED_NUM_100M: 1179 return duplex ? 
ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD; 1180 case ETH_SPEED_NUM_1G: 1181 return ETH_LINK_SPEED_1G; 1182 case ETH_SPEED_NUM_2_5G: 1183 return ETH_LINK_SPEED_2_5G; 1184 case ETH_SPEED_NUM_5G: 1185 return ETH_LINK_SPEED_5G; 1186 case ETH_SPEED_NUM_10G: 1187 return ETH_LINK_SPEED_10G; 1188 case ETH_SPEED_NUM_20G: 1189 return ETH_LINK_SPEED_20G; 1190 case ETH_SPEED_NUM_25G: 1191 return ETH_LINK_SPEED_25G; 1192 case ETH_SPEED_NUM_40G: 1193 return ETH_LINK_SPEED_40G; 1194 case ETH_SPEED_NUM_50G: 1195 return ETH_LINK_SPEED_50G; 1196 case ETH_SPEED_NUM_56G: 1197 return ETH_LINK_SPEED_56G; 1198 case ETH_SPEED_NUM_100G: 1199 return ETH_LINK_SPEED_100G; 1200 case ETH_SPEED_NUM_200G: 1201 return ETH_LINK_SPEED_200G; 1202 default: 1203 return 0; 1204 } 1205 } 1206 1207 const char * 1208 rte_eth_dev_rx_offload_name(uint64_t offload) 1209 { 1210 const char *name = "UNKNOWN"; 1211 unsigned int i; 1212 1213 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1214 if (offload == eth_dev_rx_offload_names[i].offload) { 1215 name = eth_dev_rx_offload_names[i].name; 1216 break; 1217 } 1218 } 1219 1220 return name; 1221 } 1222 1223 const char * 1224 rte_eth_dev_tx_offload_name(uint64_t offload) 1225 { 1226 const char *name = "UNKNOWN"; 1227 unsigned int i; 1228 1229 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1230 if (offload == eth_dev_tx_offload_names[i].offload) { 1231 name = eth_dev_tx_offload_names[i].name; 1232 break; 1233 } 1234 } 1235 1236 return name; 1237 } 1238 1239 static inline int 1240 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1241 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1242 { 1243 int ret = 0; 1244 1245 if (dev_info_size == 0) { 1246 if (config_size != max_rx_pkt_len) { 1247 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1248 " %u != %u is not allowed\n", 1249 port_id, config_size, max_rx_pkt_len); 1250 ret = -EINVAL; 1251 } 1252 } else if (config_size > dev_info_size) { 1253 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1254 "> max allowed value %u\n", port_id, config_size, 1255 dev_info_size); 1256 ret = -EINVAL; 1257 } else if (config_size < RTE_ETHER_MIN_LEN) { 1258 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1259 "< min allowed value %u\n", port_id, config_size, 1260 (unsigned int)RTE_ETHER_MIN_LEN); 1261 ret = -EINVAL; 1262 } 1263 return ret; 1264 } 1265 1266 /* 1267 * Validate offloads that are requested through rte_eth_dev_configure against 1268 * the offloads successfully set by the Ethernet device. 1269 * 1270 * @param port_id 1271 * The port identifier of the Ethernet device. 1272 * @param req_offloads 1273 * The offloads that have been requested through `rte_eth_dev_configure`. 1274 * @param set_offloads 1275 * The offloads successfully set by the Ethernet device. 1276 * @param offload_type 1277 * The offload type i.e. Rx/Tx string. 1278 * @param offload_name 1279 * The function that prints the offload name. 1280 * @return 1281 * - (0) if validation successful. 1282 * - (-EINVAL) if requested offload has been silently disabled. 1283 * 1284 */ 1285 static int 1286 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1287 uint64_t set_offloads, const char *offload_type, 1288 const char *(*offload_name)(uint64_t)) 1289 { 1290 uint64_t offloads_diff = req_offloads ^ set_offloads; 1291 uint64_t offload; 1292 int ret = 0; 1293 1294 while (offloads_diff != 0) { 1295 /* Check if any offload is requested but not enabled. 
*/ 1296 offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); 1297 if (offload & req_offloads) { 1298 RTE_ETHDEV_LOG(ERR, 1299 "Port %u failed to enable %s offload %s\n", 1300 port_id, offload_type, offload_name(offload)); 1301 ret = -EINVAL; 1302 } 1303 1304 /* Check if offload couldn't be disabled. */ 1305 if (offload & set_offloads) { 1306 RTE_ETHDEV_LOG(DEBUG, 1307 "Port %u %s offload %s is not requested but enabled\n", 1308 port_id, offload_type, offload_name(offload)); 1309 } 1310 1311 offloads_diff &= ~offload; 1312 } 1313 1314 return ret; 1315 } 1316 1317 static uint32_t 1318 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1319 { 1320 uint32_t overhead_len; 1321 1322 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1323 overhead_len = max_rx_pktlen - max_mtu; 1324 else 1325 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1326 1327 return overhead_len; 1328 } 1329 1330 /* rte_eth_dev_info_get() should be called prior to this function */ 1331 static int 1332 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1333 uint16_t mtu) 1334 { 1335 uint32_t overhead_len; 1336 uint32_t frame_size; 1337 1338 if (mtu < dev_info->min_mtu) { 1339 RTE_ETHDEV_LOG(ERR, 1340 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1341 mtu, dev_info->min_mtu, port_id); 1342 return -EINVAL; 1343 } 1344 if (mtu > dev_info->max_mtu) { 1345 RTE_ETHDEV_LOG(ERR, 1346 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1347 mtu, dev_info->max_mtu, port_id); 1348 return -EINVAL; 1349 } 1350 1351 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1352 dev_info->max_mtu); 1353 frame_size = mtu + overhead_len; 1354 if (frame_size < RTE_ETHER_MIN_LEN) { 1355 RTE_ETHDEV_LOG(ERR, 1356 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1357 frame_size, RTE_ETHER_MIN_LEN, port_id); 1358 return -EINVAL; 1359 } 1360 1361 if (frame_size > dev_info->max_rx_pktlen) { 1362 RTE_ETHDEV_LOG(ERR, 1363 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1364 frame_size, dev_info->max_rx_pktlen, port_id); 1365 return -EINVAL; 1366 } 1367 1368 return 0; 1369 } 1370 1371 int 1372 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1373 const struct rte_eth_conf *dev_conf) 1374 { 1375 struct rte_eth_dev *dev; 1376 struct rte_eth_dev_info dev_info; 1377 struct rte_eth_conf orig_conf; 1378 int diag; 1379 int ret; 1380 uint16_t old_mtu; 1381 1382 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1383 dev = &rte_eth_devices[port_id]; 1384 1385 if (dev_conf == NULL) { 1386 RTE_ETHDEV_LOG(ERR, 1387 "Cannot configure ethdev port %u from NULL config\n", 1388 port_id); 1389 return -EINVAL; 1390 } 1391 1392 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1393 1394 if (dev->data->dev_started) { 1395 RTE_ETHDEV_LOG(ERR, 1396 "Port %u must be stopped to allow configuration\n", 1397 port_id); 1398 return -EBUSY; 1399 } 1400 1401 /* 1402 * Ensure that "dev_configured" is always 0 each time prepare to do 1403 * dev_configure() to avoid any non-anticipated behaviour. 1404 * And set to 1 when dev_configure() is executed successfully. 1405 */ 1406 dev->data->dev_configured = 0; 1407 1408 /* Store original config, as rollback required on failure */ 1409 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1410 1411 /* 1412 * Copy the dev_conf parameter into the dev structure. 
1413 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1414 */ 1415 if (dev_conf != &dev->data->dev_conf) 1416 memcpy(&dev->data->dev_conf, dev_conf, 1417 sizeof(dev->data->dev_conf)); 1418 1419 /* Backup mtu for rollback */ 1420 old_mtu = dev->data->mtu; 1421 1422 ret = rte_eth_dev_info_get(port_id, &dev_info); 1423 if (ret != 0) 1424 goto rollback; 1425 1426 /* If number of queues specified by application for both Rx and Tx is 1427 * zero, use driver preferred values. This cannot be done individually 1428 * as it is valid for either Tx or Rx (but not both) to be zero. 1429 * If driver does not provide any preferred valued, fall back on 1430 * EAL defaults. 1431 */ 1432 if (nb_rx_q == 0 && nb_tx_q == 0) { 1433 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1434 if (nb_rx_q == 0) 1435 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1436 nb_tx_q = dev_info.default_txportconf.nb_queues; 1437 if (nb_tx_q == 0) 1438 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1439 } 1440 1441 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1442 RTE_ETHDEV_LOG(ERR, 1443 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1444 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1445 ret = -EINVAL; 1446 goto rollback; 1447 } 1448 1449 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1450 RTE_ETHDEV_LOG(ERR, 1451 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1452 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1453 ret = -EINVAL; 1454 goto rollback; 1455 } 1456 1457 /* 1458 * Check that the numbers of Rx and Tx queues are not greater 1459 * than the maximum number of Rx and Tx queues supported by the 1460 * configured device. 1461 */ 1462 if (nb_rx_q > dev_info.max_rx_queues) { 1463 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1464 port_id, nb_rx_q, dev_info.max_rx_queues); 1465 ret = -EINVAL; 1466 goto rollback; 1467 } 1468 1469 if (nb_tx_q > dev_info.max_tx_queues) { 1470 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1471 port_id, nb_tx_q, dev_info.max_tx_queues); 1472 ret = -EINVAL; 1473 goto rollback; 1474 } 1475 1476 /* Check that the device supports requested interrupts */ 1477 if ((dev_conf->intr_conf.lsc == 1) && 1478 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1479 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1480 dev->device->driver->name); 1481 ret = -EINVAL; 1482 goto rollback; 1483 } 1484 if ((dev_conf->intr_conf.rmv == 1) && 1485 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1486 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1487 dev->device->driver->name); 1488 ret = -EINVAL; 1489 goto rollback; 1490 } 1491 1492 if (dev_conf->rxmode.mtu == 0) 1493 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1494 1495 ret = eth_dev_validate_mtu(port_id, &dev_info, 1496 dev->data->dev_conf.rxmode.mtu); 1497 if (ret != 0) 1498 goto rollback; 1499 1500 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1501 1502 /* 1503 * If LRO is enabled, check that the maximum aggregated packet 1504 * size is supported by the configured device. 
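	 * For example (illustrative numbers): with an MTU of 1500 and an
	 * overhead of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 18 bytes,
	 * max_rx_pktlen works out to 1518, and a max_lro_pkt_size left at 0
	 * defaults to that value below.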
1505 */ 1506 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1507 uint32_t max_rx_pktlen; 1508 uint32_t overhead_len; 1509 1510 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1511 dev_info.max_mtu); 1512 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1513 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1514 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1515 ret = eth_dev_check_lro_pkt_size(port_id, 1516 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1517 max_rx_pktlen, 1518 dev_info.max_lro_pkt_size); 1519 if (ret != 0) 1520 goto rollback; 1521 } 1522 1523 /* Any requested offloading must be within its device capabilities */ 1524 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1525 dev_conf->rxmode.offloads) { 1526 RTE_ETHDEV_LOG(ERR, 1527 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1528 "capabilities 0x%"PRIx64" in %s()\n", 1529 port_id, dev_conf->rxmode.offloads, 1530 dev_info.rx_offload_capa, 1531 __func__); 1532 ret = -EINVAL; 1533 goto rollback; 1534 } 1535 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1536 dev_conf->txmode.offloads) { 1537 RTE_ETHDEV_LOG(ERR, 1538 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1539 "capabilities 0x%"PRIx64" in %s()\n", 1540 port_id, dev_conf->txmode.offloads, 1541 dev_info.tx_offload_capa, 1542 __func__); 1543 ret = -EINVAL; 1544 goto rollback; 1545 } 1546 1547 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1548 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1549 1550 /* Check that device supports requested rss hash functions. */ 1551 if ((dev_info.flow_type_rss_offloads | 1552 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1553 dev_info.flow_type_rss_offloads) { 1554 RTE_ETHDEV_LOG(ERR, 1555 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1556 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1557 dev_info.flow_type_rss_offloads); 1558 ret = -EINVAL; 1559 goto rollback; 1560 } 1561 1562 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1563 if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) && 1564 (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) { 1565 RTE_ETHDEV_LOG(ERR, 1566 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1567 port_id, 1568 rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH)); 1569 ret = -EINVAL; 1570 goto rollback; 1571 } 1572 1573 /* 1574 * Setup new number of Rx/Tx queues and reconfigure device. 1575 */ 1576 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1577 if (diag != 0) { 1578 RTE_ETHDEV_LOG(ERR, 1579 "Port%u eth_dev_rx_queue_config = %d\n", 1580 port_id, diag); 1581 ret = diag; 1582 goto rollback; 1583 } 1584 1585 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1586 if (diag != 0) { 1587 RTE_ETHDEV_LOG(ERR, 1588 "Port%u eth_dev_tx_queue_config = %d\n", 1589 port_id, diag); 1590 eth_dev_rx_queue_config(dev, 0); 1591 ret = diag; 1592 goto rollback; 1593 } 1594 1595 diag = (*dev->dev_ops->dev_configure)(dev); 1596 if (diag != 0) { 1597 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1598 port_id, diag); 1599 ret = eth_err(port_id, diag); 1600 goto reset_queues; 1601 } 1602 1603 /* Initialize Rx profiling if enabled at compilation time. 
*/ 1604 diag = __rte_eth_dev_profile_init(port_id, dev); 1605 if (diag != 0) { 1606 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1607 port_id, diag); 1608 ret = eth_err(port_id, diag); 1609 goto reset_queues; 1610 } 1611 1612 /* Validate Rx offloads. */ 1613 diag = eth_dev_validate_offloads(port_id, 1614 dev_conf->rxmode.offloads, 1615 dev->data->dev_conf.rxmode.offloads, "Rx", 1616 rte_eth_dev_rx_offload_name); 1617 if (diag != 0) { 1618 ret = diag; 1619 goto reset_queues; 1620 } 1621 1622 /* Validate Tx offloads. */ 1623 diag = eth_dev_validate_offloads(port_id, 1624 dev_conf->txmode.offloads, 1625 dev->data->dev_conf.txmode.offloads, "Tx", 1626 rte_eth_dev_tx_offload_name); 1627 if (diag != 0) { 1628 ret = diag; 1629 goto reset_queues; 1630 } 1631 1632 dev->data->dev_configured = 1; 1633 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1634 return 0; 1635 reset_queues: 1636 eth_dev_rx_queue_config(dev, 0); 1637 eth_dev_tx_queue_config(dev, 0); 1638 rollback: 1639 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1640 if (old_mtu != dev->data->mtu) 1641 dev->data->mtu = old_mtu; 1642 1643 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1644 return ret; 1645 } 1646 1647 void 1648 rte_eth_dev_internal_reset(struct rte_eth_dev *dev) 1649 { 1650 if (dev->data->dev_started) { 1651 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1652 dev->data->port_id); 1653 return; 1654 } 1655 1656 eth_dev_rx_queue_config(dev, 0); 1657 eth_dev_tx_queue_config(dev, 0); 1658 1659 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1660 } 1661 1662 static void 1663 eth_dev_mac_restore(struct rte_eth_dev *dev, 1664 struct rte_eth_dev_info *dev_info) 1665 { 1666 struct rte_ether_addr *addr; 1667 uint16_t i; 1668 uint32_t pool = 0; 1669 uint64_t pool_mask; 1670 1671 /* replay MAC address configuration including default MAC */ 1672 addr = &dev->data->mac_addrs[0]; 1673 if (*dev->dev_ops->mac_addr_set != NULL) 1674 (*dev->dev_ops->mac_addr_set)(dev, addr); 1675 else if (*dev->dev_ops->mac_addr_add != NULL) 1676 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1677 1678 if (*dev->dev_ops->mac_addr_add != NULL) { 1679 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1680 addr = &dev->data->mac_addrs[i]; 1681 1682 /* skip zero address */ 1683 if (rte_is_zero_ether_addr(addr)) 1684 continue; 1685 1686 pool = 0; 1687 pool_mask = dev->data->mac_pool_sel[i]; 1688 1689 do { 1690 if (pool_mask & UINT64_C(1)) 1691 (*dev->dev_ops->mac_addr_add)(dev, 1692 addr, i, pool); 1693 pool_mask >>= 1; 1694 pool++; 1695 } while (pool_mask); 1696 } 1697 } 1698 } 1699 1700 static int 1701 eth_dev_config_restore(struct rte_eth_dev *dev, 1702 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1703 { 1704 int ret; 1705 1706 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1707 eth_dev_mac_restore(dev, dev_info); 1708 1709 /* replay promiscuous configuration */ 1710 /* 1711 * use callbacks directly since we don't need port_id check and 1712 * would like to bypass the same value set 1713 */ 1714 if (rte_eth_promiscuous_get(port_id) == 1 && 1715 *dev->dev_ops->promiscuous_enable != NULL) { 1716 ret = eth_err(port_id, 1717 (*dev->dev_ops->promiscuous_enable)(dev)); 1718 if (ret != 0 && ret != -ENOTSUP) { 1719 RTE_ETHDEV_LOG(ERR, 1720 "Failed to enable promiscuous mode for device (port %u): %s\n", 1721 port_id, rte_strerror(-ret)); 1722 return ret; 1723 } 1724 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1725 
*dev->dev_ops->promiscuous_disable != NULL) { 1726 ret = eth_err(port_id, 1727 (*dev->dev_ops->promiscuous_disable)(dev)); 1728 if (ret != 0 && ret != -ENOTSUP) { 1729 RTE_ETHDEV_LOG(ERR, 1730 "Failed to disable promiscuous mode for device (port %u): %s\n", 1731 port_id, rte_strerror(-ret)); 1732 return ret; 1733 } 1734 } 1735 1736 /* replay all multicast configuration */ 1737 /* 1738 * use callbacks directly since we don't need port_id check and 1739 * would like to bypass the same value set 1740 */ 1741 if (rte_eth_allmulticast_get(port_id) == 1 && 1742 *dev->dev_ops->allmulticast_enable != NULL) { 1743 ret = eth_err(port_id, 1744 (*dev->dev_ops->allmulticast_enable)(dev)); 1745 if (ret != 0 && ret != -ENOTSUP) { 1746 RTE_ETHDEV_LOG(ERR, 1747 "Failed to enable allmulticast mode for device (port %u): %s\n", 1748 port_id, rte_strerror(-ret)); 1749 return ret; 1750 } 1751 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1752 *dev->dev_ops->allmulticast_disable != NULL) { 1753 ret = eth_err(port_id, 1754 (*dev->dev_ops->allmulticast_disable)(dev)); 1755 if (ret != 0 && ret != -ENOTSUP) { 1756 RTE_ETHDEV_LOG(ERR, 1757 "Failed to disable allmulticast mode for device (port %u): %s\n", 1758 port_id, rte_strerror(-ret)); 1759 return ret; 1760 } 1761 } 1762 1763 return 0; 1764 } 1765 1766 int 1767 rte_eth_dev_start(uint16_t port_id) 1768 { 1769 struct rte_eth_dev *dev; 1770 struct rte_eth_dev_info dev_info; 1771 int diag; 1772 int ret, ret_stop; 1773 1774 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1775 dev = &rte_eth_devices[port_id]; 1776 1777 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1778 1779 if (dev->data->dev_configured == 0) { 1780 RTE_ETHDEV_LOG(INFO, 1781 "Device with port_id=%"PRIu16" is not configured.\n", 1782 port_id); 1783 return -EINVAL; 1784 } 1785 1786 if (dev->data->dev_started != 0) { 1787 RTE_ETHDEV_LOG(INFO, 1788 "Device with port_id=%"PRIu16" already started\n", 1789 port_id); 1790 return 0; 1791 } 1792 1793 ret = rte_eth_dev_info_get(port_id, &dev_info); 1794 if (ret != 0) 1795 return ret; 1796 1797 /* Lets restore MAC now if device does not support live change */ 1798 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1799 eth_dev_mac_restore(dev, &dev_info); 1800 1801 diag = (*dev->dev_ops->dev_start)(dev); 1802 if (diag == 0) 1803 dev->data->dev_started = 1; 1804 else 1805 return eth_err(port_id, diag); 1806 1807 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1808 if (ret != 0) { 1809 RTE_ETHDEV_LOG(ERR, 1810 "Error during restoring configuration for device (port %u): %s\n", 1811 port_id, rte_strerror(-ret)); 1812 ret_stop = rte_eth_dev_stop(port_id); 1813 if (ret_stop != 0) { 1814 RTE_ETHDEV_LOG(ERR, 1815 "Failed to stop device (port %u): %s\n", 1816 port_id, rte_strerror(-ret_stop)); 1817 } 1818 1819 return ret; 1820 } 1821 1822 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1823 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1824 (*dev->dev_ops->link_update)(dev, 0); 1825 } 1826 1827 /* expose selection of PMD fast-path functions */ 1828 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1829 1830 rte_ethdev_trace_start(port_id); 1831 return 0; 1832 } 1833 1834 int 1835 rte_eth_dev_stop(uint16_t port_id) 1836 { 1837 struct rte_eth_dev *dev; 1838 int ret; 1839 1840 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1841 dev = &rte_eth_devices[port_id]; 1842 1843 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); 1844 1845 if (dev->data->dev_started == 0) { 1846 RTE_ETHDEV_LOG(INFO, 1847 "Device with 
port_id=%"PRIu16" already stopped\n", 1848 port_id); 1849 return 0; 1850 } 1851 1852 /* point fast-path functions to dummy ones */ 1853 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1854 1855 dev->data->dev_started = 0; 1856 ret = (*dev->dev_ops->dev_stop)(dev); 1857 rte_ethdev_trace_stop(port_id, ret); 1858 1859 return ret; 1860 } 1861 1862 int 1863 rte_eth_dev_set_link_up(uint16_t port_id) 1864 { 1865 struct rte_eth_dev *dev; 1866 1867 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1868 dev = &rte_eth_devices[port_id]; 1869 1870 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1871 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1872 } 1873 1874 int 1875 rte_eth_dev_set_link_down(uint16_t port_id) 1876 { 1877 struct rte_eth_dev *dev; 1878 1879 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1880 dev = &rte_eth_devices[port_id]; 1881 1882 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1883 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1884 } 1885 1886 int 1887 rte_eth_dev_close(uint16_t port_id) 1888 { 1889 struct rte_eth_dev *dev; 1890 int firsterr, binerr; 1891 int *lasterr = &firsterr; 1892 1893 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1894 dev = &rte_eth_devices[port_id]; 1895 1896 if (dev->data->dev_started) { 1897 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1898 port_id); 1899 return -EINVAL; 1900 } 1901 1902 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1903 *lasterr = (*dev->dev_ops->dev_close)(dev); 1904 if (*lasterr != 0) 1905 lasterr = &binerr; 1906 1907 rte_ethdev_trace_close(port_id); 1908 *lasterr = rte_eth_dev_release_port(dev); 1909 1910 return firsterr; 1911 } 1912 1913 int 1914 rte_eth_dev_reset(uint16_t port_id) 1915 { 1916 struct rte_eth_dev *dev; 1917 int ret; 1918 1919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1920 dev = &rte_eth_devices[port_id]; 1921 1922 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1923 1924 ret = rte_eth_dev_stop(port_id); 1925 if (ret != 0) { 1926 RTE_ETHDEV_LOG(ERR, 1927 "Failed to stop device (port %u) before reset: %s - ignore\n", 1928 port_id, rte_strerror(-ret)); 1929 } 1930 ret = dev->dev_ops->dev_reset(dev); 1931 1932 return eth_err(port_id, ret); 1933 } 1934 1935 int 1936 rte_eth_dev_is_removed(uint16_t port_id) 1937 { 1938 struct rte_eth_dev *dev; 1939 int ret; 1940 1941 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1942 dev = &rte_eth_devices[port_id]; 1943 1944 if (dev->state == RTE_ETH_DEV_REMOVED) 1945 return 1; 1946 1947 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1948 1949 ret = dev->dev_ops->is_removed(dev); 1950 if (ret != 0) 1951 /* Device is physically removed. */ 1952 dev->state = RTE_ETH_DEV_REMOVED; 1953 1954 return ret; 1955 } 1956 1957 static int 1958 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1959 uint16_t n_seg, uint32_t *mbp_buf_size, 1960 const struct rte_eth_dev_info *dev_info) 1961 { 1962 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1963 struct rte_mempool *mp_first; 1964 uint32_t offset_mask; 1965 uint16_t seg_idx; 1966 1967 if (n_seg > seg_capa->max_nseg) { 1968 RTE_ETHDEV_LOG(ERR, 1969 "Requested Rx segments %u exceed supported %u\n", 1970 n_seg, seg_capa->max_nseg); 1971 return -EINVAL; 1972 } 1973 /* 1974 * Check the sizes and offsets against buffer sizes 1975 * for each segment specified in extended configuration. 
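	 * For instance (illustrative sketch, not an API requirement), a
	 * two-segment split receiving the first 128 bytes into one pool and
	 * the rest of each packet into another could be described as:
	 *
	 *	struct rte_eth_rxseg_split segs[2] = {
	 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
	 *		{ .mp = pay_pool, .length = 0, .offset = 0 },
	 *	};
	 *
	 * where hdr_pool and pay_pool are hypothetical application mempools
	 * and a zero length means "use the pool buffer size".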
1976 */ 1977 mp_first = rx_seg[0].mp; 1978 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1979 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1980 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1981 uint32_t length = rx_seg[seg_idx].length; 1982 uint32_t offset = rx_seg[seg_idx].offset; 1983 1984 if (mpl == NULL) { 1985 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1986 return -EINVAL; 1987 } 1988 if (seg_idx != 0 && mp_first != mpl && 1989 seg_capa->multi_pools == 0) { 1990 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1991 return -ENOTSUP; 1992 } 1993 if (offset != 0) { 1994 if (seg_capa->offset_allowed == 0) { 1995 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1996 return -ENOTSUP; 1997 } 1998 if (offset & offset_mask) { 1999 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2000 offset, 2001 seg_capa->offset_align_log2); 2002 return -EINVAL; 2003 } 2004 } 2005 if (mpl->private_data_size < 2006 sizeof(struct rte_pktmbuf_pool_private)) { 2007 RTE_ETHDEV_LOG(ERR, 2008 "%s private_data_size %u < %u\n", 2009 mpl->name, mpl->private_data_size, 2010 (unsigned int)sizeof 2011 (struct rte_pktmbuf_pool_private)); 2012 return -ENOSPC; 2013 } 2014 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2015 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2016 length = length != 0 ? length : *mbp_buf_size; 2017 if (*mbp_buf_size < length + offset) { 2018 RTE_ETHDEV_LOG(ERR, 2019 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2020 mpl->name, *mbp_buf_size, 2021 length + offset, length, offset); 2022 return -EINVAL; 2023 } 2024 } 2025 return 0; 2026 } 2027 2028 int 2029 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2030 uint16_t nb_rx_desc, unsigned int socket_id, 2031 const struct rte_eth_rxconf *rx_conf, 2032 struct rte_mempool *mp) 2033 { 2034 int ret; 2035 uint32_t mbp_buf_size; 2036 struct rte_eth_dev *dev; 2037 struct rte_eth_dev_info dev_info; 2038 struct rte_eth_rxconf local_conf; 2039 2040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2041 dev = &rte_eth_devices[port_id]; 2042 2043 if (rx_queue_id >= dev->data->nb_rx_queues) { 2044 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2045 return -EINVAL; 2046 } 2047 2048 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2049 2050 ret = rte_eth_dev_info_get(port_id, &dev_info); 2051 if (ret != 0) 2052 return ret; 2053 2054 if (mp != NULL) { 2055 /* Single pool configuration check. */ 2056 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2057 RTE_ETHDEV_LOG(ERR, 2058 "Ambiguous segment configuration\n"); 2059 return -EINVAL; 2060 } 2061 /* 2062 * Check the size of the mbuf data buffer, this value 2063 * must be provided in the private data of the memory pool. 2064 * First check that the memory pool(s) has a valid private data. 
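 *
 * Editorial sketch (assumption, not taken from this file): a pool created as
 *
 *     rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                             RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 * carries the rte_pktmbuf_pool_private area and a data room of
 * RTE_MBUF_DEFAULT_BUF_SIZE, i.e. 2048 bytes plus RTE_PKTMBUF_HEADROOM,
 * which satisfies the min_rx_bufsize + RTE_PKTMBUF_HEADROOM check below
 * for most drivers.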
2065 */ 2066 if (mp->private_data_size < 2067 sizeof(struct rte_pktmbuf_pool_private)) { 2068 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2069 mp->name, mp->private_data_size, 2070 (unsigned int) 2071 sizeof(struct rte_pktmbuf_pool_private)); 2072 return -ENOSPC; 2073 } 2074 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2075 if (mbp_buf_size < dev_info.min_rx_bufsize + 2076 RTE_PKTMBUF_HEADROOM) { 2077 RTE_ETHDEV_LOG(ERR, 2078 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2079 mp->name, mbp_buf_size, 2080 RTE_PKTMBUF_HEADROOM + 2081 dev_info.min_rx_bufsize, 2082 RTE_PKTMBUF_HEADROOM, 2083 dev_info.min_rx_bufsize); 2084 return -EINVAL; 2085 } 2086 } else { 2087 const struct rte_eth_rxseg_split *rx_seg; 2088 uint16_t n_seg; 2089 2090 /* Extended multi-segment configuration check. */ 2091 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2092 RTE_ETHDEV_LOG(ERR, 2093 "Memory pool is null and no extended configuration provided\n"); 2094 return -EINVAL; 2095 } 2096 2097 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2098 n_seg = rx_conf->rx_nseg; 2099 2100 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2101 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2102 &mbp_buf_size, 2103 &dev_info); 2104 if (ret != 0) 2105 return ret; 2106 } else { 2107 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2108 return -EINVAL; 2109 } 2110 } 2111 2112 /* Use default specified by driver, if nb_rx_desc is zero */ 2113 if (nb_rx_desc == 0) { 2114 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2115 /* If driver default is also zero, fall back on EAL default */ 2116 if (nb_rx_desc == 0) 2117 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2118 } 2119 2120 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2121 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2122 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2123 2124 RTE_ETHDEV_LOG(ERR, 2125 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2126 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2127 dev_info.rx_desc_lim.nb_min, 2128 dev_info.rx_desc_lim.nb_align); 2129 return -EINVAL; 2130 } 2131 2132 if (dev->data->dev_started && 2133 !(dev_info.dev_capa & 2134 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2135 return -EBUSY; 2136 2137 if (dev->data->dev_started && 2138 (dev->data->rx_queue_state[rx_queue_id] != 2139 RTE_ETH_QUEUE_STATE_STOPPED)) 2140 return -EBUSY; 2141 2142 eth_dev_rxq_release(dev, rx_queue_id); 2143 2144 if (rx_conf == NULL) 2145 rx_conf = &dev_info.default_rxconf; 2146 2147 local_conf = *rx_conf; 2148 2149 /* 2150 * If an offloading has already been enabled in 2151 * rte_eth_dev_configure(), it has been enabled on all queues, 2152 * so there is no need to enable it in this queue again. 2153 * The local_conf.offloads input to underlying PMD only carries 2154 * those offloadings which are only enabled on this queue and 2155 * not enabled on all queues. 2156 */ 2157 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2158 2159 /* 2160 * New added offloadings for this queue are those not enabled in 2161 * rte_eth_dev_configure() and they must be per-queue type. 2162 * A pure per-port offloading can't be enabled on a queue while 2163 * disabled on another queue. A pure per-port offloading can't 2164 * be enabled for any queue as new added one if it hasn't been 2165 * enabled in rte_eth_dev_configure(). 
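 *
 * Editorial example (illustrative only): if DEV_RX_OFFLOAD_SCATTER was set
 * port-wide in rte_eth_dev_configure(), repeating it in rx_conf->offloads
 * is harmless because it is masked out above; but requesting an offload for
 * this queue only that is not listed in dev_info.rx_queue_offload_capa is
 * rejected just below with -EINVAL.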
2166 */ 2167 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2168 local_conf.offloads) { 2169 RTE_ETHDEV_LOG(ERR, 2170 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2171 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2172 port_id, rx_queue_id, local_conf.offloads, 2173 dev_info.rx_queue_offload_capa, 2174 __func__); 2175 return -EINVAL; 2176 } 2177 2178 /* 2179 * If LRO is enabled, check that the maximum aggregated packet 2180 * size is supported by the configured device. 2181 */ 2182 /* Get the real Ethernet overhead length */ 2183 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2184 uint32_t overhead_len; 2185 uint32_t max_rx_pktlen; 2186 int ret; 2187 2188 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2189 dev_info.max_mtu); 2190 max_rx_pktlen = dev->data->mtu + overhead_len; 2191 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2192 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2193 ret = eth_dev_check_lro_pkt_size(port_id, 2194 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2195 max_rx_pktlen, 2196 dev_info.max_lro_pkt_size); 2197 if (ret != 0) 2198 return ret; 2199 } 2200 2201 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2202 socket_id, &local_conf, mp); 2203 if (!ret) { 2204 if (!dev->data->min_rx_buf_size || 2205 dev->data->min_rx_buf_size > mbp_buf_size) 2206 dev->data->min_rx_buf_size = mbp_buf_size; 2207 } 2208 2209 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2210 rx_conf, ret); 2211 return eth_err(port_id, ret); 2212 } 2213 2214 int 2215 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2216 uint16_t nb_rx_desc, 2217 const struct rte_eth_hairpin_conf *conf) 2218 { 2219 int ret; 2220 struct rte_eth_dev *dev; 2221 struct rte_eth_hairpin_cap cap; 2222 int i; 2223 int count; 2224 2225 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2226 dev = &rte_eth_devices[port_id]; 2227 2228 if (rx_queue_id >= dev->data->nb_rx_queues) { 2229 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2230 return -EINVAL; 2231 } 2232 2233 if (conf == NULL) { 2234 RTE_ETHDEV_LOG(ERR, 2235 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2236 port_id); 2237 return -EINVAL; 2238 } 2239 2240 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2241 if (ret != 0) 2242 return ret; 2243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2244 -ENOTSUP); 2245 /* if nb_rx_desc is zero use max number of desc from the driver. 
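 *
 * Editorial sketch of the caller side (names are assumptions): an
 * application typically fills
 *
 *     struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *     conf.peers[0].port = peer_port_id;
 *     conf.peers[0].queue = peer_tx_queue;
 *
 * and may pass nb_rx_desc == 0 to take the driver maximum validated below.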
*/ 2246 if (nb_rx_desc == 0) 2247 nb_rx_desc = cap.max_nb_desc; 2248 if (nb_rx_desc > cap.max_nb_desc) { 2249 RTE_ETHDEV_LOG(ERR, 2250 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2251 nb_rx_desc, cap.max_nb_desc); 2252 return -EINVAL; 2253 } 2254 if (conf->peer_count > cap.max_rx_2_tx) { 2255 RTE_ETHDEV_LOG(ERR, 2256 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2257 conf->peer_count, cap.max_rx_2_tx); 2258 return -EINVAL; 2259 } 2260 if (conf->peer_count == 0) { 2261 RTE_ETHDEV_LOG(ERR, 2262 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2263 conf->peer_count); 2264 return -EINVAL; 2265 } 2266 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2267 cap.max_nb_queues != UINT16_MAX; i++) { 2268 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2269 count++; 2270 } 2271 if (count > cap.max_nb_queues) { 2272 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2273 cap.max_nb_queues); 2274 return -EINVAL; 2275 } 2276 if (dev->data->dev_started) 2277 return -EBUSY; 2278 eth_dev_rxq_release(dev, rx_queue_id); 2279 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2280 nb_rx_desc, conf); 2281 if (ret == 0) 2282 dev->data->rx_queue_state[rx_queue_id] = 2283 RTE_ETH_QUEUE_STATE_HAIRPIN; 2284 return eth_err(port_id, ret); 2285 } 2286 2287 int 2288 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2289 uint16_t nb_tx_desc, unsigned int socket_id, 2290 const struct rte_eth_txconf *tx_conf) 2291 { 2292 struct rte_eth_dev *dev; 2293 struct rte_eth_dev_info dev_info; 2294 struct rte_eth_txconf local_conf; 2295 int ret; 2296 2297 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2298 dev = &rte_eth_devices[port_id]; 2299 2300 if (tx_queue_id >= dev->data->nb_tx_queues) { 2301 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2302 return -EINVAL; 2303 } 2304 2305 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2306 2307 ret = rte_eth_dev_info_get(port_id, &dev_info); 2308 if (ret != 0) 2309 return ret; 2310 2311 /* Use default specified by driver, if nb_tx_desc is zero */ 2312 if (nb_tx_desc == 0) { 2313 nb_tx_desc = dev_info.default_txportconf.ring_size; 2314 /* If driver default is zero, fall back on EAL default */ 2315 if (nb_tx_desc == 0) 2316 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2317 } 2318 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2319 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2320 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2321 RTE_ETHDEV_LOG(ERR, 2322 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2323 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2324 dev_info.tx_desc_lim.nb_min, 2325 dev_info.tx_desc_lim.nb_align); 2326 return -EINVAL; 2327 } 2328 2329 if (dev->data->dev_started && 2330 !(dev_info.dev_capa & 2331 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2332 return -EBUSY; 2333 2334 if (dev->data->dev_started && 2335 (dev->data->tx_queue_state[tx_queue_id] != 2336 RTE_ETH_QUEUE_STATE_STOPPED)) 2337 return -EBUSY; 2338 2339 eth_dev_txq_release(dev, tx_queue_id); 2340 2341 if (tx_conf == NULL) 2342 tx_conf = &dev_info.default_txconf; 2343 2344 local_conf = *tx_conf; 2345 2346 /* 2347 * If an offloading has already been enabled in 2348 * rte_eth_dev_configure(), it has been enabled on all queues, 2349 * so there is no need to enable it in this queue again.
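 *
 * Editorial example (illustrative only): DEV_TX_OFFLOAD_MBUF_FAST_FREE
 * enabled port-wide at configure time does not need to be repeated in
 * tx_conf->offloads here; after the mask below, local_conf.offloads holds
 * only the additions requested for this particular queue.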
2350 * The local_conf.offloads input to underlying PMD only carries 2351 * those offloadings which are only enabled on this queue and 2352 * not enabled on all queues. 2353 */ 2354 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2355 2356 /* 2357 * New added offloadings for this queue are those not enabled in 2358 * rte_eth_dev_configure() and they must be per-queue type. 2359 * A pure per-port offloading can't be enabled on a queue while 2360 * disabled on another queue. A pure per-port offloading can't 2361 * be enabled for any queue as new added one if it hasn't been 2362 * enabled in rte_eth_dev_configure(). 2363 */ 2364 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2365 local_conf.offloads) { 2366 RTE_ETHDEV_LOG(ERR, 2367 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2368 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2369 port_id, tx_queue_id, local_conf.offloads, 2370 dev_info.tx_queue_offload_capa, 2371 __func__); 2372 return -EINVAL; 2373 } 2374 2375 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2376 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2377 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2378 } 2379 2380 int 2381 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2382 uint16_t nb_tx_desc, 2383 const struct rte_eth_hairpin_conf *conf) 2384 { 2385 struct rte_eth_dev *dev; 2386 struct rte_eth_hairpin_cap cap; 2387 int i; 2388 int count; 2389 int ret; 2390 2391 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2392 dev = &rte_eth_devices[port_id]; 2393 2394 if (tx_queue_id >= dev->data->nb_tx_queues) { 2395 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2396 return -EINVAL; 2397 } 2398 2399 if (conf == NULL) { 2400 RTE_ETHDEV_LOG(ERR, 2401 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2402 port_id); 2403 return -EINVAL; 2404 } 2405 2406 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2407 if (ret != 0) 2408 return ret; 2409 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2410 -ENOTSUP); 2411 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2412 if (nb_tx_desc == 0) 2413 nb_tx_desc = cap.max_nb_desc; 2414 if (nb_tx_desc > cap.max_nb_desc) { 2415 RTE_ETHDEV_LOG(ERR, 2416 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2417 nb_tx_desc, cap.max_nb_desc); 2418 return -EINVAL; 2419 } 2420 if (conf->peer_count > cap.max_tx_2_rx) { 2421 RTE_ETHDEV_LOG(ERR, 2422 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2423 conf->peer_count, cap.max_tx_2_rx); 2424 return -EINVAL; 2425 } 2426 if (conf->peer_count == 0) { 2427 RTE_ETHDEV_LOG(ERR, 2428 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2429 conf->peer_count); 2430 return -EINVAL; 2431 } 2432 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2433 cap.max_nb_queues != UINT16_MAX; i++) { 2434 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2435 count++; 2436 } 2437 if (count > cap.max_nb_queues) { 2438 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2439 cap.max_nb_queues); 2440 return -EINVAL; 2441 } 2442 if (dev->data->dev_started) 2443 return -EBUSY; 2444 eth_dev_txq_release(dev, tx_queue_id); 2445 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2446 (dev, tx_queue_id, nb_tx_desc, conf); 2447 if (ret == 0) 2448 dev->data->tx_queue_state[tx_queue_id] = 2449 RTE_ETH_QUEUE_STATE_HAIRPIN; 2450 return eth_err(port_id, ret); 2451 } 2452 2453 int 2454 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2455 { 2456 struct rte_eth_dev *dev; 2457 int ret; 2458 2459 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2460 dev = &rte_eth_devices[tx_port]; 2461 2462 if (dev->data->dev_started == 0) { 2463 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2464 return -EBUSY; 2465 } 2466 2467 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2468 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2469 if (ret != 0) 2470 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2471 " to Rx %d (%d - all ports)\n", 2472 tx_port, rx_port, RTE_MAX_ETHPORTS); 2473 2474 return ret; 2475 } 2476 2477 int 2478 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2479 { 2480 struct rte_eth_dev *dev; 2481 int ret; 2482 2483 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2484 dev = &rte_eth_devices[tx_port]; 2485 2486 if (dev->data->dev_started == 0) { 2487 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2488 return -EBUSY; 2489 } 2490 2491 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2492 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2493 if (ret != 0) 2494 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2495 " from Rx %d (%d - all ports)\n", 2496 tx_port, rx_port, RTE_MAX_ETHPORTS); 2497 2498 return ret; 2499 } 2500 2501 int 2502 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2503 size_t len, uint32_t direction) 2504 { 2505 struct rte_eth_dev *dev; 2506 int ret; 2507 2508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2509 dev = &rte_eth_devices[port_id]; 2510 2511 if (peer_ports == NULL) { 2512 RTE_ETHDEV_LOG(ERR, 2513 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2514 port_id); 2515 return -EINVAL; 2516 } 2517 2518 if (len == 0) { 2519 RTE_ETHDEV_LOG(ERR, 2520 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2521 port_id); 2522 return -EINVAL; 2523 } 2524 2525 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2526 -ENOTSUP); 2527 2528 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2529 len, direction); 2530 if (ret < 0) 2531
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2532 port_id, direction ? "Rx" : "Tx"); 2533 2534 return ret; 2535 } 2536 2537 void 2538 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2539 void *userdata __rte_unused) 2540 { 2541 rte_pktmbuf_free_bulk(pkts, unsent); 2542 } 2543 2544 void 2545 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2546 void *userdata) 2547 { 2548 uint64_t *count = userdata; 2549 2550 rte_pktmbuf_free_bulk(pkts, unsent); 2551 *count += unsent; 2552 } 2553 2554 int 2555 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2556 buffer_tx_error_fn cbfn, void *userdata) 2557 { 2558 if (buffer == NULL) { 2559 RTE_ETHDEV_LOG(ERR, 2560 "Cannot set Tx buffer error callback to NULL buffer\n"); 2561 return -EINVAL; 2562 } 2563 2564 buffer->error_callback = cbfn; 2565 buffer->error_userdata = userdata; 2566 return 0; 2567 } 2568 2569 int 2570 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2571 { 2572 int ret = 0; 2573 2574 if (buffer == NULL) { 2575 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2576 return -EINVAL; 2577 } 2578 2579 buffer->size = size; 2580 if (buffer->error_callback == NULL) { 2581 ret = rte_eth_tx_buffer_set_err_callback( 2582 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2583 } 2584 2585 return ret; 2586 } 2587 2588 int 2589 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2590 { 2591 struct rte_eth_dev *dev; 2592 int ret; 2593 2594 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2595 dev = &rte_eth_devices[port_id]; 2596 2597 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2598 2599 /* Call driver to free pending mbufs. */ 2600 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2601 free_cnt); 2602 return eth_err(port_id, ret); 2603 } 2604 2605 int 2606 rte_eth_promiscuous_enable(uint16_t port_id) 2607 { 2608 struct rte_eth_dev *dev; 2609 int diag = 0; 2610 2611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2612 dev = &rte_eth_devices[port_id]; 2613 2614 if (dev->data->promiscuous == 1) 2615 return 0; 2616 2617 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2618 2619 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2620 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2621 2622 return eth_err(port_id, diag); 2623 } 2624 2625 int 2626 rte_eth_promiscuous_disable(uint16_t port_id) 2627 { 2628 struct rte_eth_dev *dev; 2629 int diag = 0; 2630 2631 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2632 dev = &rte_eth_devices[port_id]; 2633 2634 if (dev->data->promiscuous == 0) 2635 return 0; 2636 2637 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2638 2639 dev->data->promiscuous = 0; 2640 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2641 if (diag != 0) 2642 dev->data->promiscuous = 1; 2643 2644 return eth_err(port_id, diag); 2645 } 2646 2647 int 2648 rte_eth_promiscuous_get(uint16_t port_id) 2649 { 2650 struct rte_eth_dev *dev; 2651 2652 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2653 dev = &rte_eth_devices[port_id]; 2654 2655 return dev->data->promiscuous; 2656 } 2657 2658 int 2659 rte_eth_allmulticast_enable(uint16_t port_id) 2660 { 2661 struct rte_eth_dev *dev; 2662 int diag; 2663 2664 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2665 dev = &rte_eth_devices[port_id]; 2666 2667 if (dev->data->all_multicast == 1) 2668 return 0; 2669 2670 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2671 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2672 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2673 2674 return eth_err(port_id, diag); 2675 } 2676 2677 int 2678 rte_eth_allmulticast_disable(uint16_t port_id) 2679 { 2680 struct rte_eth_dev *dev; 2681 int diag; 2682 2683 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2684 dev = &rte_eth_devices[port_id]; 2685 2686 if (dev->data->all_multicast == 0) 2687 return 0; 2688 2689 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2690 dev->data->all_multicast = 0; 2691 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2692 if (diag != 0) 2693 dev->data->all_multicast = 1; 2694 2695 return eth_err(port_id, diag); 2696 } 2697 2698 int 2699 rte_eth_allmulticast_get(uint16_t port_id) 2700 { 2701 struct rte_eth_dev *dev; 2702 2703 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2704 dev = &rte_eth_devices[port_id]; 2705 2706 return dev->data->all_multicast; 2707 } 2708 2709 int 2710 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2711 { 2712 struct rte_eth_dev *dev; 2713 2714 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2715 dev = &rte_eth_devices[port_id]; 2716 2717 if (eth_link == NULL) { 2718 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2719 port_id); 2720 return -EINVAL; 2721 } 2722 2723 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2724 rte_eth_linkstatus_get(dev, eth_link); 2725 else { 2726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2727 (*dev->dev_ops->link_update)(dev, 1); 2728 *eth_link = dev->data->dev_link; 2729 } 2730 2731 return 0; 2732 } 2733 2734 int 2735 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2736 { 2737 struct rte_eth_dev *dev; 2738 2739 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2740 dev = &rte_eth_devices[port_id]; 2741 2742 if (eth_link == NULL) { 2743 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2744 port_id); 2745 return -EINVAL; 2746 } 2747 2748 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2749 rte_eth_linkstatus_get(dev, eth_link); 2750 else { 2751 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2752 (*dev->dev_ops->link_update)(dev, 0); 2753 *eth_link = dev->data->dev_link; 2754 } 2755 2756 return 0; 2757 } 2758 2759 const 
char * 2760 rte_eth_link_speed_to_str(uint32_t link_speed) 2761 { 2762 switch (link_speed) { 2763 case ETH_SPEED_NUM_NONE: return "None"; 2764 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2765 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2766 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2767 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2768 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2769 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2770 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2771 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2772 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2773 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2774 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2775 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2776 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2777 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2778 default: return "Invalid"; 2779 } 2780 } 2781 2782 int 2783 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2784 { 2785 if (str == NULL) { 2786 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2787 return -EINVAL; 2788 } 2789 2790 if (len == 0) { 2791 RTE_ETHDEV_LOG(ERR, 2792 "Cannot convert link to string with zero size\n"); 2793 return -EINVAL; 2794 } 2795 2796 if (eth_link == NULL) { 2797 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2798 return -EINVAL; 2799 } 2800 2801 if (eth_link->link_status == ETH_LINK_DOWN) 2802 return snprintf(str, len, "Link down"); 2803 else 2804 return snprintf(str, len, "Link up at %s %s %s", 2805 rte_eth_link_speed_to_str(eth_link->link_speed), 2806 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2807 "FDX" : "HDX", 2808 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2809 "Autoneg" : "Fixed"); 2810 } 2811 2812 int 2813 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2814 { 2815 struct rte_eth_dev *dev; 2816 2817 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2818 dev = &rte_eth_devices[port_id]; 2819 2820 if (stats == NULL) { 2821 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2822 port_id); 2823 return -EINVAL; 2824 } 2825 2826 memset(stats, 0, sizeof(*stats)); 2827 2828 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2829 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2830 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2831 } 2832 2833 int 2834 rte_eth_stats_reset(uint16_t port_id) 2835 { 2836 struct rte_eth_dev *dev; 2837 int ret; 2838 2839 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2840 dev = &rte_eth_devices[port_id]; 2841 2842 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2843 ret = (*dev->dev_ops->stats_reset)(dev); 2844 if (ret != 0) 2845 return eth_err(port_id, ret); 2846 2847 dev->data->rx_mbuf_alloc_failed = 0; 2848 2849 return 0; 2850 } 2851 2852 static inline int 2853 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2854 { 2855 uint16_t nb_rxqs, nb_txqs; 2856 int count; 2857 2858 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2859 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2860 2861 count = RTE_NB_STATS; 2862 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2863 count += nb_rxqs * RTE_NB_RXQ_STATS; 2864 count += nb_txqs * RTE_NB_TXQ_STATS; 2865 } 2866 2867 return count; 2868 } 2869 2870 static int 2871 eth_dev_get_xstats_count(uint16_t port_id) 2872 { 2873 struct rte_eth_dev *dev; 2874 int count; 2875 2876 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2877 dev = &rte_eth_devices[port_id]; 2878 if 
(dev->dev_ops->xstats_get_names != NULL) { 2879 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2880 if (count < 0) 2881 return eth_err(port_id, count); 2882 } else 2883 count = 0; 2884 2885 2886 count += eth_dev_get_xstats_basic_count(dev); 2887 2888 return count; 2889 } 2890 2891 int 2892 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2893 uint64_t *id) 2894 { 2895 int cnt_xstats, idx_xstat; 2896 2897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2898 2899 if (xstat_name == NULL) { 2900 RTE_ETHDEV_LOG(ERR, 2901 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2902 port_id); 2903 return -ENOMEM; 2904 } 2905 2906 if (id == NULL) { 2907 RTE_ETHDEV_LOG(ERR, 2908 "Cannot get ethdev port %u xstats ID to NULL\n", 2909 port_id); 2910 return -ENOMEM; 2911 } 2912 2913 /* Get count */ 2914 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2915 if (cnt_xstats < 0) { 2916 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2917 return -ENODEV; 2918 } 2919 2920 /* Get id-name lookup table */ 2921 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2922 2923 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2924 port_id, xstats_names, cnt_xstats, NULL)) { 2925 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2926 return -1; 2927 } 2928 2929 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2930 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2931 *id = idx_xstat; 2932 return 0; 2933 }; 2934 } 2935 2936 return -EINVAL; 2937 } 2938 2939 /* retrieve basic stats names */ 2940 static int 2941 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2942 struct rte_eth_xstat_name *xstats_names) 2943 { 2944 int cnt_used_entries = 0; 2945 uint32_t idx, id_queue; 2946 uint16_t num_q; 2947 2948 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2949 strlcpy(xstats_names[cnt_used_entries].name, 2950 eth_dev_stats_strings[idx].name, 2951 sizeof(xstats_names[0].name)); 2952 cnt_used_entries++; 2953 } 2954 2955 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2956 return cnt_used_entries; 2957 2958 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2959 for (id_queue = 0; id_queue < num_q; id_queue++) { 2960 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2961 snprintf(xstats_names[cnt_used_entries].name, 2962 sizeof(xstats_names[0].name), 2963 "rx_q%u_%s", 2964 id_queue, eth_dev_rxq_stats_strings[idx].name); 2965 cnt_used_entries++; 2966 } 2967 2968 } 2969 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2970 for (id_queue = 0; id_queue < num_q; id_queue++) { 2971 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2972 snprintf(xstats_names[cnt_used_entries].name, 2973 sizeof(xstats_names[0].name), 2974 "tx_q%u_%s", 2975 id_queue, eth_dev_txq_stats_strings[idx].name); 2976 cnt_used_entries++; 2977 } 2978 } 2979 return cnt_used_entries; 2980 } 2981 2982 /* retrieve ethdev extended statistics names */ 2983 int 2984 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2985 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2986 uint64_t *ids) 2987 { 2988 struct rte_eth_xstat_name *xstats_names_copy; 2989 unsigned int no_basic_stat_requested = 1; 2990 unsigned int no_ext_stat_requested = 1; 2991 unsigned int expected_entries; 2992 unsigned int basic_count; 2993 struct rte_eth_dev *dev; 2994 unsigned int i; 2995 int ret; 2996 2997 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2998 dev = &rte_eth_devices[port_id]; 2999 3000 basic_count = eth_dev_get_xstats_basic_count(dev); 3001 
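/*
 * Editorial note: in the id space seen by applications, basic (generic)
 * stats occupy ids [0, basic_count) and driver xstats follow from
 * basic_count upwards; the id conversion further down relies on that layout.
 */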
ret = eth_dev_get_xstats_count(port_id); 3002 if (ret < 0) 3003 return ret; 3004 expected_entries = (unsigned int)ret; 3005 3006 /* Return max number of stats if no ids given */ 3007 if (!ids) { 3008 if (!xstats_names) 3009 return expected_entries; 3010 else if (xstats_names && size < expected_entries) 3011 return expected_entries; 3012 } 3013 3014 if (ids && !xstats_names) 3015 return -EINVAL; 3016 3017 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3018 uint64_t ids_copy[size]; 3019 3020 for (i = 0; i < size; i++) { 3021 if (ids[i] < basic_count) { 3022 no_basic_stat_requested = 0; 3023 break; 3024 } 3025 3026 /* 3027 * Convert ids to xstats ids that PMD knows. 3028 * ids known by user are basic + extended stats. 3029 */ 3030 ids_copy[i] = ids[i] - basic_count; 3031 } 3032 3033 if (no_basic_stat_requested) 3034 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3035 ids_copy, xstats_names, size); 3036 } 3037 3038 /* Retrieve all stats */ 3039 if (!ids) { 3040 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3041 expected_entries); 3042 if (num_stats < 0 || num_stats > (int)expected_entries) 3043 return num_stats; 3044 else 3045 return expected_entries; 3046 } 3047 3048 xstats_names_copy = calloc(expected_entries, 3049 sizeof(struct rte_eth_xstat_name)); 3050 3051 if (!xstats_names_copy) { 3052 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3053 return -ENOMEM; 3054 } 3055 3056 if (ids) { 3057 for (i = 0; i < size; i++) { 3058 if (ids[i] >= basic_count) { 3059 no_ext_stat_requested = 0; 3060 break; 3061 } 3062 } 3063 } 3064 3065 /* Fill xstats_names_copy structure */ 3066 if (ids && no_ext_stat_requested) { 3067 eth_basic_stats_get_names(dev, xstats_names_copy); 3068 } else { 3069 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3070 expected_entries); 3071 if (ret < 0) { 3072 free(xstats_names_copy); 3073 return ret; 3074 } 3075 } 3076 3077 /* Filter stats */ 3078 for (i = 0; i < size; i++) { 3079 if (ids[i] >= expected_entries) { 3080 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3081 free(xstats_names_copy); 3082 return -1; 3083 } 3084 xstats_names[i] = xstats_names_copy[ids[i]]; 3085 } 3086 3087 free(xstats_names_copy); 3088 return size; 3089 } 3090 3091 int 3092 rte_eth_xstats_get_names(uint16_t port_id, 3093 struct rte_eth_xstat_name *xstats_names, 3094 unsigned int size) 3095 { 3096 struct rte_eth_dev *dev; 3097 int cnt_used_entries; 3098 int cnt_expected_entries; 3099 int cnt_driver_entries; 3100 3101 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3102 if (xstats_names == NULL || cnt_expected_entries < 0 || 3103 (int)size < cnt_expected_entries) 3104 return cnt_expected_entries; 3105 3106 /* port_id checked in eth_dev_get_xstats_count() */ 3107 dev = &rte_eth_devices[port_id]; 3108 3109 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3110 3111 if (dev->dev_ops->xstats_get_names != NULL) { 3112 /* If there are any driver-specific xstats, append them 3113 * to end of list. 
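 *
 * A minimal retrieval pattern on the application side (editorial sketch,
 * error handling elided) would be:
 *
 *     int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *     struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *     rte_eth_xstats_get_names(port_id, names, n);
 *     rte_eth_xstats_get(port_id, vals, n);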
3114 */ 3115 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3116 dev, 3117 xstats_names + cnt_used_entries, 3118 size - cnt_used_entries); 3119 if (cnt_driver_entries < 0) 3120 return eth_err(port_id, cnt_driver_entries); 3121 cnt_used_entries += cnt_driver_entries; 3122 } 3123 3124 return cnt_used_entries; 3125 } 3126 3127 3128 static int 3129 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3130 { 3131 struct rte_eth_dev *dev; 3132 struct rte_eth_stats eth_stats; 3133 unsigned int count = 0, i, q; 3134 uint64_t val, *stats_ptr; 3135 uint16_t nb_rxqs, nb_txqs; 3136 int ret; 3137 3138 ret = rte_eth_stats_get(port_id, &eth_stats); 3139 if (ret < 0) 3140 return ret; 3141 3142 dev = &rte_eth_devices[port_id]; 3143 3144 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3145 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3146 3147 /* global stats */ 3148 for (i = 0; i < RTE_NB_STATS; i++) { 3149 stats_ptr = RTE_PTR_ADD(&eth_stats, 3150 eth_dev_stats_strings[i].offset); 3151 val = *stats_ptr; 3152 xstats[count++].value = val; 3153 } 3154 3155 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3156 return count; 3157 3158 /* per-rxq stats */ 3159 for (q = 0; q < nb_rxqs; q++) { 3160 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3161 stats_ptr = RTE_PTR_ADD(&eth_stats, 3162 eth_dev_rxq_stats_strings[i].offset + 3163 q * sizeof(uint64_t)); 3164 val = *stats_ptr; 3165 xstats[count++].value = val; 3166 } 3167 } 3168 3169 /* per-txq stats */ 3170 for (q = 0; q < nb_txqs; q++) { 3171 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3172 stats_ptr = RTE_PTR_ADD(&eth_stats, 3173 eth_dev_txq_stats_strings[i].offset + 3174 q * sizeof(uint64_t)); 3175 val = *stats_ptr; 3176 xstats[count++].value = val; 3177 } 3178 } 3179 return count; 3180 } 3181 3182 /* retrieve ethdev extended statistics */ 3183 int 3184 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3185 uint64_t *values, unsigned int size) 3186 { 3187 unsigned int no_basic_stat_requested = 1; 3188 unsigned int no_ext_stat_requested = 1; 3189 unsigned int num_xstats_filled; 3190 unsigned int basic_count; 3191 uint16_t expected_entries; 3192 struct rte_eth_dev *dev; 3193 unsigned int i; 3194 int ret; 3195 3196 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3197 dev = &rte_eth_devices[port_id]; 3198 3199 ret = eth_dev_get_xstats_count(port_id); 3200 if (ret < 0) 3201 return ret; 3202 expected_entries = (uint16_t)ret; 3203 struct rte_eth_xstat xstats[expected_entries]; 3204 basic_count = eth_dev_get_xstats_basic_count(dev); 3205 3206 /* Return max number of stats if no ids given */ 3207 if (!ids) { 3208 if (!values) 3209 return expected_entries; 3210 else if (values && size < expected_entries) 3211 return expected_entries; 3212 } 3213 3214 if (ids && !values) 3215 return -EINVAL; 3216 3217 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3218 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3219 uint64_t ids_copy[size]; 3220 3221 for (i = 0; i < size; i++) { 3222 if (ids[i] < basic_count) { 3223 no_basic_stat_requested = 0; 3224 break; 3225 } 3226 3227 /* 3228 * Convert ids to xstats ids that PMD knows. 3229 * ids known by user are basic + extended stats.
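 * For example (illustrative numbers): with basic_count == 8, a caller
 * asking for id 11 is really asking for the driver's xstat index 3,
 * hence ids_copy[i] = ids[i] - basic_count.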
3230 */ 3231 ids_copy[i] = ids[i] - basic_count; 3232 } 3233 3234 if (no_basic_stat_requested) 3235 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3236 values, size); 3237 } 3238 3239 if (ids) { 3240 for (i = 0; i < size; i++) { 3241 if (ids[i] >= basic_count) { 3242 no_ext_stat_requested = 0; 3243 break; 3244 } 3245 } 3246 } 3247 3248 /* Fill the xstats structure */ 3249 if (ids && no_ext_stat_requested) 3250 ret = eth_basic_stats_get(port_id, xstats); 3251 else 3252 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3253 3254 if (ret < 0) 3255 return ret; 3256 num_xstats_filled = (unsigned int)ret; 3257 3258 /* Return all stats */ 3259 if (!ids) { 3260 for (i = 0; i < num_xstats_filled; i++) 3261 values[i] = xstats[i].value; 3262 return expected_entries; 3263 } 3264 3265 /* Filter stats */ 3266 for (i = 0; i < size; i++) { 3267 if (ids[i] >= expected_entries) { 3268 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3269 return -1; 3270 } 3271 values[i] = xstats[ids[i]].value; 3272 } 3273 return size; 3274 } 3275 3276 int 3277 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3278 unsigned int n) 3279 { 3280 struct rte_eth_dev *dev; 3281 unsigned int count = 0, i; 3282 signed int xcount = 0; 3283 uint16_t nb_rxqs, nb_txqs; 3284 int ret; 3285 3286 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3287 dev = &rte_eth_devices[port_id]; 3288 3289 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3290 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3291 3292 /* Return generic statistics */ 3293 count = RTE_NB_STATS; 3294 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3295 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3296 3297 /* implemented by the driver */ 3298 if (dev->dev_ops->xstats_get != NULL) { 3299 /* Retrieve the xstats from the driver at the end of the 3300 * xstats struct. 3301 */ 3302 xcount = (*dev->dev_ops->xstats_get)(dev, 3303 xstats ? xstats + count : NULL, 3304 (n > count) ? 
n - count : 0); 3305 3306 if (xcount < 0) 3307 return eth_err(port_id, xcount); 3308 } 3309 3310 if (n < count + xcount || xstats == NULL) 3311 return count + xcount; 3312 3313 /* now fill the xstats structure */ 3314 ret = eth_basic_stats_get(port_id, xstats); 3315 if (ret < 0) 3316 return ret; 3317 count = ret; 3318 3319 for (i = 0; i < count; i++) 3320 xstats[i].id = i; 3321 /* add an offset to driver-specific stats */ 3322 for ( ; i < count + xcount; i++) 3323 xstats[i].id += count; 3324 3325 return count + xcount; 3326 } 3327 3328 /* reset ethdev extended statistics */ 3329 int 3330 rte_eth_xstats_reset(uint16_t port_id) 3331 { 3332 struct rte_eth_dev *dev; 3333 3334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3335 dev = &rte_eth_devices[port_id]; 3336 3337 /* implemented by the driver */ 3338 if (dev->dev_ops->xstats_reset != NULL) 3339 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3340 3341 /* fallback to default */ 3342 return rte_eth_stats_reset(port_id); 3343 } 3344 3345 static int 3346 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3347 uint8_t stat_idx, uint8_t is_rx) 3348 { 3349 struct rte_eth_dev *dev; 3350 3351 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3352 dev = &rte_eth_devices[port_id]; 3353 3354 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3355 return -EINVAL; 3356 3357 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3358 return -EINVAL; 3359 3360 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3361 return -EINVAL; 3362 3363 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3364 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3365 } 3366 3367 int 3368 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3369 uint8_t stat_idx) 3370 { 3371 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3372 tx_queue_id, 3373 stat_idx, STAT_QMAP_TX)); 3374 } 3375 3376 int 3377 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3378 uint8_t stat_idx) 3379 { 3380 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3381 rx_queue_id, 3382 stat_idx, STAT_QMAP_RX)); 3383 } 3384 3385 int 3386 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3387 { 3388 struct rte_eth_dev *dev; 3389 3390 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3391 dev = &rte_eth_devices[port_id]; 3392 3393 if (fw_version == NULL && fw_size > 0) { 3394 RTE_ETHDEV_LOG(ERR, 3395 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3396 port_id); 3397 return -EINVAL; 3398 } 3399 3400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3401 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3402 fw_version, fw_size)); 3403 } 3404 3405 int 3406 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3407 { 3408 struct rte_eth_dev *dev; 3409 const struct rte_eth_desc_lim lim = { 3410 .nb_max = UINT16_MAX, 3411 .nb_min = 0, 3412 .nb_align = 1, 3413 .nb_seg_max = UINT16_MAX, 3414 .nb_mtu_seg_max = UINT16_MAX, 3415 }; 3416 int diag; 3417 3418 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3419 dev = &rte_eth_devices[port_id]; 3420 3421 if (dev_info == NULL) { 3422 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3423 port_id); 3424 return -EINVAL; 3425 } 3426 3427 /* 3428 * Init dev_info before port_id check since caller does not have 3429 * return status and does not know if get is successful or not. 
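 *
 * Callers that do check the return value typically look like this
 * (editorial sketch):
 *
 *     struct rte_eth_dev_info dev_info;
 *     if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
 *         return;    // invalid port or driver query failure
 *
 * and then read limits such as dev_info.max_rx_queues or
 * dev_info.rx_offload_capa before configuring the port.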
3430 */ 3431 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3432 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3433 3434 dev_info->rx_desc_lim = lim; 3435 dev_info->tx_desc_lim = lim; 3436 dev_info->device = dev->device; 3437 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3438 RTE_ETHER_CRC_LEN; 3439 dev_info->max_mtu = UINT16_MAX; 3440 3441 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3442 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3443 if (diag != 0) { 3444 /* Cleanup already filled in device information */ 3445 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3446 return eth_err(port_id, diag); 3447 } 3448 3449 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3450 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3451 RTE_MAX_QUEUES_PER_PORT); 3452 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3453 RTE_MAX_QUEUES_PER_PORT); 3454 3455 dev_info->driver_name = dev->device->driver->name; 3456 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3457 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3458 3459 dev_info->dev_flags = &dev->data->dev_flags; 3460 3461 return 0; 3462 } 3463 3464 int 3465 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3466 { 3467 struct rte_eth_dev *dev; 3468 3469 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3470 dev = &rte_eth_devices[port_id]; 3471 3472 if (dev_conf == NULL) { 3473 RTE_ETHDEV_LOG(ERR, 3474 "Cannot get ethdev port %u configuration to NULL\n", 3475 port_id); 3476 return -EINVAL; 3477 } 3478 3479 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3480 3481 return 0; 3482 } 3483 3484 int 3485 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3486 uint32_t *ptypes, int num) 3487 { 3488 int i, j; 3489 struct rte_eth_dev *dev; 3490 const uint32_t *all_ptypes; 3491 3492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3493 dev = &rte_eth_devices[port_id]; 3494 3495 if (ptypes == NULL && num > 0) { 3496 RTE_ETHDEV_LOG(ERR, 3497 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3498 port_id); 3499 return -EINVAL; 3500 } 3501 3502 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3503 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3504 3505 if (!all_ptypes) 3506 return 0; 3507 3508 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3509 if (all_ptypes[i] & ptype_mask) { 3510 if (j < num) 3511 ptypes[j] = all_ptypes[i]; 3512 j++; 3513 } 3514 3515 return j; 3516 } 3517 3518 int 3519 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3520 uint32_t *set_ptypes, unsigned int num) 3521 { 3522 const uint32_t valid_ptype_masks[] = { 3523 RTE_PTYPE_L2_MASK, 3524 RTE_PTYPE_L3_MASK, 3525 RTE_PTYPE_L4_MASK, 3526 RTE_PTYPE_TUNNEL_MASK, 3527 RTE_PTYPE_INNER_L2_MASK, 3528 RTE_PTYPE_INNER_L3_MASK, 3529 RTE_PTYPE_INNER_L4_MASK, 3530 }; 3531 const uint32_t *all_ptypes; 3532 struct rte_eth_dev *dev; 3533 uint32_t unused_mask; 3534 unsigned int i, j; 3535 int ret; 3536 3537 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3538 dev = &rte_eth_devices[port_id]; 3539 3540 if (num > 0 && set_ptypes == NULL) { 3541 RTE_ETHDEV_LOG(ERR, 3542 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3543 port_id); 3544 return -EINVAL; 3545 } 3546 3547 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3548 *dev->dev_ops->dev_ptypes_set == NULL) { 3549 ret = 0; 3550 goto 
ptype_unknown; 3551 } 3552 3553 if (ptype_mask == 0) { 3554 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3555 ptype_mask); 3556 goto ptype_unknown; 3557 } 3558 3559 unused_mask = ptype_mask; 3560 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3561 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3562 if (mask && mask != valid_ptype_masks[i]) { 3563 ret = -EINVAL; 3564 goto ptype_unknown; 3565 } 3566 unused_mask &= ~valid_ptype_masks[i]; 3567 } 3568 3569 if (unused_mask) { 3570 ret = -EINVAL; 3571 goto ptype_unknown; 3572 } 3573 3574 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3575 if (all_ptypes == NULL) { 3576 ret = 0; 3577 goto ptype_unknown; 3578 } 3579 3580 /* 3581 * Accommodate as many set_ptypes as possible. If the supplied 3582 * set_ptypes array is insufficient fill it partially. 3583 */ 3584 for (i = 0, j = 0; set_ptypes != NULL && 3585 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3586 if (ptype_mask & all_ptypes[i]) { 3587 if (j < num - 1) { 3588 set_ptypes[j] = all_ptypes[i]; 3589 j++; 3590 continue; 3591 } 3592 break; 3593 } 3594 } 3595 3596 if (set_ptypes != NULL && j < num) 3597 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3598 3599 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3600 3601 ptype_unknown: 3602 if (num > 0) 3603 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3604 3605 return ret; 3606 } 3607 3608 int 3609 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3610 unsigned int num) 3611 { 3612 int32_t ret; 3613 struct rte_eth_dev *dev; 3614 struct rte_eth_dev_info dev_info; 3615 3616 if (ma == NULL) { 3617 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3618 return -EINVAL; 3619 } 3620 3621 /* will check for us that port_id is a valid one */ 3622 ret = rte_eth_dev_info_get(port_id, &dev_info); 3623 if (ret != 0) 3624 return ret; 3625 3626 dev = &rte_eth_devices[port_id]; 3627 num = RTE_MIN(dev_info.max_mac_addrs, num); 3628 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3629 3630 return num; 3631 } 3632 3633 int 3634 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3635 { 3636 struct rte_eth_dev *dev; 3637 3638 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3639 dev = &rte_eth_devices[port_id]; 3640 3641 if (mac_addr == NULL) { 3642 RTE_ETHDEV_LOG(ERR, 3643 "Cannot get ethdev port %u MAC address to NULL\n", 3644 port_id); 3645 return -EINVAL; 3646 } 3647 3648 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3649 3650 return 0; 3651 } 3652 3653 int 3654 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3655 { 3656 struct rte_eth_dev *dev; 3657 3658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3659 dev = &rte_eth_devices[port_id]; 3660 3661 if (mtu == NULL) { 3662 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3663 port_id); 3664 return -EINVAL; 3665 } 3666 3667 *mtu = dev->data->mtu; 3668 return 0; 3669 } 3670 3671 int 3672 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3673 { 3674 int ret; 3675 struct rte_eth_dev_info dev_info; 3676 struct rte_eth_dev *dev; 3677 3678 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3679 dev = &rte_eth_devices[port_id]; 3680 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3681 3682 /* 3683 * Check if the device supports dev_infos_get, if it does not 3684 * skip min_mtu/max_mtu validation here as this requires values 3685 * that are populated within the call to rte_eth_dev_info_get() 3686 * which relies on dev->dev_ops->dev_infos_get. 
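 *
 * Application-side usage is simply (editorial sketch):
 *
 *     uint16_t mtu;
 *     if (rte_eth_dev_set_mtu(port_id, 1500) == 0 &&
 *         rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *         printf("port %u MTU is now %u\n", port_id, mtu);
 *
 * The requested MTU has to fall within [dev_info.min_mtu, dev_info.max_mtu]
 * whenever the driver can report those limits.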
3687 */ 3688 if (*dev->dev_ops->dev_infos_get != NULL) { 3689 ret = rte_eth_dev_info_get(port_id, &dev_info); 3690 if (ret != 0) 3691 return ret; 3692 3693 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3694 if (ret != 0) 3695 return ret; 3696 } 3697 3698 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3699 if (ret == 0) 3700 dev->data->mtu = mtu; 3701 3702 return eth_err(port_id, ret); 3703 } 3704 3705 int 3706 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3707 { 3708 struct rte_eth_dev *dev; 3709 int ret; 3710 3711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3712 dev = &rte_eth_devices[port_id]; 3713 3714 if (!(dev->data->dev_conf.rxmode.offloads & 3715 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3716 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3717 port_id); 3718 return -ENOSYS; 3719 } 3720 3721 if (vlan_id > 4095) { 3722 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3723 port_id, vlan_id); 3724 return -EINVAL; 3725 } 3726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3727 3728 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3729 if (ret == 0) { 3730 struct rte_vlan_filter_conf *vfc; 3731 int vidx; 3732 int vbit; 3733 3734 vfc = &dev->data->vlan_filter_conf; 3735 vidx = vlan_id / 64; 3736 vbit = vlan_id % 64; 3737 3738 if (on) 3739 vfc->ids[vidx] |= RTE_BIT64(vbit); 3740 else 3741 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3742 } 3743 3744 return eth_err(port_id, ret); 3745 } 3746 3747 int 3748 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3749 int on) 3750 { 3751 struct rte_eth_dev *dev; 3752 3753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3754 dev = &rte_eth_devices[port_id]; 3755 3756 if (rx_queue_id >= dev->data->nb_rx_queues) { 3757 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3758 return -EINVAL; 3759 } 3760 3761 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3762 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3763 3764 return 0; 3765 } 3766 3767 int 3768 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3769 enum rte_vlan_type vlan_type, 3770 uint16_t tpid) 3771 { 3772 struct rte_eth_dev *dev; 3773 3774 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3775 dev = &rte_eth_devices[port_id]; 3776 3777 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3778 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3779 tpid)); 3780 } 3781 3782 int 3783 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3784 { 3785 struct rte_eth_dev_info dev_info; 3786 struct rte_eth_dev *dev; 3787 int ret = 0; 3788 int mask = 0; 3789 int cur, org = 0; 3790 uint64_t orig_offloads; 3791 uint64_t dev_offloads; 3792 uint64_t new_offloads; 3793 3794 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3795 dev = &rte_eth_devices[port_id]; 3796 3797 /* save original values in case of failure */ 3798 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3799 dev_offloads = orig_offloads; 3800 3801 /* check which option changed by application */ 3802 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3803 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3804 if (cur != org) { 3805 if (cur) 3806 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3807 else 3808 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3809 mask |= ETH_VLAN_STRIP_MASK; 3810 } 3811 3812 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3813 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3814 if (cur != org) { 3815 if (cur) 3816 dev_offloads 
|= DEV_RX_OFFLOAD_VLAN_FILTER; 3817 else 3818 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3819 mask |= ETH_VLAN_FILTER_MASK; 3820 } 3821 3822 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3823 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3824 if (cur != org) { 3825 if (cur) 3826 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3827 else 3828 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3829 mask |= ETH_VLAN_EXTEND_MASK; 3830 } 3831 3832 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3833 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3834 if (cur != org) { 3835 if (cur) 3836 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3837 else 3838 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3839 mask |= ETH_QINQ_STRIP_MASK; 3840 } 3841 3842 /*no change*/ 3843 if (mask == 0) 3844 return ret; 3845 3846 ret = rte_eth_dev_info_get(port_id, &dev_info); 3847 if (ret != 0) 3848 return ret; 3849 3850 /* Rx VLAN offloading must be within its device capabilities */ 3851 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3852 new_offloads = dev_offloads & ~orig_offloads; 3853 RTE_ETHDEV_LOG(ERR, 3854 "Ethdev port_id=%u requested new added VLAN offloads " 3855 "0x%" PRIx64 " must be within Rx offloads capabilities " 3856 "0x%" PRIx64 " in %s()\n", 3857 port_id, new_offloads, dev_info.rx_offload_capa, 3858 __func__); 3859 return -EINVAL; 3860 } 3861 3862 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3863 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3864 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3865 if (ret) { 3866 /* hit an error restore original values */ 3867 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3868 } 3869 3870 return eth_err(port_id, ret); 3871 } 3872 3873 int 3874 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3875 { 3876 struct rte_eth_dev *dev; 3877 uint64_t *dev_offloads; 3878 int ret = 0; 3879 3880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3881 dev = &rte_eth_devices[port_id]; 3882 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3883 3884 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3885 ret |= ETH_VLAN_STRIP_OFFLOAD; 3886 3887 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3888 ret |= ETH_VLAN_FILTER_OFFLOAD; 3889 3890 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3891 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3892 3893 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3894 ret |= ETH_QINQ_STRIP_OFFLOAD; 3895 3896 return ret; 3897 } 3898 3899 int 3900 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3901 { 3902 struct rte_eth_dev *dev; 3903 3904 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3905 dev = &rte_eth_devices[port_id]; 3906 3907 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3908 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3909 } 3910 3911 int 3912 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3913 { 3914 struct rte_eth_dev *dev; 3915 3916 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3917 dev = &rte_eth_devices[port_id]; 3918 3919 if (fc_conf == NULL) { 3920 RTE_ETHDEV_LOG(ERR, 3921 "Cannot get ethdev port %u flow control config to NULL\n", 3922 port_id); 3923 return -EINVAL; 3924 } 3925 3926 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3927 memset(fc_conf, 0, sizeof(*fc_conf)); 3928 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3929 } 3930 3931 int 3932 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3933 { 3934 struct rte_eth_dev 
*dev; 3935 3936 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3937 dev = &rte_eth_devices[port_id]; 3938 3939 if (fc_conf == NULL) { 3940 RTE_ETHDEV_LOG(ERR, 3941 "Cannot set ethdev port %u flow control from NULL config\n", 3942 port_id); 3943 return -EINVAL; 3944 } 3945 3946 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3947 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3948 return -EINVAL; 3949 } 3950 3951 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3952 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3953 } 3954 3955 int 3956 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3957 struct rte_eth_pfc_conf *pfc_conf) 3958 { 3959 struct rte_eth_dev *dev; 3960 3961 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3962 dev = &rte_eth_devices[port_id]; 3963 3964 if (pfc_conf == NULL) { 3965 RTE_ETHDEV_LOG(ERR, 3966 "Cannot set ethdev port %u priority flow control from NULL config\n", 3967 port_id); 3968 return -EINVAL; 3969 } 3970 3971 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3972 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3973 return -EINVAL; 3974 } 3975 3976 /* High water, low water validation are device specific */ 3977 if (*dev->dev_ops->priority_flow_ctrl_set) 3978 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3979 (dev, pfc_conf)); 3980 return -ENOTSUP; 3981 } 3982 3983 static int 3984 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3985 uint16_t reta_size) 3986 { 3987 uint16_t i, num; 3988 3989 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3990 for (i = 0; i < num; i++) { 3991 if (reta_conf[i].mask) 3992 return 0; 3993 } 3994 3995 return -EINVAL; 3996 } 3997 3998 static int 3999 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4000 uint16_t reta_size, 4001 uint16_t max_rxq) 4002 { 4003 uint16_t i, idx, shift; 4004 4005 if (max_rxq == 0) { 4006 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4007 return -EINVAL; 4008 } 4009 4010 for (i = 0; i < reta_size; i++) { 4011 idx = i / RTE_RETA_GROUP_SIZE; 4012 shift = i % RTE_RETA_GROUP_SIZE; 4013 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4014 (reta_conf[idx].reta[shift] >= max_rxq)) { 4015 RTE_ETHDEV_LOG(ERR, 4016 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4017 idx, shift, 4018 reta_conf[idx].reta[shift], max_rxq); 4019 return -EINVAL; 4020 } 4021 } 4022 4023 return 0; 4024 } 4025 4026 int 4027 rte_eth_dev_rss_reta_update(uint16_t port_id, 4028 struct rte_eth_rss_reta_entry64 *reta_conf, 4029 uint16_t reta_size) 4030 { 4031 struct rte_eth_dev *dev; 4032 int ret; 4033 4034 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4035 dev = &rte_eth_devices[port_id]; 4036 4037 if (reta_conf == NULL) { 4038 RTE_ETHDEV_LOG(ERR, 4039 "Cannot update ethdev port %u RSS RETA to NULL\n", 4040 port_id); 4041 return -EINVAL; 4042 } 4043 4044 if (reta_size == 0) { 4045 RTE_ETHDEV_LOG(ERR, 4046 "Cannot update ethdev port %u RSS RETA with zero size\n", 4047 port_id); 4048 return -EINVAL; 4049 } 4050 4051 /* Check mask bits */ 4052 ret = eth_check_reta_mask(reta_conf, reta_size); 4053 if (ret < 0) 4054 return ret; 4055 4056 /* Check entry value */ 4057 ret = eth_check_reta_entry(reta_conf, reta_size, 4058 dev->data->nb_rx_queues); 4059 if (ret < 0) 4060 return ret; 4061 4062 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4063 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4064 reta_size)); 4065 } 4066 
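/*
 * Illustrative usage sketch (not part of this file): a typical application
 * fills one rte_eth_rss_reta_entry64 per group of RTE_RETA_GROUP_SIZE entries
 * and spreads the table round-robin over its Rx queues before calling
 * rte_eth_dev_rss_reta_update(). The port_id, dev_info and nb_rxq variables
 * are assumed to come from the application's own setup code; this pattern
 * satisfies the mask and queue-range checks enforced above.
 *
 *	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *	int rc;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |= RTE_BIT64(i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % nb_rxq;
 *	}
 *	rc = rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */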
4067 int 4068 rte_eth_dev_rss_reta_query(uint16_t port_id, 4069 struct rte_eth_rss_reta_entry64 *reta_conf, 4070 uint16_t reta_size) 4071 { 4072 struct rte_eth_dev *dev; 4073 int ret; 4074 4075 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4076 dev = &rte_eth_devices[port_id]; 4077 4078 if (reta_conf == NULL) { 4079 RTE_ETHDEV_LOG(ERR, 4080 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4081 port_id); 4082 return -EINVAL; 4083 } 4084 4085 /* Check mask bits */ 4086 ret = eth_check_reta_mask(reta_conf, reta_size); 4087 if (ret < 0) 4088 return ret; 4089 4090 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4091 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4092 reta_size)); 4093 } 4094 4095 int 4096 rte_eth_dev_rss_hash_update(uint16_t port_id, 4097 struct rte_eth_rss_conf *rss_conf) 4098 { 4099 struct rte_eth_dev *dev; 4100 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4101 int ret; 4102 4103 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4104 dev = &rte_eth_devices[port_id]; 4105 4106 if (rss_conf == NULL) { 4107 RTE_ETHDEV_LOG(ERR, 4108 "Cannot update ethdev port %u RSS hash from NULL config\n", 4109 port_id); 4110 return -EINVAL; 4111 } 4112 4113 ret = rte_eth_dev_info_get(port_id, &dev_info); 4114 if (ret != 0) 4115 return ret; 4116 4117 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4118 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4119 dev_info.flow_type_rss_offloads) { 4120 RTE_ETHDEV_LOG(ERR, 4121 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4122 port_id, rss_conf->rss_hf, 4123 dev_info.flow_type_rss_offloads); 4124 return -EINVAL; 4125 } 4126 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4127 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4128 rss_conf)); 4129 } 4130 4131 int 4132 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4133 struct rte_eth_rss_conf *rss_conf) 4134 { 4135 struct rte_eth_dev *dev; 4136 4137 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4138 dev = &rte_eth_devices[port_id]; 4139 4140 if (rss_conf == NULL) { 4141 RTE_ETHDEV_LOG(ERR, 4142 "Cannot get ethdev port %u RSS hash config to NULL\n", 4143 port_id); 4144 return -EINVAL; 4145 } 4146 4147 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4148 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4149 rss_conf)); 4150 } 4151 4152 int 4153 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4154 struct rte_eth_udp_tunnel *udp_tunnel) 4155 { 4156 struct rte_eth_dev *dev; 4157 4158 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4159 dev = &rte_eth_devices[port_id]; 4160 4161 if (udp_tunnel == NULL) { 4162 RTE_ETHDEV_LOG(ERR, 4163 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4164 port_id); 4165 return -EINVAL; 4166 } 4167 4168 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4169 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4170 return -EINVAL; 4171 } 4172 4173 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4174 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4175 udp_tunnel)); 4176 } 4177 4178 int 4179 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4180 struct rte_eth_udp_tunnel *udp_tunnel) 4181 { 4182 struct rte_eth_dev *dev; 4183 4184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4185 dev = &rte_eth_devices[port_id]; 4186 4187 if (udp_tunnel == NULL) { 4188 RTE_ETHDEV_LOG(ERR, 4189 "Cannot delete ethdev port %u UDP 
tunnel port from NULL UDP tunnel\n", 4190 port_id); 4191 return -EINVAL; 4192 } 4193 4194 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4195 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4196 return -EINVAL; 4197 } 4198 4199 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4200 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4201 udp_tunnel)); 4202 } 4203 4204 int 4205 rte_eth_led_on(uint16_t port_id) 4206 { 4207 struct rte_eth_dev *dev; 4208 4209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4210 dev = &rte_eth_devices[port_id]; 4211 4212 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4213 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4214 } 4215 4216 int 4217 rte_eth_led_off(uint16_t port_id) 4218 { 4219 struct rte_eth_dev *dev; 4220 4221 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4222 dev = &rte_eth_devices[port_id]; 4223 4224 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4225 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4226 } 4227 4228 int 4229 rte_eth_fec_get_capability(uint16_t port_id, 4230 struct rte_eth_fec_capa *speed_fec_capa, 4231 unsigned int num) 4232 { 4233 struct rte_eth_dev *dev; 4234 int ret; 4235 4236 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4237 dev = &rte_eth_devices[port_id]; 4238 4239 if (speed_fec_capa == NULL && num > 0) { 4240 RTE_ETHDEV_LOG(ERR, 4241 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4242 port_id); 4243 return -EINVAL; 4244 } 4245 4246 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4247 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4248 4249 return ret; 4250 } 4251 4252 int 4253 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4254 { 4255 struct rte_eth_dev *dev; 4256 4257 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4258 dev = &rte_eth_devices[port_id]; 4259 4260 if (fec_capa == NULL) { 4261 RTE_ETHDEV_LOG(ERR, 4262 "Cannot get ethdev port %u current FEC mode to NULL\n", 4263 port_id); 4264 return -EINVAL; 4265 } 4266 4267 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4268 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4269 } 4270 4271 int 4272 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4273 { 4274 struct rte_eth_dev *dev; 4275 4276 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4277 dev = &rte_eth_devices[port_id]; 4278 4279 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4280 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4281 } 4282 4283 /* 4284 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4285 * an empty spot. 
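 * The lookup is a linear scan over the first dev_info.max_mac_addrs entries
 * and returns -1 both when no match is found and when device info cannot be
 * retrieved.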
4286 */ 4287 static int 4288 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4289 { 4290 struct rte_eth_dev_info dev_info; 4291 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4292 unsigned i; 4293 int ret; 4294 4295 ret = rte_eth_dev_info_get(port_id, &dev_info); 4296 if (ret != 0) 4297 return -1; 4298 4299 for (i = 0; i < dev_info.max_mac_addrs; i++) 4300 if (memcmp(addr, &dev->data->mac_addrs[i], 4301 RTE_ETHER_ADDR_LEN) == 0) 4302 return i; 4303 4304 return -1; 4305 } 4306 4307 static const struct rte_ether_addr null_mac_addr; 4308 4309 int 4310 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4311 uint32_t pool) 4312 { 4313 struct rte_eth_dev *dev; 4314 int index; 4315 uint64_t pool_mask; 4316 int ret; 4317 4318 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4319 dev = &rte_eth_devices[port_id]; 4320 4321 if (addr == NULL) { 4322 RTE_ETHDEV_LOG(ERR, 4323 "Cannot add ethdev port %u MAC address from NULL address\n", 4324 port_id); 4325 return -EINVAL; 4326 } 4327 4328 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4329 4330 if (rte_is_zero_ether_addr(addr)) { 4331 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4332 port_id); 4333 return -EINVAL; 4334 } 4335 if (pool >= ETH_64_POOLS) { 4336 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1); 4337 return -EINVAL; 4338 } 4339 4340 index = eth_dev_get_mac_addr_index(port_id, addr); 4341 if (index < 0) { 4342 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4343 if (index < 0) { 4344 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4345 port_id); 4346 return -ENOSPC; 4347 } 4348 } else { 4349 pool_mask = dev->data->mac_pool_sel[index]; 4350 4351 /* Check if both MAC address and pool is already there, and do nothing */ 4352 if (pool_mask & RTE_BIT64(pool)) 4353 return 0; 4354 } 4355 4356 /* Update NIC */ 4357 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4358 4359 if (ret == 0) { 4360 /* Update address in NIC data structure */ 4361 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4362 4363 /* Update pool bitmap in NIC data structure */ 4364 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4365 } 4366 4367 return eth_err(port_id, ret); 4368 } 4369 4370 int 4371 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4372 { 4373 struct rte_eth_dev *dev; 4374 int index; 4375 4376 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4377 dev = &rte_eth_devices[port_id]; 4378 4379 if (addr == NULL) { 4380 RTE_ETHDEV_LOG(ERR, 4381 "Cannot remove ethdev port %u MAC address from NULL address\n", 4382 port_id); 4383 return -EINVAL; 4384 } 4385 4386 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4387 4388 index = eth_dev_get_mac_addr_index(port_id, addr); 4389 if (index == 0) { 4390 RTE_ETHDEV_LOG(ERR, 4391 "Port %u: Cannot remove default MAC address\n", 4392 port_id); 4393 return -EADDRINUSE; 4394 } else if (index < 0) 4395 return 0; /* Do nothing if address wasn't found */ 4396 4397 /* Update NIC */ 4398 (*dev->dev_ops->mac_addr_remove)(dev, index); 4399 4400 /* Update address in NIC data structure */ 4401 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4402 4403 /* reset pool bitmap */ 4404 dev->data->mac_pool_sel[index] = 0; 4405 4406 return 0; 4407 } 4408 4409 int 4410 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4411 { 4412 struct rte_eth_dev *dev; 4413 int ret; 4414 4415 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4416 dev = &rte_eth_devices[port_id]; 4417 4418 if (addr == NULL) { 4419 RTE_ETHDEV_LOG(ERR, 4420 "Cannot set ethdev port %u default MAC address from NULL address\n", 4421 port_id); 4422 return -EINVAL; 4423 } 4424 4425 if (!rte_is_valid_assigned_ether_addr(addr)) 4426 return -EINVAL; 4427 4428 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4429 4430 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4431 if (ret < 0) 4432 return ret; 4433 4434 /* Update default address in NIC data structure */ 4435 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4436 4437 return 0; 4438 } 4439 4440 4441 /* 4442 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4443 * an empty spot. 4444 */ 4445 static int 4446 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4447 const struct rte_ether_addr *addr) 4448 { 4449 struct rte_eth_dev_info dev_info; 4450 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4451 unsigned i; 4452 int ret; 4453 4454 ret = rte_eth_dev_info_get(port_id, &dev_info); 4455 if (ret != 0) 4456 return -1; 4457 4458 if (!dev->data->hash_mac_addrs) 4459 return -1; 4460 4461 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4462 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4463 RTE_ETHER_ADDR_LEN) == 0) 4464 return i; 4465 4466 return -1; 4467 } 4468 4469 int 4470 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4471 uint8_t on) 4472 { 4473 int index; 4474 int ret; 4475 struct rte_eth_dev *dev; 4476 4477 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4478 dev = &rte_eth_devices[port_id]; 4479 4480 if (addr == NULL) { 4481 RTE_ETHDEV_LOG(ERR, 4482 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4483 port_id); 4484 return -EINVAL; 4485 } 4486 4487 if (rte_is_zero_ether_addr(addr)) { 4488 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4489 port_id); 4490 return -EINVAL; 4491 } 4492 4493 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4494 /* Check if it's already there, and do nothing */ 4495 if ((index >= 0) && on) 4496 return 0; 4497 4498 if (index < 0) { 4499 if (!on) { 4500 RTE_ETHDEV_LOG(ERR, 4501 "Port %u: the MAC address was not set in UTA\n", 4502 port_id); 4503 return -EINVAL; 4504 } 4505 4506 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4507 if (index < 0) { 4508 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4509 port_id); 4510 return -ENOSPC; 4511 } 4512 } 4513 4514 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4515 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4516 if (ret == 0) { 4517 /* Update address in NIC data structure */ 4518 if (on) 4519 rte_ether_addr_copy(addr, 4520 &dev->data->hash_mac_addrs[index]); 4521 else 4522 rte_ether_addr_copy(&null_mac_addr, 4523 &dev->data->hash_mac_addrs[index]); 4524 } 4525 4526 return eth_err(port_id, ret); 4527 } 4528 4529 int 4530 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4531 { 4532 struct rte_eth_dev *dev; 4533 4534 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4535 dev = &rte_eth_devices[port_id]; 4536 4537 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4538 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4539 on)); 4540 } 4541 4542 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4543 uint16_t tx_rate) 4544 { 4545 struct rte_eth_dev *dev; 4546 struct rte_eth_dev_info dev_info; 4547 struct rte_eth_link 
link;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	link = dev->data->dev_link;

	if (queue_idx > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit:port %u: invalid queue ID=%u\n",
			port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));
}

RTE_INIT(eth_dev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
}

RTE_INIT(eth_dev_init_cb_lists)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}

int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot register ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback.
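		 * If an identical callback (same function, argument and
		 * event) is already registered for this port, it is reused
		 * as is and no duplicate entry is added.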
		 */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}

		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return 0;
}

int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot unregister ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return ret;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * for secondary process, at that point we expect device
	 * to be already 'usable', so shared data and all function pointers
	 * for fast-path devops have to be setup properly inside rte_eth_dev.
4758 */ 4759 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4760 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4761 4762 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4763 4764 dev->state = RTE_ETH_DEV_ATTACHED; 4765 } 4766 4767 int 4768 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4769 { 4770 uint32_t vec; 4771 struct rte_eth_dev *dev; 4772 struct rte_intr_handle *intr_handle; 4773 uint16_t qid; 4774 int rc; 4775 4776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4777 dev = &rte_eth_devices[port_id]; 4778 4779 if (!dev->intr_handle) { 4780 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4781 return -ENOTSUP; 4782 } 4783 4784 intr_handle = dev->intr_handle; 4785 if (!intr_handle->intr_vec) { 4786 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4787 return -EPERM; 4788 } 4789 4790 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4791 vec = intr_handle->intr_vec[qid]; 4792 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4793 if (rc && rc != -EEXIST) { 4794 RTE_ETHDEV_LOG(ERR, 4795 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4796 port_id, qid, op, epfd, vec); 4797 } 4798 } 4799 4800 return 0; 4801 } 4802 4803 int 4804 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4805 { 4806 struct rte_intr_handle *intr_handle; 4807 struct rte_eth_dev *dev; 4808 unsigned int efd_idx; 4809 uint32_t vec; 4810 int fd; 4811 4812 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4813 dev = &rte_eth_devices[port_id]; 4814 4815 if (queue_id >= dev->data->nb_rx_queues) { 4816 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4817 return -1; 4818 } 4819 4820 if (!dev->intr_handle) { 4821 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4822 return -1; 4823 } 4824 4825 intr_handle = dev->intr_handle; 4826 if (!intr_handle->intr_vec) { 4827 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4828 return -1; 4829 } 4830 4831 vec = intr_handle->intr_vec[queue_id]; 4832 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4833 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4834 fd = intr_handle->efds[efd_idx]; 4835 4836 return fd; 4837 } 4838 4839 static inline int 4840 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4841 const char *ring_name) 4842 { 4843 return snprintf(name, len, "eth_p%d_q%d_%s", 4844 port_id, queue_id, ring_name); 4845 } 4846 4847 const struct rte_memzone * 4848 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4849 uint16_t queue_id, size_t size, unsigned align, 4850 int socket_id) 4851 { 4852 char z_name[RTE_MEMZONE_NAMESIZE]; 4853 const struct rte_memzone *mz; 4854 int rc; 4855 4856 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4857 queue_id, ring_name); 4858 if (rc >= RTE_MEMZONE_NAMESIZE) { 4859 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4860 rte_errno = ENAMETOOLONG; 4861 return NULL; 4862 } 4863 4864 mz = rte_memzone_lookup(z_name); 4865 if (mz) { 4866 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4867 size > mz->len || 4868 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4869 RTE_ETHDEV_LOG(ERR, 4870 "memzone %s does not justify the requested attributes\n", 4871 mz->name); 4872 return NULL; 4873 } 4874 4875 return mz; 4876 } 4877 4878 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4879 RTE_MEMZONE_IOVA_CONTIG, align); 4880 } 4881 4882 int 4883 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4884 uint16_t queue_id) 4885 { 4886 char z_name[RTE_MEMZONE_NAMESIZE]; 4887 const struct rte_memzone *mz; 4888 int rc = 0; 4889 4890 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4891 queue_id, ring_name); 4892 if (rc >= RTE_MEMZONE_NAMESIZE) { 4893 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4894 return -ENAMETOOLONG; 4895 } 4896 4897 mz = rte_memzone_lookup(z_name); 4898 if (mz) 4899 rc = rte_memzone_free(mz); 4900 else 4901 rc = -ENOENT; 4902 4903 return rc; 4904 } 4905 4906 int 4907 rte_eth_dev_create(struct rte_device *device, const char *name, 4908 size_t priv_data_size, 4909 ethdev_bus_specific_init ethdev_bus_specific_init, 4910 void *bus_init_params, 4911 ethdev_init_t ethdev_init, void *init_params) 4912 { 4913 struct rte_eth_dev *ethdev; 4914 int retval; 4915 4916 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4917 4918 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4919 ethdev = rte_eth_dev_allocate(name); 4920 if (!ethdev) 4921 return -ENODEV; 4922 4923 if (priv_data_size) { 4924 ethdev->data->dev_private = rte_zmalloc_socket( 4925 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4926 device->numa_node); 4927 4928 if (!ethdev->data->dev_private) { 4929 RTE_ETHDEV_LOG(ERR, 4930 "failed to allocate private data\n"); 4931 retval = -ENOMEM; 4932 goto probe_failed; 4933 } 4934 } 4935 } else { 4936 ethdev = rte_eth_dev_attach_secondary(name); 4937 if (!ethdev) { 4938 RTE_ETHDEV_LOG(ERR, 4939 "secondary process attach failed, ethdev doesn't exist\n"); 4940 return -ENODEV; 4941 } 4942 } 4943 4944 ethdev->device = device; 4945 4946 if (ethdev_bus_specific_init) { 4947 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4948 if (retval) { 4949 RTE_ETHDEV_LOG(ERR, 4950 "ethdev bus specific initialisation failed\n"); 4951 goto probe_failed; 4952 } 4953 } 4954 4955 retval = ethdev_init(ethdev, init_params); 4956 if (retval) { 4957 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4958 goto probe_failed; 4959 } 4960 4961 rte_eth_dev_probing_finish(ethdev); 4962 4963 return retval; 4964 4965 probe_failed: 4966 
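	/* Unwind a partially completed probe: release the port so that its
	 * name and port ID can be reused by a later allocation attempt.
	 */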
rte_eth_dev_release_port(ethdev); 4967 return retval; 4968 } 4969 4970 int 4971 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4972 ethdev_uninit_t ethdev_uninit) 4973 { 4974 int ret; 4975 4976 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4977 if (!ethdev) 4978 return -ENODEV; 4979 4980 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4981 4982 ret = ethdev_uninit(ethdev); 4983 if (ret) 4984 return ret; 4985 4986 return rte_eth_dev_release_port(ethdev); 4987 } 4988 4989 int 4990 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4991 int epfd, int op, void *data) 4992 { 4993 uint32_t vec; 4994 struct rte_eth_dev *dev; 4995 struct rte_intr_handle *intr_handle; 4996 int rc; 4997 4998 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4999 dev = &rte_eth_devices[port_id]; 5000 5001 if (queue_id >= dev->data->nb_rx_queues) { 5002 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5003 return -EINVAL; 5004 } 5005 5006 if (!dev->intr_handle) { 5007 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5008 return -ENOTSUP; 5009 } 5010 5011 intr_handle = dev->intr_handle; 5012 if (!intr_handle->intr_vec) { 5013 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5014 return -EPERM; 5015 } 5016 5017 vec = intr_handle->intr_vec[queue_id]; 5018 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5019 if (rc && rc != -EEXIST) { 5020 RTE_ETHDEV_LOG(ERR, 5021 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5022 port_id, queue_id, op, epfd, vec); 5023 return rc; 5024 } 5025 5026 return 0; 5027 } 5028 5029 int 5030 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5031 uint16_t queue_id) 5032 { 5033 struct rte_eth_dev *dev; 5034 int ret; 5035 5036 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5037 dev = &rte_eth_devices[port_id]; 5038 5039 ret = eth_dev_validate_rx_queue(dev, queue_id); 5040 if (ret != 0) 5041 return ret; 5042 5043 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5044 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5045 } 5046 5047 int 5048 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5049 uint16_t queue_id) 5050 { 5051 struct rte_eth_dev *dev; 5052 int ret; 5053 5054 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5055 dev = &rte_eth_devices[port_id]; 5056 5057 ret = eth_dev_validate_rx_queue(dev, queue_id); 5058 if (ret != 0) 5059 return ret; 5060 5061 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5062 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5063 } 5064 5065 5066 const struct rte_eth_rxtx_callback * 5067 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5068 rte_rx_callback_fn fn, void *user_param) 5069 { 5070 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5071 rte_errno = ENOTSUP; 5072 return NULL; 5073 #endif 5074 struct rte_eth_dev *dev; 5075 5076 /* check input parameters */ 5077 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5078 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5079 rte_errno = EINVAL; 5080 return NULL; 5081 } 5082 dev = &rte_eth_devices[port_id]; 5083 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5084 rte_errno = EINVAL; 5085 return NULL; 5086 } 5087 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5088 5089 if (cb == NULL) { 5090 rte_errno = ENOMEM; 5091 return NULL; 5092 } 5093 5094 cb->fn.rx = fn; 5095 cb->param = user_param; 5096 5097 rte_spinlock_lock(ð_dev_rx_cb_lock); 5098 /* Add the callbacks in fifo order. 
	 */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	/* Stores to cb->fn, cb->param and cb->next should complete before
	 * cb is visible to data plane threads.
	 */
	__atomic_store_n(
		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
		cb, __ATOMIC_RELEASE);
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list.
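			 * The callback structure itself is not freed here;
			 * the caller still owns it and must release it only
			 * after making sure no data-path thread can still be
			 * using it.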
*/ 5279 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5280 ret = 0; 5281 break; 5282 } 5283 } 5284 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5285 5286 return ret; 5287 } 5288 5289 int 5290 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5291 struct rte_eth_rxq_info *qinfo) 5292 { 5293 struct rte_eth_dev *dev; 5294 5295 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5296 dev = &rte_eth_devices[port_id]; 5297 5298 if (queue_id >= dev->data->nb_rx_queues) { 5299 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5300 return -EINVAL; 5301 } 5302 5303 if (qinfo == NULL) { 5304 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5305 port_id, queue_id); 5306 return -EINVAL; 5307 } 5308 5309 if (dev->data->rx_queues == NULL || 5310 dev->data->rx_queues[queue_id] == NULL) { 5311 RTE_ETHDEV_LOG(ERR, 5312 "Rx queue %"PRIu16" of device with port_id=%" 5313 PRIu16" has not been setup\n", 5314 queue_id, port_id); 5315 return -EINVAL; 5316 } 5317 5318 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5319 RTE_ETHDEV_LOG(INFO, 5320 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5321 queue_id, port_id); 5322 return -EINVAL; 5323 } 5324 5325 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5326 5327 memset(qinfo, 0, sizeof(*qinfo)); 5328 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5329 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5330 5331 return 0; 5332 } 5333 5334 int 5335 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5336 struct rte_eth_txq_info *qinfo) 5337 { 5338 struct rte_eth_dev *dev; 5339 5340 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5341 dev = &rte_eth_devices[port_id]; 5342 5343 if (queue_id >= dev->data->nb_tx_queues) { 5344 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5345 return -EINVAL; 5346 } 5347 5348 if (qinfo == NULL) { 5349 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5350 port_id, queue_id); 5351 return -EINVAL; 5352 } 5353 5354 if (dev->data->tx_queues == NULL || 5355 dev->data->tx_queues[queue_id] == NULL) { 5356 RTE_ETHDEV_LOG(ERR, 5357 "Tx queue %"PRIu16" of device with port_id=%" 5358 PRIu16" has not been setup\n", 5359 queue_id, port_id); 5360 return -EINVAL; 5361 } 5362 5363 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5364 RTE_ETHDEV_LOG(INFO, 5365 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5366 queue_id, port_id); 5367 return -EINVAL; 5368 } 5369 5370 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5371 5372 memset(qinfo, 0, sizeof(*qinfo)); 5373 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5374 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5375 5376 return 0; 5377 } 5378 5379 int 5380 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5381 struct rte_eth_burst_mode *mode) 5382 { 5383 struct rte_eth_dev *dev; 5384 5385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5386 dev = &rte_eth_devices[port_id]; 5387 5388 if (queue_id >= dev->data->nb_rx_queues) { 5389 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5390 return -EINVAL; 5391 } 5392 5393 if (mode == NULL) { 5394 RTE_ETHDEV_LOG(ERR, 5395 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5396 port_id, queue_id); 5397 return -EINVAL; 5398 } 5399 5400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5401 memset(mode, 0, sizeof(*mode)); 5402 return eth_err(port_id, 5403 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5404 } 5405 5406 int 5407 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5408 struct rte_eth_burst_mode *mode) 5409 { 5410 struct rte_eth_dev *dev; 5411 5412 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5413 dev = &rte_eth_devices[port_id]; 5414 5415 if (queue_id >= dev->data->nb_tx_queues) { 5416 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5417 return -EINVAL; 5418 } 5419 5420 if (mode == NULL) { 5421 RTE_ETHDEV_LOG(ERR, 5422 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5423 port_id, queue_id); 5424 return -EINVAL; 5425 } 5426 5427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5428 memset(mode, 0, sizeof(*mode)); 5429 return eth_err(port_id, 5430 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5431 } 5432 5433 int 5434 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5435 struct rte_power_monitor_cond *pmc) 5436 { 5437 struct rte_eth_dev *dev; 5438 5439 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5440 dev = &rte_eth_devices[port_id]; 5441 5442 if (queue_id >= dev->data->nb_rx_queues) { 5443 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5444 return -EINVAL; 5445 } 5446 5447 if (pmc == NULL) { 5448 RTE_ETHDEV_LOG(ERR, 5449 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5450 port_id, queue_id); 5451 return -EINVAL; 5452 } 5453 5454 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5455 return eth_err(port_id, 5456 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5457 } 5458 5459 int 5460 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5461 struct rte_ether_addr *mc_addr_set, 5462 uint32_t nb_mc_addr) 5463 { 5464 struct rte_eth_dev *dev; 5465 5466 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5467 dev = &rte_eth_devices[port_id]; 5468 5469 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5470 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5471 mc_addr_set, nb_mc_addr)); 5472 } 5473 5474 int 5475 rte_eth_timesync_enable(uint16_t port_id) 5476 { 5477 struct rte_eth_dev *dev; 5478 5479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5480 dev = &rte_eth_devices[port_id]; 5481 5482 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5483 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5484 } 5485 5486 int 5487 rte_eth_timesync_disable(uint16_t port_id) 5488 { 5489 struct rte_eth_dev *dev; 5490 5491 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5492 dev = &rte_eth_devices[port_id]; 5493 5494 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5495 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5496 } 5497 5498 int 5499 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5500 uint32_t flags) 5501 { 5502 struct rte_eth_dev *dev; 5503 5504 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5505 dev = &rte_eth_devices[port_id]; 5506 5507 if (timestamp == NULL) { 5508 RTE_ETHDEV_LOG(ERR, 5509 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5510 port_id); 5511 return -EINVAL; 5512 } 5513 5514 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5515 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5516 (dev, timestamp, flags)); 5517 } 5518 5519 int 5520 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5521 struct timespec *timestamp) 5522 { 5523 struct rte_eth_dev *dev; 5524 5525 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5526 dev = &rte_eth_devices[port_id]; 5527 5528 if (timestamp == NULL) { 5529 RTE_ETHDEV_LOG(ERR, 5530 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5531 port_id); 5532 return -EINVAL; 5533 } 5534 5535 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5536 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5537 (dev, timestamp)); 5538 } 5539 5540 int 5541 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5542 { 5543 struct rte_eth_dev *dev; 5544 5545 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5546 dev = &rte_eth_devices[port_id]; 5547 5548 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5549 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5550 } 5551 5552 int 5553 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5554 { 5555 struct rte_eth_dev *dev; 5556 5557 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5558 dev = &rte_eth_devices[port_id]; 5559 5560 if (timestamp == NULL) { 5561 RTE_ETHDEV_LOG(ERR, 5562 "Cannot read ethdev port %u timesync time to NULL\n", 5563 port_id); 5564 return -EINVAL; 5565 } 5566 5567 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5568 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5569 timestamp)); 5570 } 5571 5572 int 5573 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5574 { 5575 struct rte_eth_dev *dev; 5576 5577 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5578 dev = &rte_eth_devices[port_id]; 5579 5580 if (timestamp == NULL) { 5581 RTE_ETHDEV_LOG(ERR, 5582 "Cannot write ethdev port %u timesync from NULL time\n", 5583 port_id); 5584 return -EINVAL; 5585 } 5586 5587 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5588 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5589 timestamp)); 5590 } 5591 5592 int 5593 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5594 { 5595 struct rte_eth_dev *dev; 5596 5597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5598 dev = &rte_eth_devices[port_id]; 5599 5600 if (clock == NULL) { 5601 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5602 port_id); 5603 return -EINVAL; 5604 } 5605 5606 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5607 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5608 } 5609 5610 int 5611 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5612 { 5613 struct rte_eth_dev *dev; 5614 5615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5616 dev = &rte_eth_devices[port_id]; 5617 5618 if (info == NULL) { 5619 RTE_ETHDEV_LOG(ERR, 5620 "Cannot get ethdev port %u register info to NULL\n", 5621 port_id); 5622 return -EINVAL; 5623 } 5624 5625 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5626 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5627 } 5628 5629 int 5630 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5631 { 5632 struct rte_eth_dev *dev; 5633 5634 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5635 dev = &rte_eth_devices[port_id]; 5636 5637 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5638 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5639 } 5640 5641 int 5642 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5643 { 5644 struct rte_eth_dev *dev; 5645 5646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5647 dev = &rte_eth_devices[port_id]; 5648 5649 if (info == NULL) { 5650 RTE_ETHDEV_LOG(ERR, 5651 "Cannot get ethdev port %u EEPROM info to NULL\n", 5652 port_id); 5653 return -EINVAL; 5654 } 5655 5656 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5657 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5658 } 5659 5660 int 5661 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5662 { 5663 struct rte_eth_dev *dev; 5664 5665 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5666 dev = &rte_eth_devices[port_id]; 5667 5668 if (info == NULL) { 5669 RTE_ETHDEV_LOG(ERR, 5670 "Cannot set ethdev port %u EEPROM from NULL info\n", 5671 port_id); 5672 return -EINVAL; 5673 } 5674 5675 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5676 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5677 } 5678 5679 int 5680 rte_eth_dev_get_module_info(uint16_t port_id, 5681 struct rte_eth_dev_module_info *modinfo) 5682 { 5683 struct rte_eth_dev *dev; 5684 5685 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5686 dev = &rte_eth_devices[port_id]; 5687 5688 if (modinfo == NULL) { 5689 RTE_ETHDEV_LOG(ERR, 5690 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5691 port_id); 5692 return -EINVAL; 5693 } 5694 5695 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5696 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5697 } 5698 5699 int 5700 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5701 struct rte_dev_eeprom_info *info) 5702 { 5703 struct rte_eth_dev *dev; 5704 5705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5706 dev = &rte_eth_devices[port_id]; 5707 5708 if (info == NULL) { 5709 RTE_ETHDEV_LOG(ERR, 5710 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5711 port_id); 5712 return -EINVAL; 5713 } 5714 5715 if (info->data == NULL) { 5716 RTE_ETHDEV_LOG(ERR, 5717 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5718 port_id); 5719 return -EINVAL; 5720 } 5721 5722 if (info->length == 0) { 5723 RTE_ETHDEV_LOG(ERR, 5724 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5725 port_id); 5726 return -EINVAL; 5727 } 5728 5729 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5730 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5731 } 5732 5733 int 5734 rte_eth_dev_get_dcb_info(uint16_t port_id, 5735 struct rte_eth_dcb_info *dcb_info) 5736 { 5737 struct rte_eth_dev *dev; 5738 5739 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5740 dev = &rte_eth_devices[port_id]; 5741 5742 if (dcb_info == NULL) { 5743 RTE_ETHDEV_LOG(ERR, 5744 "Cannot get ethdev port %u DCB info to NULL\n", 5745 port_id); 5746 return -EINVAL; 5747 } 5748 5749 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5750 5751 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5752 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5753 } 5754 5755 static void 5756 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5757 const struct rte_eth_desc_lim *desc_lim) 5758 { 5759 if (desc_lim->nb_align != 0) 5760 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5761 5762 if (desc_lim->nb_max != 0) 5763 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5764 5765 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5766 } 5767 5768 int 5769 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5770 uint16_t *nb_rx_desc, 5771 uint16_t *nb_tx_desc) 5772 { 5773 struct rte_eth_dev_info dev_info; 5774 int ret; 5775 5776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
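	/* Fetch the driver's descriptor limits, then let eth_dev_adjust_nb_desc()
	 * align each requested count up to nb_align, cap it at nb_max and raise
	 * it to at least nb_min.
	 */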
5777 5778 ret = rte_eth_dev_info_get(port_id, &dev_info); 5779 if (ret != 0) 5780 return ret; 5781 5782 if (nb_rx_desc != NULL) 5783 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5784 5785 if (nb_tx_desc != NULL) 5786 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5787 5788 return 0; 5789 } 5790 5791 int 5792 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5793 struct rte_eth_hairpin_cap *cap) 5794 { 5795 struct rte_eth_dev *dev; 5796 5797 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5798 dev = &rte_eth_devices[port_id]; 5799 5800 if (cap == NULL) { 5801 RTE_ETHDEV_LOG(ERR, 5802 "Cannot get ethdev port %u hairpin capability to NULL\n", 5803 port_id); 5804 return -EINVAL; 5805 } 5806 5807 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5808 memset(cap, 0, sizeof(*cap)); 5809 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5810 } 5811 5812 int 5813 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5814 { 5815 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5816 return 1; 5817 return 0; 5818 } 5819 5820 int 5821 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5822 { 5823 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5824 return 1; 5825 return 0; 5826 } 5827 5828 int 5829 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5830 { 5831 struct rte_eth_dev *dev; 5832 5833 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5834 dev = &rte_eth_devices[port_id]; 5835 5836 if (pool == NULL) { 5837 RTE_ETHDEV_LOG(ERR, 5838 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5839 port_id); 5840 return -EINVAL; 5841 } 5842 5843 if (*dev->dev_ops->pool_ops_supported == NULL) 5844 return 1; /* all pools are supported */ 5845 5846 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5847 } 5848 5849 /** 5850 * A set of values to describe the possible states of a switch domain. 5851 */ 5852 enum rte_eth_switch_domain_state { 5853 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5854 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5855 }; 5856 5857 /** 5858 * Array of switch domains available for allocation. Array is sized to 5859 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5860 * ethdev ports in a single process. 
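 * A domain ID is simply the index of the first slot found unused at
 * allocation time, so IDs may be reused after rte_eth_switch_domain_free().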
5861 */ 5862 static struct rte_eth_dev_switch { 5863 enum rte_eth_switch_domain_state state; 5864 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5865 5866 int 5867 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5868 { 5869 uint16_t i; 5870 5871 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5872 5873 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5874 if (eth_dev_switch_domains[i].state == 5875 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5876 eth_dev_switch_domains[i].state = 5877 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5878 *domain_id = i; 5879 return 0; 5880 } 5881 } 5882 5883 return -ENOSPC; 5884 } 5885 5886 int 5887 rte_eth_switch_domain_free(uint16_t domain_id) 5888 { 5889 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5890 domain_id >= RTE_MAX_ETHPORTS) 5891 return -EINVAL; 5892 5893 if (eth_dev_switch_domains[domain_id].state != 5894 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5895 return -EINVAL; 5896 5897 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5898 5899 return 0; 5900 } 5901 5902 static int 5903 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5904 { 5905 int state; 5906 struct rte_kvargs_pair *pair; 5907 char *letter; 5908 5909 arglist->str = strdup(str_in); 5910 if (arglist->str == NULL) 5911 return -ENOMEM; 5912 5913 letter = arglist->str; 5914 state = 0; 5915 arglist->count = 0; 5916 pair = &arglist->pairs[0]; 5917 while (1) { 5918 switch (state) { 5919 case 0: /* Initial */ 5920 if (*letter == '=') 5921 return -EINVAL; 5922 else if (*letter == '\0') 5923 return 0; 5924 5925 state = 1; 5926 pair->key = letter; 5927 /* fall-thru */ 5928 5929 case 1: /* Parsing key */ 5930 if (*letter == '=') { 5931 *letter = '\0'; 5932 pair->value = letter + 1; 5933 state = 2; 5934 } else if (*letter == ',' || *letter == '\0') 5935 return -EINVAL; 5936 break; 5937 5938 5939 case 2: /* Parsing value */ 5940 if (*letter == '[') 5941 state = 3; 5942 else if (*letter == ',') { 5943 *letter = '\0'; 5944 arglist->count++; 5945 pair = &arglist->pairs[arglist->count]; 5946 state = 0; 5947 } else if (*letter == '\0') { 5948 letter--; 5949 arglist->count++; 5950 pair = &arglist->pairs[arglist->count]; 5951 state = 0; 5952 } 5953 break; 5954 5955 case 3: /* Parsing list */ 5956 if (*letter == ']') 5957 state = 2; 5958 else if (*letter == '\0') 5959 return -EINVAL; 5960 break; 5961 } 5962 letter++; 5963 } 5964 } 5965 5966 int 5967 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5968 { 5969 struct rte_kvargs args; 5970 struct rte_kvargs_pair *pair; 5971 unsigned int i; 5972 int result = 0; 5973 5974 memset(eth_da, 0, sizeof(*eth_da)); 5975 5976 result = eth_dev_devargs_tokenise(&args, dargs); 5977 if (result < 0) 5978 goto parse_cleanup; 5979 5980 for (i = 0; i < args.count; i++) { 5981 pair = &args.pairs[i]; 5982 if (strcmp("representor", pair->key) == 0) { 5983 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5984 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5985 dargs); 5986 result = -1; 5987 goto parse_cleanup; 5988 } 5989 result = rte_eth_devargs_parse_representor_ports( 5990 pair->value, eth_da); 5991 if (result < 0) 5992 goto parse_cleanup; 5993 } 5994 } 5995 5996 parse_cleanup: 5997 if (args.str) 5998 free(args.str); 5999 6000 return result; 6001 } 6002 6003 int 6004 rte_eth_representor_id_get(uint16_t port_id, 6005 enum rte_eth_representor_type type, 6006 int controller, int pf, int representor_port, 6007 uint16_t *repr_id) 6008 { 6009 int ret, n, count; 6010 uint32_t i; 6011 struct rte_eth_representor_info *info = NULL; 6012 size_t 
size; 6013 6014 if (type == RTE_ETH_REPRESENTOR_NONE) 6015 return 0; 6016 if (repr_id == NULL) 6017 return -EINVAL; 6018 6019 /* Get PMD representor range info. */ 6020 ret = rte_eth_representor_info_get(port_id, NULL); 6021 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 6022 controller == -1 && pf == -1) { 6023 /* Direct mapping for legacy VF representor. */ 6024 *repr_id = representor_port; 6025 return 0; 6026 } else if (ret < 0) { 6027 return ret; 6028 } 6029 n = ret; 6030 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6031 info = calloc(1, size); 6032 if (info == NULL) 6033 return -ENOMEM; 6034 info->nb_ranges_alloc = n; 6035 ret = rte_eth_representor_info_get(port_id, info); 6036 if (ret < 0) 6037 goto out; 6038 6039 /* Default controller and pf to caller. */ 6040 if (controller == -1) 6041 controller = info->controller; 6042 if (pf == -1) 6043 pf = info->pf; 6044 6045 /* Locate representor ID. */ 6046 ret = -ENOENT; 6047 for (i = 0; i < info->nb_ranges; ++i) { 6048 if (info->ranges[i].type != type) 6049 continue; 6050 if (info->ranges[i].controller != controller) 6051 continue; 6052 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6053 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6054 port_id, info->ranges[i].id_base, 6055 info->ranges[i].id_end, i); 6056 continue; 6057 6058 } 6059 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6060 switch (info->ranges[i].type) { 6061 case RTE_ETH_REPRESENTOR_PF: 6062 if (pf < info->ranges[i].pf || 6063 pf >= info->ranges[i].pf + count) 6064 continue; 6065 *repr_id = info->ranges[i].id_base + 6066 (pf - info->ranges[i].pf); 6067 ret = 0; 6068 goto out; 6069 case RTE_ETH_REPRESENTOR_VF: 6070 if (info->ranges[i].pf != pf) 6071 continue; 6072 if (representor_port < info->ranges[i].vf || 6073 representor_port >= info->ranges[i].vf + count) 6074 continue; 6075 *repr_id = info->ranges[i].id_base + 6076 (representor_port - info->ranges[i].vf); 6077 ret = 0; 6078 goto out; 6079 case RTE_ETH_REPRESENTOR_SF: 6080 if (info->ranges[i].pf != pf) 6081 continue; 6082 if (representor_port < info->ranges[i].sf || 6083 representor_port >= info->ranges[i].sf + count) 6084 continue; 6085 *repr_id = info->ranges[i].id_base + 6086 (representor_port - info->ranges[i].sf); 6087 ret = 0; 6088 goto out; 6089 default: 6090 break; 6091 } 6092 } 6093 out: 6094 free(info); 6095 return ret; 6096 } 6097 6098 static int 6099 eth_dev_handle_port_list(const char *cmd __rte_unused, 6100 const char *params __rte_unused, 6101 struct rte_tel_data *d) 6102 { 6103 int port_id; 6104 6105 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6106 RTE_ETH_FOREACH_DEV(port_id) 6107 rte_tel_data_add_array_int(d, port_id); 6108 return 0; 6109 } 6110 6111 static void 6112 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6113 const char *stat_name) 6114 { 6115 int q; 6116 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6117 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6118 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6119 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6120 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6121 } 6122 6123 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6124 6125 static int 6126 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6127 const char *params, 6128 struct rte_tel_data *d) 6129 { 6130 struct rte_eth_stats stats; 6131 int port_id, ret; 6132 6133 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6134 
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	/* the names array starts right after the values array */
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
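/*
 * Illustrative example (not part of the library): the handler above
 * answers a query such as "/ethdev/xstats,0" with a flat dictionary of
 * name/value pairs, e.g.
 *
 *	{"rx_good_packets": 0, "tx_good_packets": 0, ...}
 *
 * where the exact set of names depends on the PMD behind the port.
 */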
6246 "full-duplex" : "half-duplex"); 6247 return 0; 6248 } 6249 6250 static int 6251 eth_dev_handle_port_info(const char *cmd __rte_unused, 6252 const char *params, 6253 struct rte_tel_data *d) 6254 { 6255 struct rte_tel_data *rxq_state, *txq_state; 6256 char mac_addr[RTE_ETHER_ADDR_LEN]; 6257 struct rte_eth_dev *eth_dev; 6258 char *end_param; 6259 int port_id, i; 6260 6261 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6262 return -1; 6263 6264 port_id = strtoul(params, &end_param, 0); 6265 if (*end_param != '\0') 6266 RTE_ETHDEV_LOG(NOTICE, 6267 "Extra parameters passed to ethdev telemetry command, ignoring"); 6268 6269 if (!rte_eth_dev_is_valid_port(port_id)) 6270 return -EINVAL; 6271 6272 eth_dev = &rte_eth_devices[port_id]; 6273 if (!eth_dev) 6274 return -EINVAL; 6275 6276 rxq_state = rte_tel_data_alloc(); 6277 if (!rxq_state) 6278 return -ENOMEM; 6279 6280 txq_state = rte_tel_data_alloc(); 6281 if (!txq_state) 6282 return -ENOMEM; 6283 6284 rte_tel_data_start_dict(d); 6285 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6286 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6287 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6288 eth_dev->data->nb_rx_queues); 6289 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6290 eth_dev->data->nb_tx_queues); 6291 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6292 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6293 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6294 eth_dev->data->min_rx_buf_size); 6295 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6296 eth_dev->data->rx_mbuf_alloc_failed); 6297 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6298 eth_dev->data->mac_addrs->addr_bytes[0], 6299 eth_dev->data->mac_addrs->addr_bytes[1], 6300 eth_dev->data->mac_addrs->addr_bytes[2], 6301 eth_dev->data->mac_addrs->addr_bytes[3], 6302 eth_dev->data->mac_addrs->addr_bytes[4], 6303 eth_dev->data->mac_addrs->addr_bytes[5]); 6304 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6305 rte_tel_data_add_dict_int(d, "promiscuous", 6306 eth_dev->data->promiscuous); 6307 rte_tel_data_add_dict_int(d, "scattered_rx", 6308 eth_dev->data->scattered_rx); 6309 rte_tel_data_add_dict_int(d, "all_multicast", 6310 eth_dev->data->all_multicast); 6311 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6312 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6313 rte_tel_data_add_dict_int(d, "dev_configured", 6314 eth_dev->data->dev_configured); 6315 6316 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6317 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6318 rte_tel_data_add_array_int(rxq_state, 6319 eth_dev->data->rx_queue_state[i]); 6320 6321 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6322 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6323 rte_tel_data_add_array_int(txq_state, 6324 eth_dev->data->tx_queue_state[i]); 6325 6326 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6327 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6328 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6329 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6330 rte_tel_data_add_dict_int(d, "rx_offloads", 6331 eth_dev->data->dev_conf.rxmode.offloads); 6332 rte_tel_data_add_dict_int(d, "tx_offloads", 6333 eth_dev->data->dev_conf.txmode.offloads); 6334 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6335 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6336 6337 return 0; 6338 } 6339 
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
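/*
 * Illustrative usage sketch (not part of the library): an application
 * that wants rte_flow MARK/FLAG metadata delivered in received mbufs
 * would negotiate the features before rte_eth_dev_configure(); the
 * particular feature mask below is an assumption about what the caller
 * needs:
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	int ret;
 *
 *	ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *	if (ret == 0 && (features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *		the PMD agreed to deliver MARK metadata on this port
 *
 * On return, "features" is reduced to the subset the driver supports;
 * -ENOTSUP means the driver does not implement the negotiation at all.
 */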
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
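/*
 * Illustrative example (not part of the library): with telemetry enabled
 * in the running application, the commands registered above can be
 * queried over the telemetry socket, for instance with the helper script
 * shipped in the DPDK tree:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	--> /ethdev/stats,0
 *	--> /ethdev/info,0
 *
 * A command's parameter (here the port ID) is appended after a comma.
 */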