/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	void *ret_param;			/**< Return parameter */
	enum rte_eth_event_type event;		/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
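/*
 * Editor's illustrative sketch (not part of the library): typical application
 * usage of the iterator API above.  The devargs string is only an example
 * value; RTE_ETH_FOREACH_MATCHING_DEV in rte_ethdev.h wraps the same
 * init/next/cleanup sequence.
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iterator, "class=eth,mac=00:11:22:33:44:55") != 0)
 *		return;
 *	for (port_id = rte_eth_iterator_next(&iterator);
 *	     port_id != RTE_MAX_ETHPORTS;
 *	     port_id = rte_eth_iterator_next(&iterator))
 *		printf("matching port %u\n", port_id);
 *	rte_eth_iterator_cleanup(&iterator);
 */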
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
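/*
 * Editor's illustrative sketch (not part of the library): how a PMD probe
 * path commonly pairs rte_eth_dev_allocate() with
 * rte_eth_dev_attach_secondary() below.  The device name "net_example",
 * struct example_private and example_dev_ops are hypothetical.
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 *		eth_dev = rte_eth_dev_allocate("net_example");
 *		if (eth_dev == NULL)
 *			return -ENOMEM;
 *		eth_dev->data->dev_private = rte_zmalloc_socket("net_example",
 *				sizeof(struct example_private), 0, rte_socket_id());
 *	} else {
 *		eth_dev = rte_eth_dev_attach_secondary("net_example");
 *		if (eth_dev == NULL)
 *			return -ENODEV;
 *	}
 *	eth_dev->dev_ops = &example_dev_ops;
 *	rte_eth_dev_probing_finish(eth_dev);
 */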
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner id=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
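/*
 * Editor's illustrative sketch (not part of the library): the intended flow
 * of the ownership API above.  Error handling is trimmed, the owner name is
 * an example value, and port 0 stands in for a previously selected port.
 * While owned, the port is skipped by RTE_ETH_FOREACH_DEV().
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint16_t port_id = 0;
 *
 *	if (rte_eth_dev_owner_new(&owner.id) != 0)
 *		return;
 *	if (rte_eth_dev_owner_set(port_id, &owner) != 0)
 *		return;
 *	... use the port exclusively ...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 *	rte_eth_dev_owner_delete(owner.id);
 */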
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
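/*
 * Editor's illustrative sketch (not part of the library): the usual
 * configure -> queue setup -> start sequence that rte_eth_dev_start() above
 * expects.  port_id and mbuf_pool are assumed to exist; queue and descriptor
 * counts are example values.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		return -1;
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *				   NULL, mbuf_pool) != 0)
 *		return -1;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *				   NULL) != 0)
 *		return -1;
 *	if (rte_eth_dev_start(port_id) != 0)
 *		return -1;
 */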
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
1970 */ 1971 mp_first = rx_seg[0].mp; 1972 offset_mask = (1u << seg_capa->offset_align_log2) - 1; 1973 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1974 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1975 uint32_t length = rx_seg[seg_idx].length; 1976 uint32_t offset = rx_seg[seg_idx].offset; 1977 1978 if (mpl == NULL) { 1979 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1980 return -EINVAL; 1981 } 1982 if (seg_idx != 0 && mp_first != mpl && 1983 seg_capa->multi_pools == 0) { 1984 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1985 return -ENOTSUP; 1986 } 1987 if (offset != 0) { 1988 if (seg_capa->offset_allowed == 0) { 1989 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1990 return -ENOTSUP; 1991 } 1992 if (offset & offset_mask) { 1993 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1994 offset, 1995 seg_capa->offset_align_log2); 1996 return -EINVAL; 1997 } 1998 } 1999 if (mpl->private_data_size < 2000 sizeof(struct rte_pktmbuf_pool_private)) { 2001 RTE_ETHDEV_LOG(ERR, 2002 "%s private_data_size %u < %u\n", 2003 mpl->name, mpl->private_data_size, 2004 (unsigned int)sizeof 2005 (struct rte_pktmbuf_pool_private)); 2006 return -ENOSPC; 2007 } 2008 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2009 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2010 length = length != 0 ? length : *mbp_buf_size; 2011 if (*mbp_buf_size < length + offset) { 2012 RTE_ETHDEV_LOG(ERR, 2013 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2014 mpl->name, *mbp_buf_size, 2015 length + offset, length, offset); 2016 return -EINVAL; 2017 } 2018 } 2019 return 0; 2020 } 2021 2022 int 2023 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2024 uint16_t nb_rx_desc, unsigned int socket_id, 2025 const struct rte_eth_rxconf *rx_conf, 2026 struct rte_mempool *mp) 2027 { 2028 int ret; 2029 uint32_t mbp_buf_size; 2030 struct rte_eth_dev *dev; 2031 struct rte_eth_dev_info dev_info; 2032 struct rte_eth_rxconf local_conf; 2033 2034 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2035 dev = &rte_eth_devices[port_id]; 2036 2037 if (rx_queue_id >= dev->data->nb_rx_queues) { 2038 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2039 return -EINVAL; 2040 } 2041 2042 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2043 2044 ret = rte_eth_dev_info_get(port_id, &dev_info); 2045 if (ret != 0) 2046 return ret; 2047 2048 if (mp != NULL) { 2049 /* Single pool configuration check. */ 2050 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2051 RTE_ETHDEV_LOG(ERR, 2052 "Ambiguous segment configuration\n"); 2053 return -EINVAL; 2054 } 2055 /* 2056 * Check the size of the mbuf data buffer, this value 2057 * must be provided in the private data of the memory pool. 2058 * First check that the memory pool(s) has a valid private data. 
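		 * Pools created with rte_pktmbuf_pool_create() always carry a
		 * struct rte_pktmbuf_pool_private, so the -ENOSPC path below
		 * normally indicates a pool that was built directly with
		 * rte_mempool_create() and an undersized private area.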
2059 */ 2060 if (mp->private_data_size < 2061 sizeof(struct rte_pktmbuf_pool_private)) { 2062 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2063 mp->name, mp->private_data_size, 2064 (unsigned int) 2065 sizeof(struct rte_pktmbuf_pool_private)); 2066 return -ENOSPC; 2067 } 2068 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2069 if (mbp_buf_size < dev_info.min_rx_bufsize + 2070 RTE_PKTMBUF_HEADROOM) { 2071 RTE_ETHDEV_LOG(ERR, 2072 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2073 mp->name, mbp_buf_size, 2074 RTE_PKTMBUF_HEADROOM + 2075 dev_info.min_rx_bufsize, 2076 RTE_PKTMBUF_HEADROOM, 2077 dev_info.min_rx_bufsize); 2078 return -EINVAL; 2079 } 2080 } else { 2081 const struct rte_eth_rxseg_split *rx_seg; 2082 uint16_t n_seg; 2083 2084 /* Extended multi-segment configuration check. */ 2085 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2086 RTE_ETHDEV_LOG(ERR, 2087 "Memory pool is null and no extended configuration provided\n"); 2088 return -EINVAL; 2089 } 2090 2091 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2092 n_seg = rx_conf->rx_nseg; 2093 2094 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2095 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2096 &mbp_buf_size, 2097 &dev_info); 2098 if (ret != 0) 2099 return ret; 2100 } else { 2101 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2102 return -EINVAL; 2103 } 2104 } 2105 2106 /* Use default specified by driver, if nb_rx_desc is zero */ 2107 if (nb_rx_desc == 0) { 2108 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2109 /* If driver default is also zero, fall back on EAL default */ 2110 if (nb_rx_desc == 0) 2111 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2112 } 2113 2114 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2115 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2116 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2117 2118 RTE_ETHDEV_LOG(ERR, 2119 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2120 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2121 dev_info.rx_desc_lim.nb_min, 2122 dev_info.rx_desc_lim.nb_align); 2123 return -EINVAL; 2124 } 2125 2126 if (dev->data->dev_started && 2127 !(dev_info.dev_capa & 2128 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2129 return -EBUSY; 2130 2131 if (dev->data->dev_started && 2132 (dev->data->rx_queue_state[rx_queue_id] != 2133 RTE_ETH_QUEUE_STATE_STOPPED)) 2134 return -EBUSY; 2135 2136 eth_dev_rxq_release(dev, rx_queue_id); 2137 2138 if (rx_conf == NULL) 2139 rx_conf = &dev_info.default_rxconf; 2140 2141 local_conf = *rx_conf; 2142 2143 /* 2144 * If an offloading has already been enabled in 2145 * rte_eth_dev_configure(), it has been enabled on all queues, 2146 * so there is no need to enable it in this queue again. 2147 * The local_conf.offloads input to underlying PMD only carries 2148 * those offloadings which are only enabled on this queue and 2149 * not enabled on all queues. 2150 */ 2151 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2152 2153 /* 2154 * New added offloadings for this queue are those not enabled in 2155 * rte_eth_dev_configure() and they must be per-queue type. 2156 * A pure per-port offloading can't be enabled on a queue while 2157 * disabled on another queue. A pure per-port offloading can't 2158 * be enabled for any queue as new added one if it hasn't been 2159 * enabled in rte_eth_dev_configure(). 
2160 */ 2161 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2162 local_conf.offloads) { 2163 RTE_ETHDEV_LOG(ERR, 2164 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2165 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2166 port_id, rx_queue_id, local_conf.offloads, 2167 dev_info.rx_queue_offload_capa, 2168 __func__); 2169 return -EINVAL; 2170 } 2171 2172 /* 2173 * If LRO is enabled, check that the maximum aggregated packet 2174 * size is supported by the configured device. 2175 */ 2176 /* Get the real Ethernet overhead length */ 2177 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 2178 uint32_t overhead_len; 2179 uint32_t max_rx_pktlen; 2180 int ret; 2181 2182 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2183 dev_info.max_mtu); 2184 max_rx_pktlen = dev->data->mtu + overhead_len; 2185 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2186 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2187 ret = eth_dev_check_lro_pkt_size(port_id, 2188 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2189 max_rx_pktlen, 2190 dev_info.max_lro_pkt_size); 2191 if (ret != 0) 2192 return ret; 2193 } 2194 2195 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2196 socket_id, &local_conf, mp); 2197 if (!ret) { 2198 if (!dev->data->min_rx_buf_size || 2199 dev->data->min_rx_buf_size > mbp_buf_size) 2200 dev->data->min_rx_buf_size = mbp_buf_size; 2201 } 2202 2203 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2204 rx_conf, ret); 2205 return eth_err(port_id, ret); 2206 } 2207 2208 int 2209 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2210 uint16_t nb_rx_desc, 2211 const struct rte_eth_hairpin_conf *conf) 2212 { 2213 int ret; 2214 struct rte_eth_dev *dev; 2215 struct rte_eth_hairpin_cap cap; 2216 int i; 2217 int count; 2218 2219 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2220 dev = &rte_eth_devices[port_id]; 2221 2222 if (rx_queue_id >= dev->data->nb_rx_queues) { 2223 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id); 2224 return -EINVAL; 2225 } 2226 2227 if (conf == NULL) { 2228 RTE_ETHDEV_LOG(ERR, 2229 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2230 port_id); 2231 return -EINVAL; 2232 } 2233 2234 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2235 if (ret != 0) 2236 return ret; 2237 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2238 -ENOTSUP); 2239 /* if nb_rx_desc is zero use max number of desc from the driver. 
 */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
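	 * (The port-wide Tx offloads were recorded in
	 * dev->data->dev_conf.txmode.offloads by rte_eth_dev_configure()
	 * and are masked out of local_conf below.)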
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_tx_desc is zero use max number of desc from the driver.
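	 * (max_nb_desc as reported by rte_eth_dev_hairpin_capability_get(),
	 * the same default as on the Rx side.)
	 * A minimal single-port hairpin pairing could look like this
	 * (sketch only; queue index 1 is hypothetical and both setups
	 * reuse one rte_eth_hairpin_conf):
	 *   struct rte_eth_hairpin_conf hp = { .peer_count = 1 };
	 *   hp.peers[0].port = port_id;
	 *   hp.peers[0].queue = 1;
	 *   rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hp);
	 *   rte_eth_tx_hairpin_queue_setup(port_id, 1, 0, &hp);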
	 */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
			       " from Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
			       size_t len, uint32_t direction)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (peer_ports == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (len == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
				-ENOTSUP);

	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
						      len, direction);
	if (ret < 0)
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2526 port_id, direction ? "Rx" : "Tx"); 2527 2528 return ret; 2529 } 2530 2531 void 2532 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2533 void *userdata __rte_unused) 2534 { 2535 rte_pktmbuf_free_bulk(pkts, unsent); 2536 } 2537 2538 void 2539 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2540 void *userdata) 2541 { 2542 uint64_t *count = userdata; 2543 2544 rte_pktmbuf_free_bulk(pkts, unsent); 2545 *count += unsent; 2546 } 2547 2548 int 2549 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2550 buffer_tx_error_fn cbfn, void *userdata) 2551 { 2552 if (buffer == NULL) { 2553 RTE_ETHDEV_LOG(ERR, 2554 "Cannot set Tx buffer error callback to NULL buffer\n"); 2555 return -EINVAL; 2556 } 2557 2558 buffer->error_callback = cbfn; 2559 buffer->error_userdata = userdata; 2560 return 0; 2561 } 2562 2563 int 2564 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2565 { 2566 int ret = 0; 2567 2568 if (buffer == NULL) { 2569 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2570 return -EINVAL; 2571 } 2572 2573 buffer->size = size; 2574 if (buffer->error_callback == NULL) { 2575 ret = rte_eth_tx_buffer_set_err_callback( 2576 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2577 } 2578 2579 return ret; 2580 } 2581 2582 int 2583 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2584 { 2585 struct rte_eth_dev *dev; 2586 int ret; 2587 2588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2589 dev = &rte_eth_devices[port_id]; 2590 2591 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2592 2593 /* Call driver to free pending mbufs. */ 2594 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2595 free_cnt); 2596 return eth_err(port_id, ret); 2597 } 2598 2599 int 2600 rte_eth_promiscuous_enable(uint16_t port_id) 2601 { 2602 struct rte_eth_dev *dev; 2603 int diag = 0; 2604 2605 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2606 dev = &rte_eth_devices[port_id]; 2607 2608 if (dev->data->promiscuous == 1) 2609 return 0; 2610 2611 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2612 2613 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2614 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2615 2616 return eth_err(port_id, diag); 2617 } 2618 2619 int 2620 rte_eth_promiscuous_disable(uint16_t port_id) 2621 { 2622 struct rte_eth_dev *dev; 2623 int diag = 0; 2624 2625 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2626 dev = &rte_eth_devices[port_id]; 2627 2628 if (dev->data->promiscuous == 0) 2629 return 0; 2630 2631 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2632 2633 dev->data->promiscuous = 0; 2634 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2635 if (diag != 0) 2636 dev->data->promiscuous = 1; 2637 2638 return eth_err(port_id, diag); 2639 } 2640 2641 int 2642 rte_eth_promiscuous_get(uint16_t port_id) 2643 { 2644 struct rte_eth_dev *dev; 2645 2646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2647 dev = &rte_eth_devices[port_id]; 2648 2649 return dev->data->promiscuous; 2650 } 2651 2652 int 2653 rte_eth_allmulticast_enable(uint16_t port_id) 2654 { 2655 struct rte_eth_dev *dev; 2656 int diag; 2657 2658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2659 dev = &rte_eth_devices[port_id]; 2660 2661 if (dev->data->all_multicast == 1) 2662 return 0; 2663 2664 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2665 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2666 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2667 2668 return eth_err(port_id, diag); 2669 } 2670 2671 int 2672 rte_eth_allmulticast_disable(uint16_t port_id) 2673 { 2674 struct rte_eth_dev *dev; 2675 int diag; 2676 2677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2678 dev = &rte_eth_devices[port_id]; 2679 2680 if (dev->data->all_multicast == 0) 2681 return 0; 2682 2683 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2684 dev->data->all_multicast = 0; 2685 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2686 if (diag != 0) 2687 dev->data->all_multicast = 1; 2688 2689 return eth_err(port_id, diag); 2690 } 2691 2692 int 2693 rte_eth_allmulticast_get(uint16_t port_id) 2694 { 2695 struct rte_eth_dev *dev; 2696 2697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2698 dev = &rte_eth_devices[port_id]; 2699 2700 return dev->data->all_multicast; 2701 } 2702 2703 int 2704 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2705 { 2706 struct rte_eth_dev *dev; 2707 2708 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2709 dev = &rte_eth_devices[port_id]; 2710 2711 if (eth_link == NULL) { 2712 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2713 port_id); 2714 return -EINVAL; 2715 } 2716 2717 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2718 rte_eth_linkstatus_get(dev, eth_link); 2719 else { 2720 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2721 (*dev->dev_ops->link_update)(dev, 1); 2722 *eth_link = dev->data->dev_link; 2723 } 2724 2725 return 0; 2726 } 2727 2728 int 2729 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2730 { 2731 struct rte_eth_dev *dev; 2732 2733 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2734 dev = &rte_eth_devices[port_id]; 2735 2736 if (eth_link == NULL) { 2737 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2738 port_id); 2739 return -EINVAL; 2740 } 2741 2742 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2743 rte_eth_linkstatus_get(dev, eth_link); 2744 else { 2745 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2746 (*dev->dev_ops->link_update)(dev, 0); 2747 *eth_link = dev->data->dev_link; 2748 } 2749 2750 return 0; 2751 } 2752 2753 const 
char * 2754 rte_eth_link_speed_to_str(uint32_t link_speed) 2755 { 2756 switch (link_speed) { 2757 case ETH_SPEED_NUM_NONE: return "None"; 2758 case ETH_SPEED_NUM_10M: return "10 Mbps"; 2759 case ETH_SPEED_NUM_100M: return "100 Mbps"; 2760 case ETH_SPEED_NUM_1G: return "1 Gbps"; 2761 case ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2762 case ETH_SPEED_NUM_5G: return "5 Gbps"; 2763 case ETH_SPEED_NUM_10G: return "10 Gbps"; 2764 case ETH_SPEED_NUM_20G: return "20 Gbps"; 2765 case ETH_SPEED_NUM_25G: return "25 Gbps"; 2766 case ETH_SPEED_NUM_40G: return "40 Gbps"; 2767 case ETH_SPEED_NUM_50G: return "50 Gbps"; 2768 case ETH_SPEED_NUM_56G: return "56 Gbps"; 2769 case ETH_SPEED_NUM_100G: return "100 Gbps"; 2770 case ETH_SPEED_NUM_200G: return "200 Gbps"; 2771 case ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2772 default: return "Invalid"; 2773 } 2774 } 2775 2776 int 2777 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2778 { 2779 if (str == NULL) { 2780 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2781 return -EINVAL; 2782 } 2783 2784 if (len == 0) { 2785 RTE_ETHDEV_LOG(ERR, 2786 "Cannot convert link to string with zero size\n"); 2787 return -EINVAL; 2788 } 2789 2790 if (eth_link == NULL) { 2791 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2792 return -EINVAL; 2793 } 2794 2795 if (eth_link->link_status == ETH_LINK_DOWN) 2796 return snprintf(str, len, "Link down"); 2797 else 2798 return snprintf(str, len, "Link up at %s %s %s", 2799 rte_eth_link_speed_to_str(eth_link->link_speed), 2800 (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 2801 "FDX" : "HDX", 2802 (eth_link->link_autoneg == ETH_LINK_AUTONEG) ? 2803 "Autoneg" : "Fixed"); 2804 } 2805 2806 int 2807 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2808 { 2809 struct rte_eth_dev *dev; 2810 2811 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2812 dev = &rte_eth_devices[port_id]; 2813 2814 if (stats == NULL) { 2815 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2816 port_id); 2817 return -EINVAL; 2818 } 2819 2820 memset(stats, 0, sizeof(*stats)); 2821 2822 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2823 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2824 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2825 } 2826 2827 int 2828 rte_eth_stats_reset(uint16_t port_id) 2829 { 2830 struct rte_eth_dev *dev; 2831 int ret; 2832 2833 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2834 dev = &rte_eth_devices[port_id]; 2835 2836 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2837 ret = (*dev->dev_ops->stats_reset)(dev); 2838 if (ret != 0) 2839 return eth_err(port_id, ret); 2840 2841 dev->data->rx_mbuf_alloc_failed = 0; 2842 2843 return 0; 2844 } 2845 2846 static inline int 2847 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2848 { 2849 uint16_t nb_rxqs, nb_txqs; 2850 int count; 2851 2852 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2853 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2854 2855 count = RTE_NB_STATS; 2856 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2857 count += nb_rxqs * RTE_NB_RXQ_STATS; 2858 count += nb_txqs * RTE_NB_TXQ_STATS; 2859 } 2860 2861 return count; 2862 } 2863 2864 static int 2865 eth_dev_get_xstats_count(uint16_t port_id) 2866 { 2867 struct rte_eth_dev *dev; 2868 int count; 2869 2870 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2871 dev = &rte_eth_devices[port_id]; 2872 if 
(dev->dev_ops->xstats_get_names != NULL) { 2873 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2874 if (count < 0) 2875 return eth_err(port_id, count); 2876 } else 2877 count = 0; 2878 2879 2880 count += eth_dev_get_xstats_basic_count(dev); 2881 2882 return count; 2883 } 2884 2885 int 2886 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2887 uint64_t *id) 2888 { 2889 int cnt_xstats, idx_xstat; 2890 2891 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2892 2893 if (xstat_name == NULL) { 2894 RTE_ETHDEV_LOG(ERR, 2895 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2896 port_id); 2897 return -ENOMEM; 2898 } 2899 2900 if (id == NULL) { 2901 RTE_ETHDEV_LOG(ERR, 2902 "Cannot get ethdev port %u xstats ID to NULL\n", 2903 port_id); 2904 return -ENOMEM; 2905 } 2906 2907 /* Get count */ 2908 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2909 if (cnt_xstats < 0) { 2910 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2911 return -ENODEV; 2912 } 2913 2914 /* Get id-name lookup table */ 2915 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2916 2917 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2918 port_id, xstats_names, cnt_xstats, NULL)) { 2919 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2920 return -1; 2921 } 2922 2923 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2924 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2925 *id = idx_xstat; 2926 return 0; 2927 }; 2928 } 2929 2930 return -EINVAL; 2931 } 2932 2933 /* retrieve basic stats names */ 2934 static int 2935 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2936 struct rte_eth_xstat_name *xstats_names) 2937 { 2938 int cnt_used_entries = 0; 2939 uint32_t idx, id_queue; 2940 uint16_t num_q; 2941 2942 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2943 strlcpy(xstats_names[cnt_used_entries].name, 2944 eth_dev_stats_strings[idx].name, 2945 sizeof(xstats_names[0].name)); 2946 cnt_used_entries++; 2947 } 2948 2949 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2950 return cnt_used_entries; 2951 2952 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2953 for (id_queue = 0; id_queue < num_q; id_queue++) { 2954 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2955 snprintf(xstats_names[cnt_used_entries].name, 2956 sizeof(xstats_names[0].name), 2957 "rx_q%u_%s", 2958 id_queue, eth_dev_rxq_stats_strings[idx].name); 2959 cnt_used_entries++; 2960 } 2961 2962 } 2963 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2964 for (id_queue = 0; id_queue < num_q; id_queue++) { 2965 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2966 snprintf(xstats_names[cnt_used_entries].name, 2967 sizeof(xstats_names[0].name), 2968 "tx_q%u_%s", 2969 id_queue, eth_dev_txq_stats_strings[idx].name); 2970 cnt_used_entries++; 2971 } 2972 } 2973 return cnt_used_entries; 2974 } 2975 2976 /* retrieve ethdev extended statistics names */ 2977 int 2978 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2979 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2980 uint64_t *ids) 2981 { 2982 struct rte_eth_xstat_name *xstats_names_copy; 2983 unsigned int no_basic_stat_requested = 1; 2984 unsigned int no_ext_stat_requested = 1; 2985 unsigned int expected_entries; 2986 unsigned int basic_count; 2987 struct rte_eth_dev *dev; 2988 unsigned int i; 2989 int ret; 2990 2991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2992 dev = &rte_eth_devices[port_id]; 2993 2994 basic_count = eth_dev_get_xstats_basic_count(dev); 2995 
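	/*
	 * In the combined xstats id space the basic stats occupy ids
	 * [0, basic_count) and driver-specific xstats follow, so ids below
	 * basic_count cannot be forwarded to the PMD callback unchanged.
	 */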
ret = eth_dev_get_xstats_count(port_id); 2996 if (ret < 0) 2997 return ret; 2998 expected_entries = (unsigned int)ret; 2999 3000 /* Return max number of stats if no ids given */ 3001 if (!ids) { 3002 if (!xstats_names) 3003 return expected_entries; 3004 else if (xstats_names && size < expected_entries) 3005 return expected_entries; 3006 } 3007 3008 if (ids && !xstats_names) 3009 return -EINVAL; 3010 3011 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3012 uint64_t ids_copy[size]; 3013 3014 for (i = 0; i < size; i++) { 3015 if (ids[i] < basic_count) { 3016 no_basic_stat_requested = 0; 3017 break; 3018 } 3019 3020 /* 3021 * Convert ids to xstats ids that PMD knows. 3022 * ids known by user are basic + extended stats. 3023 */ 3024 ids_copy[i] = ids[i] - basic_count; 3025 } 3026 3027 if (no_basic_stat_requested) 3028 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3029 ids_copy, xstats_names, size); 3030 } 3031 3032 /* Retrieve all stats */ 3033 if (!ids) { 3034 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3035 expected_entries); 3036 if (num_stats < 0 || num_stats > (int)expected_entries) 3037 return num_stats; 3038 else 3039 return expected_entries; 3040 } 3041 3042 xstats_names_copy = calloc(expected_entries, 3043 sizeof(struct rte_eth_xstat_name)); 3044 3045 if (!xstats_names_copy) { 3046 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3047 return -ENOMEM; 3048 } 3049 3050 if (ids) { 3051 for (i = 0; i < size; i++) { 3052 if (ids[i] >= basic_count) { 3053 no_ext_stat_requested = 0; 3054 break; 3055 } 3056 } 3057 } 3058 3059 /* Fill xstats_names_copy structure */ 3060 if (ids && no_ext_stat_requested) { 3061 eth_basic_stats_get_names(dev, xstats_names_copy); 3062 } else { 3063 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3064 expected_entries); 3065 if (ret < 0) { 3066 free(xstats_names_copy); 3067 return ret; 3068 } 3069 } 3070 3071 /* Filter stats */ 3072 for (i = 0; i < size; i++) { 3073 if (ids[i] >= expected_entries) { 3074 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3075 free(xstats_names_copy); 3076 return -1; 3077 } 3078 xstats_names[i] = xstats_names_copy[ids[i]]; 3079 } 3080 3081 free(xstats_names_copy); 3082 return size; 3083 } 3084 3085 int 3086 rte_eth_xstats_get_names(uint16_t port_id, 3087 struct rte_eth_xstat_name *xstats_names, 3088 unsigned int size) 3089 { 3090 struct rte_eth_dev *dev; 3091 int cnt_used_entries; 3092 int cnt_expected_entries; 3093 int cnt_driver_entries; 3094 3095 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3096 if (xstats_names == NULL || cnt_expected_entries < 0 || 3097 (int)size < cnt_expected_entries) 3098 return cnt_expected_entries; 3099 3100 /* port_id checked in eth_dev_get_xstats_count() */ 3101 dev = &rte_eth_devices[port_id]; 3102 3103 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3104 3105 if (dev->dev_ops->xstats_get_names != NULL) { 3106 /* If there are any driver-specific xstats, append them 3107 * to end of list. 
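		 * The name at index i then matches the xstat with id i as
		 * returned by rte_eth_xstats_get() for the same port.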
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}


static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
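			 * e.g. with basic_count == 10 the user-visible id 12
			 * is passed to the PMD as driver xstat id 2.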
3224 */ 3225 ids_copy[i] = ids[i] - basic_count; 3226 } 3227 3228 if (no_basic_stat_requested) 3229 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3230 values, size); 3231 } 3232 3233 if (ids) { 3234 for (i = 0; i < size; i++) { 3235 if (ids[i] >= basic_count) { 3236 no_ext_stat_requested = 0; 3237 break; 3238 } 3239 } 3240 } 3241 3242 /* Fill the xstats structure */ 3243 if (ids && no_ext_stat_requested) 3244 ret = eth_basic_stats_get(port_id, xstats); 3245 else 3246 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3247 3248 if (ret < 0) 3249 return ret; 3250 num_xstats_filled = (unsigned int)ret; 3251 3252 /* Return all stats */ 3253 if (!ids) { 3254 for (i = 0; i < num_xstats_filled; i++) 3255 values[i] = xstats[i].value; 3256 return expected_entries; 3257 } 3258 3259 /* Filter stats */ 3260 for (i = 0; i < size; i++) { 3261 if (ids[i] >= expected_entries) { 3262 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3263 return -1; 3264 } 3265 values[i] = xstats[ids[i]].value; 3266 } 3267 return size; 3268 } 3269 3270 int 3271 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3272 unsigned int n) 3273 { 3274 struct rte_eth_dev *dev; 3275 unsigned int count = 0, i; 3276 signed int xcount = 0; 3277 uint16_t nb_rxqs, nb_txqs; 3278 int ret; 3279 3280 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3281 dev = &rte_eth_devices[port_id]; 3282 3283 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3284 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3285 3286 /* Return generic statistics */ 3287 count = RTE_NB_STATS; 3288 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3289 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3290 3291 /* implemented by the driver */ 3292 if (dev->dev_ops->xstats_get != NULL) { 3293 /* Retrieve the xstats from the driver at the end of the 3294 * xstats struct. 3295 */ 3296 xcount = (*dev->dev_ops->xstats_get)(dev, 3297 xstats ? xstats + count : NULL, 3298 (n > count) ? 
n - count : 0); 3299 3300 if (xcount < 0) 3301 return eth_err(port_id, xcount); 3302 } 3303 3304 if (n < count + xcount || xstats == NULL) 3305 return count + xcount; 3306 3307 /* now fill the xstats structure */ 3308 ret = eth_basic_stats_get(port_id, xstats); 3309 if (ret < 0) 3310 return ret; 3311 count = ret; 3312 3313 for (i = 0; i < count; i++) 3314 xstats[i].id = i; 3315 /* add an offset to driver-specific stats */ 3316 for ( ; i < count + xcount; i++) 3317 xstats[i].id += count; 3318 3319 return count + xcount; 3320 } 3321 3322 /* reset ethdev extended statistics */ 3323 int 3324 rte_eth_xstats_reset(uint16_t port_id) 3325 { 3326 struct rte_eth_dev *dev; 3327 3328 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3329 dev = &rte_eth_devices[port_id]; 3330 3331 /* implemented by the driver */ 3332 if (dev->dev_ops->xstats_reset != NULL) 3333 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3334 3335 /* fallback to default */ 3336 return rte_eth_stats_reset(port_id); 3337 } 3338 3339 static int 3340 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3341 uint8_t stat_idx, uint8_t is_rx) 3342 { 3343 struct rte_eth_dev *dev; 3344 3345 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3346 dev = &rte_eth_devices[port_id]; 3347 3348 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3349 return -EINVAL; 3350 3351 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3352 return -EINVAL; 3353 3354 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3355 return -EINVAL; 3356 3357 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3358 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3359 } 3360 3361 int 3362 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3363 uint8_t stat_idx) 3364 { 3365 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3366 tx_queue_id, 3367 stat_idx, STAT_QMAP_TX)); 3368 } 3369 3370 int 3371 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3372 uint8_t stat_idx) 3373 { 3374 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3375 rx_queue_id, 3376 stat_idx, STAT_QMAP_RX)); 3377 } 3378 3379 int 3380 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3381 { 3382 struct rte_eth_dev *dev; 3383 3384 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3385 dev = &rte_eth_devices[port_id]; 3386 3387 if (fw_version == NULL && fw_size > 0) { 3388 RTE_ETHDEV_LOG(ERR, 3389 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3390 port_id); 3391 return -EINVAL; 3392 } 3393 3394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3395 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3396 fw_version, fw_size)); 3397 } 3398 3399 int 3400 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3401 { 3402 struct rte_eth_dev *dev; 3403 const struct rte_eth_desc_lim lim = { 3404 .nb_max = UINT16_MAX, 3405 .nb_min = 0, 3406 .nb_align = 1, 3407 .nb_seg_max = UINT16_MAX, 3408 .nb_mtu_seg_max = UINT16_MAX, 3409 }; 3410 int diag; 3411 3412 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3413 dev = &rte_eth_devices[port_id]; 3414 3415 if (dev_info == NULL) { 3416 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3417 port_id); 3418 return -EINVAL; 3419 } 3420 3421 /* 3422 * Init dev_info before port_id check since caller does not have 3423 * return status and does not know if get is successful or not. 
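	 * The descriptor limits and MTU bounds set below are deliberately
	 * permissive defaults; the PMD's dev_infos_get() callback is
	 * expected to overwrite them with its real capabilities.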
3424 */ 3425 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3426 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3427 3428 dev_info->rx_desc_lim = lim; 3429 dev_info->tx_desc_lim = lim; 3430 dev_info->device = dev->device; 3431 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3432 RTE_ETHER_CRC_LEN; 3433 dev_info->max_mtu = UINT16_MAX; 3434 3435 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3436 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3437 if (diag != 0) { 3438 /* Cleanup already filled in device information */ 3439 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3440 return eth_err(port_id, diag); 3441 } 3442 3443 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3444 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3445 RTE_MAX_QUEUES_PER_PORT); 3446 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3447 RTE_MAX_QUEUES_PER_PORT); 3448 3449 dev_info->driver_name = dev->device->driver->name; 3450 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3451 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3452 3453 dev_info->dev_flags = &dev->data->dev_flags; 3454 3455 return 0; 3456 } 3457 3458 int 3459 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3460 { 3461 struct rte_eth_dev *dev; 3462 3463 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3464 dev = &rte_eth_devices[port_id]; 3465 3466 if (dev_conf == NULL) { 3467 RTE_ETHDEV_LOG(ERR, 3468 "Cannot get ethdev port %u configuration to NULL\n", 3469 port_id); 3470 return -EINVAL; 3471 } 3472 3473 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3474 3475 return 0; 3476 } 3477 3478 int 3479 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3480 uint32_t *ptypes, int num) 3481 { 3482 int i, j; 3483 struct rte_eth_dev *dev; 3484 const uint32_t *all_ptypes; 3485 3486 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3487 dev = &rte_eth_devices[port_id]; 3488 3489 if (ptypes == NULL && num > 0) { 3490 RTE_ETHDEV_LOG(ERR, 3491 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3492 port_id); 3493 return -EINVAL; 3494 } 3495 3496 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3497 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3498 3499 if (!all_ptypes) 3500 return 0; 3501 3502 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3503 if (all_ptypes[i] & ptype_mask) { 3504 if (j < num) 3505 ptypes[j] = all_ptypes[i]; 3506 j++; 3507 } 3508 3509 return j; 3510 } 3511 3512 int 3513 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3514 uint32_t *set_ptypes, unsigned int num) 3515 { 3516 const uint32_t valid_ptype_masks[] = { 3517 RTE_PTYPE_L2_MASK, 3518 RTE_PTYPE_L3_MASK, 3519 RTE_PTYPE_L4_MASK, 3520 RTE_PTYPE_TUNNEL_MASK, 3521 RTE_PTYPE_INNER_L2_MASK, 3522 RTE_PTYPE_INNER_L3_MASK, 3523 RTE_PTYPE_INNER_L4_MASK, 3524 }; 3525 const uint32_t *all_ptypes; 3526 struct rte_eth_dev *dev; 3527 uint32_t unused_mask; 3528 unsigned int i, j; 3529 int ret; 3530 3531 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3532 dev = &rte_eth_devices[port_id]; 3533 3534 if (num > 0 && set_ptypes == NULL) { 3535 RTE_ETHDEV_LOG(ERR, 3536 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3537 port_id); 3538 return -EINVAL; 3539 } 3540 3541 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3542 *dev->dev_ops->dev_ptypes_set == NULL) { 3543 ret = 0; 3544 goto 
ptype_unknown; 3545 } 3546 3547 if (ptype_mask == 0) { 3548 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3549 ptype_mask); 3550 goto ptype_unknown; 3551 } 3552 3553 unused_mask = ptype_mask; 3554 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3555 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3556 if (mask && mask != valid_ptype_masks[i]) { 3557 ret = -EINVAL; 3558 goto ptype_unknown; 3559 } 3560 unused_mask &= ~valid_ptype_masks[i]; 3561 } 3562 3563 if (unused_mask) { 3564 ret = -EINVAL; 3565 goto ptype_unknown; 3566 } 3567 3568 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3569 if (all_ptypes == NULL) { 3570 ret = 0; 3571 goto ptype_unknown; 3572 } 3573 3574 /* 3575 * Accommodate as many set_ptypes as possible. If the supplied 3576 * set_ptypes array is insufficient fill it partially. 3577 */ 3578 for (i = 0, j = 0; set_ptypes != NULL && 3579 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3580 if (ptype_mask & all_ptypes[i]) { 3581 if (j < num - 1) { 3582 set_ptypes[j] = all_ptypes[i]; 3583 j++; 3584 continue; 3585 } 3586 break; 3587 } 3588 } 3589 3590 if (set_ptypes != NULL && j < num) 3591 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3592 3593 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3594 3595 ptype_unknown: 3596 if (num > 0) 3597 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3598 3599 return ret; 3600 } 3601 3602 int 3603 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3604 unsigned int num) 3605 { 3606 int32_t ret; 3607 struct rte_eth_dev *dev; 3608 struct rte_eth_dev_info dev_info; 3609 3610 if (ma == NULL) { 3611 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3612 return -EINVAL; 3613 } 3614 3615 /* will check for us that port_id is a valid one */ 3616 ret = rte_eth_dev_info_get(port_id, &dev_info); 3617 if (ret != 0) 3618 return ret; 3619 3620 dev = &rte_eth_devices[port_id]; 3621 num = RTE_MIN(dev_info.max_mac_addrs, num); 3622 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3623 3624 return num; 3625 } 3626 3627 int 3628 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3629 { 3630 struct rte_eth_dev *dev; 3631 3632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3633 dev = &rte_eth_devices[port_id]; 3634 3635 if (mac_addr == NULL) { 3636 RTE_ETHDEV_LOG(ERR, 3637 "Cannot get ethdev port %u MAC address to NULL\n", 3638 port_id); 3639 return -EINVAL; 3640 } 3641 3642 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3643 3644 return 0; 3645 } 3646 3647 int 3648 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3649 { 3650 struct rte_eth_dev *dev; 3651 3652 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3653 dev = &rte_eth_devices[port_id]; 3654 3655 if (mtu == NULL) { 3656 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3657 port_id); 3658 return -EINVAL; 3659 } 3660 3661 *mtu = dev->data->mtu; 3662 return 0; 3663 } 3664 3665 int 3666 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3667 { 3668 int ret; 3669 struct rte_eth_dev_info dev_info; 3670 struct rte_eth_dev *dev; 3671 3672 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3673 dev = &rte_eth_devices[port_id]; 3674 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3675 3676 /* 3677 * Check if the device supports dev_infos_get, if it does not 3678 * skip min_mtu/max_mtu validation here as this requires values 3679 * that are populated within the call to rte_eth_dev_info_get() 3680 * which relies on dev->dev_ops->dev_infos_get. 
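	 * In that case the PMD's mtu_set() callback below is the only
	 * validation applied to the requested value.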
3681 */ 3682 if (*dev->dev_ops->dev_infos_get != NULL) { 3683 ret = rte_eth_dev_info_get(port_id, &dev_info); 3684 if (ret != 0) 3685 return ret; 3686 3687 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3688 if (ret != 0) 3689 return ret; 3690 } 3691 3692 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3693 if (ret == 0) 3694 dev->data->mtu = mtu; 3695 3696 return eth_err(port_id, ret); 3697 } 3698 3699 int 3700 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3701 { 3702 struct rte_eth_dev *dev; 3703 int ret; 3704 3705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3706 dev = &rte_eth_devices[port_id]; 3707 3708 if (!(dev->data->dev_conf.rxmode.offloads & 3709 DEV_RX_OFFLOAD_VLAN_FILTER)) { 3710 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n", 3711 port_id); 3712 return -ENOSYS; 3713 } 3714 3715 if (vlan_id > 4095) { 3716 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3717 port_id, vlan_id); 3718 return -EINVAL; 3719 } 3720 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3721 3722 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3723 if (ret == 0) { 3724 struct rte_vlan_filter_conf *vfc; 3725 int vidx; 3726 int vbit; 3727 3728 vfc = &dev->data->vlan_filter_conf; 3729 vidx = vlan_id / 64; 3730 vbit = vlan_id % 64; 3731 3732 if (on) 3733 vfc->ids[vidx] |= UINT64_C(1) << vbit; 3734 else 3735 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit); 3736 } 3737 3738 return eth_err(port_id, ret); 3739 } 3740 3741 int 3742 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3743 int on) 3744 { 3745 struct rte_eth_dev *dev; 3746 3747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3748 dev = &rte_eth_devices[port_id]; 3749 3750 if (rx_queue_id >= dev->data->nb_rx_queues) { 3751 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3752 return -EINVAL; 3753 } 3754 3755 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3756 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3757 3758 return 0; 3759 } 3760 3761 int 3762 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3763 enum rte_vlan_type vlan_type, 3764 uint16_t tpid) 3765 { 3766 struct rte_eth_dev *dev; 3767 3768 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3769 dev = &rte_eth_devices[port_id]; 3770 3771 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3772 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3773 tpid)); 3774 } 3775 3776 int 3777 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3778 { 3779 struct rte_eth_dev_info dev_info; 3780 struct rte_eth_dev *dev; 3781 int ret = 0; 3782 int mask = 0; 3783 int cur, org = 0; 3784 uint64_t orig_offloads; 3785 uint64_t dev_offloads; 3786 uint64_t new_offloads; 3787 3788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3789 dev = &rte_eth_devices[port_id]; 3790 3791 /* save original values in case of failure */ 3792 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3793 dev_offloads = orig_offloads; 3794 3795 /* check which option changed by application */ 3796 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); 3797 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 3798 if (cur != org) { 3799 if (cur) 3800 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 3801 else 3802 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 3803 mask |= ETH_VLAN_STRIP_MASK; 3804 } 3805 3806 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); 3807 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); 3808 if (cur != org) { 3809 if (cur) 3810 
dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 3811 else 3812 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; 3813 mask |= ETH_VLAN_FILTER_MASK; 3814 } 3815 3816 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); 3817 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); 3818 if (cur != org) { 3819 if (cur) 3820 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; 3821 else 3822 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; 3823 mask |= ETH_VLAN_EXTEND_MASK; 3824 } 3825 3826 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); 3827 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); 3828 if (cur != org) { 3829 if (cur) 3830 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; 3831 else 3832 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; 3833 mask |= ETH_QINQ_STRIP_MASK; 3834 } 3835 3836 /*no change*/ 3837 if (mask == 0) 3838 return ret; 3839 3840 ret = rte_eth_dev_info_get(port_id, &dev_info); 3841 if (ret != 0) 3842 return ret; 3843 3844 /* Rx VLAN offloading must be within its device capabilities */ 3845 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3846 new_offloads = dev_offloads & ~orig_offloads; 3847 RTE_ETHDEV_LOG(ERR, 3848 "Ethdev port_id=%u requested new added VLAN offloads " 3849 "0x%" PRIx64 " must be within Rx offloads capabilities " 3850 "0x%" PRIx64 " in %s()\n", 3851 port_id, new_offloads, dev_info.rx_offload_capa, 3852 __func__); 3853 return -EINVAL; 3854 } 3855 3856 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3857 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3858 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3859 if (ret) { 3860 /* hit an error restore original values */ 3861 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3862 } 3863 3864 return eth_err(port_id, ret); 3865 } 3866 3867 int 3868 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3869 { 3870 struct rte_eth_dev *dev; 3871 uint64_t *dev_offloads; 3872 int ret = 0; 3873 3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3875 dev = &rte_eth_devices[port_id]; 3876 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3877 3878 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 3879 ret |= ETH_VLAN_STRIP_OFFLOAD; 3880 3881 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 3882 ret |= ETH_VLAN_FILTER_OFFLOAD; 3883 3884 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 3885 ret |= ETH_VLAN_EXTEND_OFFLOAD; 3886 3887 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) 3888 ret |= ETH_QINQ_STRIP_OFFLOAD; 3889 3890 return ret; 3891 } 3892 3893 int 3894 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3895 { 3896 struct rte_eth_dev *dev; 3897 3898 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3899 dev = &rte_eth_devices[port_id]; 3900 3901 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3902 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3903 } 3904 3905 int 3906 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3907 { 3908 struct rte_eth_dev *dev; 3909 3910 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3911 dev = &rte_eth_devices[port_id]; 3912 3913 if (fc_conf == NULL) { 3914 RTE_ETHDEV_LOG(ERR, 3915 "Cannot get ethdev port %u flow control config to NULL\n", 3916 port_id); 3917 return -EINVAL; 3918 } 3919 3920 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3921 memset(fc_conf, 0, sizeof(*fc_conf)); 3922 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3923 } 3924 3925 int 3926 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3927 { 3928 struct 
rte_eth_dev *dev; 3929 3930 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3931 dev = &rte_eth_devices[port_id]; 3932 3933 if (fc_conf == NULL) { 3934 RTE_ETHDEV_LOG(ERR, 3935 "Cannot set ethdev port %u flow control from NULL config\n", 3936 port_id); 3937 return -EINVAL; 3938 } 3939 3940 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3941 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3942 return -EINVAL; 3943 } 3944 3945 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3946 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3947 } 3948 3949 int 3950 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3951 struct rte_eth_pfc_conf *pfc_conf) 3952 { 3953 struct rte_eth_dev *dev; 3954 3955 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3956 dev = &rte_eth_devices[port_id]; 3957 3958 if (pfc_conf == NULL) { 3959 RTE_ETHDEV_LOG(ERR, 3960 "Cannot set ethdev port %u priority flow control from NULL config\n", 3961 port_id); 3962 return -EINVAL; 3963 } 3964 3965 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3966 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3967 return -EINVAL; 3968 } 3969 3970 /* High water, low water validation are device specific */ 3971 if (*dev->dev_ops->priority_flow_ctrl_set) 3972 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3973 (dev, pfc_conf)); 3974 return -ENOTSUP; 3975 } 3976 3977 static int 3978 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3979 uint16_t reta_size) 3980 { 3981 uint16_t i, num; 3982 3983 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; 3984 for (i = 0; i < num; i++) { 3985 if (reta_conf[i].mask) 3986 return 0; 3987 } 3988 3989 return -EINVAL; 3990 } 3991 3992 static int 3993 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3994 uint16_t reta_size, 3995 uint16_t max_rxq) 3996 { 3997 uint16_t i, idx, shift; 3998 3999 if (max_rxq == 0) { 4000 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4001 return -EINVAL; 4002 } 4003 4004 for (i = 0; i < reta_size; i++) { 4005 idx = i / RTE_RETA_GROUP_SIZE; 4006 shift = i % RTE_RETA_GROUP_SIZE; 4007 if ((reta_conf[idx].mask & (1ULL << shift)) && 4008 (reta_conf[idx].reta[shift] >= max_rxq)) { 4009 RTE_ETHDEV_LOG(ERR, 4010 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4011 idx, shift, 4012 reta_conf[idx].reta[shift], max_rxq); 4013 return -EINVAL; 4014 } 4015 } 4016 4017 return 0; 4018 } 4019 4020 int 4021 rte_eth_dev_rss_reta_update(uint16_t port_id, 4022 struct rte_eth_rss_reta_entry64 *reta_conf, 4023 uint16_t reta_size) 4024 { 4025 struct rte_eth_dev *dev; 4026 int ret; 4027 4028 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4029 dev = &rte_eth_devices[port_id]; 4030 4031 if (reta_conf == NULL) { 4032 RTE_ETHDEV_LOG(ERR, 4033 "Cannot update ethdev port %u RSS RETA to NULL\n", 4034 port_id); 4035 return -EINVAL; 4036 } 4037 4038 if (reta_size == 0) { 4039 RTE_ETHDEV_LOG(ERR, 4040 "Cannot update ethdev port %u RSS RETA with zero size\n", 4041 port_id); 4042 return -EINVAL; 4043 } 4044 4045 /* Check mask bits */ 4046 ret = eth_check_reta_mask(reta_conf, reta_size); 4047 if (ret < 0) 4048 return ret; 4049 4050 /* Check entry value */ 4051 ret = eth_check_reta_entry(reta_conf, reta_size, 4052 dev->data->nb_rx_queues); 4053 if (ret < 0) 4054 return ret; 4055 4056 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4057 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4058 reta_size)); 
4059 } 4060 4061 int 4062 rte_eth_dev_rss_reta_query(uint16_t port_id, 4063 struct rte_eth_rss_reta_entry64 *reta_conf, 4064 uint16_t reta_size) 4065 { 4066 struct rte_eth_dev *dev; 4067 int ret; 4068 4069 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4070 dev = &rte_eth_devices[port_id]; 4071 4072 if (reta_conf == NULL) { 4073 RTE_ETHDEV_LOG(ERR, 4074 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4075 port_id); 4076 return -EINVAL; 4077 } 4078 4079 /* Check mask bits */ 4080 ret = eth_check_reta_mask(reta_conf, reta_size); 4081 if (ret < 0) 4082 return ret; 4083 4084 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4085 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4086 reta_size)); 4087 } 4088 4089 int 4090 rte_eth_dev_rss_hash_update(uint16_t port_id, 4091 struct rte_eth_rss_conf *rss_conf) 4092 { 4093 struct rte_eth_dev *dev; 4094 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4095 int ret; 4096 4097 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4098 dev = &rte_eth_devices[port_id]; 4099 4100 if (rss_conf == NULL) { 4101 RTE_ETHDEV_LOG(ERR, 4102 "Cannot update ethdev port %u RSS hash from NULL config\n", 4103 port_id); 4104 return -EINVAL; 4105 } 4106 4107 ret = rte_eth_dev_info_get(port_id, &dev_info); 4108 if (ret != 0) 4109 return ret; 4110 4111 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4112 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4113 dev_info.flow_type_rss_offloads) { 4114 RTE_ETHDEV_LOG(ERR, 4115 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4116 port_id, rss_conf->rss_hf, 4117 dev_info.flow_type_rss_offloads); 4118 return -EINVAL; 4119 } 4120 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4121 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4122 rss_conf)); 4123 } 4124 4125 int 4126 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4127 struct rte_eth_rss_conf *rss_conf) 4128 { 4129 struct rte_eth_dev *dev; 4130 4131 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4132 dev = &rte_eth_devices[port_id]; 4133 4134 if (rss_conf == NULL) { 4135 RTE_ETHDEV_LOG(ERR, 4136 "Cannot get ethdev port %u RSS hash config to NULL\n", 4137 port_id); 4138 return -EINVAL; 4139 } 4140 4141 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4142 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4143 rss_conf)); 4144 } 4145 4146 int 4147 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4148 struct rte_eth_udp_tunnel *udp_tunnel) 4149 { 4150 struct rte_eth_dev *dev; 4151 4152 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4153 dev = &rte_eth_devices[port_id]; 4154 4155 if (udp_tunnel == NULL) { 4156 RTE_ETHDEV_LOG(ERR, 4157 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4158 port_id); 4159 return -EINVAL; 4160 } 4161 4162 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4163 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4164 return -EINVAL; 4165 } 4166 4167 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4168 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4169 udp_tunnel)); 4170 } 4171 4172 int 4173 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4174 struct rte_eth_udp_tunnel *udp_tunnel) 4175 { 4176 struct rte_eth_dev *dev; 4177 4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4179 dev = &rte_eth_devices[port_id]; 4180 4181 if (udp_tunnel == NULL) { 4182 RTE_ETHDEV_LOG(ERR, 4183 "Cannot delete ethdev port 
%u UDP tunnel port from NULL UDP tunnel\n", 4184 port_id); 4185 return -EINVAL; 4186 } 4187 4188 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) { 4189 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4190 return -EINVAL; 4191 } 4192 4193 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4194 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4195 udp_tunnel)); 4196 } 4197 4198 int 4199 rte_eth_led_on(uint16_t port_id) 4200 { 4201 struct rte_eth_dev *dev; 4202 4203 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4204 dev = &rte_eth_devices[port_id]; 4205 4206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4207 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4208 } 4209 4210 int 4211 rte_eth_led_off(uint16_t port_id) 4212 { 4213 struct rte_eth_dev *dev; 4214 4215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4216 dev = &rte_eth_devices[port_id]; 4217 4218 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4219 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4220 } 4221 4222 int 4223 rte_eth_fec_get_capability(uint16_t port_id, 4224 struct rte_eth_fec_capa *speed_fec_capa, 4225 unsigned int num) 4226 { 4227 struct rte_eth_dev *dev; 4228 int ret; 4229 4230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4231 dev = &rte_eth_devices[port_id]; 4232 4233 if (speed_fec_capa == NULL && num > 0) { 4234 RTE_ETHDEV_LOG(ERR, 4235 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4236 port_id); 4237 return -EINVAL; 4238 } 4239 4240 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4241 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4242 4243 return ret; 4244 } 4245 4246 int 4247 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4248 { 4249 struct rte_eth_dev *dev; 4250 4251 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4252 dev = &rte_eth_devices[port_id]; 4253 4254 if (fec_capa == NULL) { 4255 RTE_ETHDEV_LOG(ERR, 4256 "Cannot get ethdev port %u current FEC mode to NULL\n", 4257 port_id); 4258 return -EINVAL; 4259 } 4260 4261 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4262 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4263 } 4264 4265 int 4266 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4267 { 4268 struct rte_eth_dev *dev; 4269 4270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4271 dev = &rte_eth_devices[port_id]; 4272 4273 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4274 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4275 } 4276 4277 /* 4278 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4279 * an empty spot. 
4280 */ 4281 static int 4282 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4283 { 4284 struct rte_eth_dev_info dev_info; 4285 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4286 unsigned i; 4287 int ret; 4288 4289 ret = rte_eth_dev_info_get(port_id, &dev_info); 4290 if (ret != 0) 4291 return -1; 4292 4293 for (i = 0; i < dev_info.max_mac_addrs; i++) 4294 if (memcmp(addr, &dev->data->mac_addrs[i], 4295 RTE_ETHER_ADDR_LEN) == 0) 4296 return i; 4297 4298 return -1; 4299 } 4300 4301 static const struct rte_ether_addr null_mac_addr; 4302 4303 int 4304 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4305 uint32_t pool) 4306 { 4307 struct rte_eth_dev *dev; 4308 int index; 4309 uint64_t pool_mask; 4310 int ret; 4311 4312 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4313 dev = &rte_eth_devices[port_id]; 4314 4315 if (addr == NULL) { 4316 RTE_ETHDEV_LOG(ERR, 4317 "Cannot add ethdev port %u MAC address from NULL address\n", 4318 port_id); 4319 return -EINVAL; 4320 } 4321 4322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4323 4324 if (rte_is_zero_ether_addr(addr)) { 4325 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4326 port_id); 4327 return -EINVAL; 4328 } 4329 if (pool >= ETH_64_POOLS) { 4330 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1); 4331 return -EINVAL; 4332 } 4333 4334 index = eth_dev_get_mac_addr_index(port_id, addr); 4335 if (index < 0) { 4336 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4337 if (index < 0) { 4338 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4339 port_id); 4340 return -ENOSPC; 4341 } 4342 } else { 4343 pool_mask = dev->data->mac_pool_sel[index]; 4344 4345 /* Check if both MAC address and pool is already there, and do nothing */ 4346 if (pool_mask & (1ULL << pool)) 4347 return 0; 4348 } 4349 4350 /* Update NIC */ 4351 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4352 4353 if (ret == 0) { 4354 /* Update address in NIC data structure */ 4355 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4356 4357 /* Update pool bitmap in NIC data structure */ 4358 dev->data->mac_pool_sel[index] |= (1ULL << pool); 4359 } 4360 4361 return eth_err(port_id, ret); 4362 } 4363 4364 int 4365 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4366 { 4367 struct rte_eth_dev *dev; 4368 int index; 4369 4370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4371 dev = &rte_eth_devices[port_id]; 4372 4373 if (addr == NULL) { 4374 RTE_ETHDEV_LOG(ERR, 4375 "Cannot remove ethdev port %u MAC address from NULL address\n", 4376 port_id); 4377 return -EINVAL; 4378 } 4379 4380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4381 4382 index = eth_dev_get_mac_addr_index(port_id, addr); 4383 if (index == 0) { 4384 RTE_ETHDEV_LOG(ERR, 4385 "Port %u: Cannot remove default MAC address\n", 4386 port_id); 4387 return -EADDRINUSE; 4388 } else if (index < 0) 4389 return 0; /* Do nothing if address wasn't found */ 4390 4391 /* Update NIC */ 4392 (*dev->dev_ops->mac_addr_remove)(dev, index); 4393 4394 /* Update address in NIC data structure */ 4395 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4396 4397 /* reset pool bitmap */ 4398 dev->data->mac_pool_sel[index] = 0; 4399 4400 return 0; 4401 } 4402 4403 int 4404 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4405 { 4406 struct rte_eth_dev *dev; 4407 int ret; 4408 4409 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4410 dev = &rte_eth_devices[port_id]; 4411 4412 if (addr == NULL) { 4413 RTE_ETHDEV_LOG(ERR, 4414 "Cannot set ethdev port %u default MAC address from NULL address\n", 4415 port_id); 4416 return -EINVAL; 4417 } 4418 4419 if (!rte_is_valid_assigned_ether_addr(addr)) 4420 return -EINVAL; 4421 4422 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4423 4424 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4425 if (ret < 0) 4426 return ret; 4427 4428 /* Update default address in NIC data structure */ 4429 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4430 4431 return 0; 4432 } 4433 4434 4435 /* 4436 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4437 * an empty spot. 4438 */ 4439 static int 4440 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4441 const struct rte_ether_addr *addr) 4442 { 4443 struct rte_eth_dev_info dev_info; 4444 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4445 unsigned i; 4446 int ret; 4447 4448 ret = rte_eth_dev_info_get(port_id, &dev_info); 4449 if (ret != 0) 4450 return -1; 4451 4452 if (!dev->data->hash_mac_addrs) 4453 return -1; 4454 4455 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4456 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4457 RTE_ETHER_ADDR_LEN) == 0) 4458 return i; 4459 4460 return -1; 4461 } 4462 4463 int 4464 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4465 uint8_t on) 4466 { 4467 int index; 4468 int ret; 4469 struct rte_eth_dev *dev; 4470 4471 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4472 dev = &rte_eth_devices[port_id]; 4473 4474 if (addr == NULL) { 4475 RTE_ETHDEV_LOG(ERR, 4476 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4477 port_id); 4478 return -EINVAL; 4479 } 4480 4481 if (rte_is_zero_ether_addr(addr)) { 4482 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4483 port_id); 4484 return -EINVAL; 4485 } 4486 4487 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4488 /* Check if it's already there, and do nothing */ 4489 if ((index >= 0) && on) 4490 return 0; 4491 4492 if (index < 0) { 4493 if (!on) { 4494 RTE_ETHDEV_LOG(ERR, 4495 "Port %u: the MAC address was not set in UTA\n", 4496 port_id); 4497 return -EINVAL; 4498 } 4499 4500 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4501 if (index < 0) { 4502 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4503 port_id); 4504 return -ENOSPC; 4505 } 4506 } 4507 4508 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4509 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4510 if (ret == 0) { 4511 /* Update address in NIC data structure */ 4512 if (on) 4513 rte_ether_addr_copy(addr, 4514 &dev->data->hash_mac_addrs[index]); 4515 else 4516 rte_ether_addr_copy(&null_mac_addr, 4517 &dev->data->hash_mac_addrs[index]); 4518 } 4519 4520 return eth_err(port_id, ret); 4521 } 4522 4523 int 4524 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4525 { 4526 struct rte_eth_dev *dev; 4527 4528 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4529 dev = &rte_eth_devices[port_id]; 4530 4531 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4532 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4533 on)); 4534 } 4535 4536 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4537 uint16_t tx_rate) 4538 { 4539 struct rte_eth_dev *dev; 4540 struct rte_eth_dev_info dev_info; 4541 struct rte_eth_link 
link; 4542 int ret; 4543 4544 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4545 dev = &rte_eth_devices[port_id]; 4546 4547 ret = rte_eth_dev_info_get(port_id, &dev_info); 4548 if (ret != 0) 4549 return ret; 4550 4551 link = dev->data->dev_link; 4552 4553 if (queue_idx > dev_info.max_tx_queues) { 4554 RTE_ETHDEV_LOG(ERR, 4555 "Set queue rate limit:port %u: invalid queue id=%u\n", 4556 port_id, queue_idx); 4557 return -EINVAL; 4558 } 4559 4560 if (tx_rate > link.link_speed) { 4561 RTE_ETHDEV_LOG(ERR, 4562 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4563 tx_rate, link.link_speed); 4564 return -EINVAL; 4565 } 4566 4567 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4568 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4569 queue_idx, tx_rate)); 4570 } 4571 4572 RTE_INIT(eth_dev_init_fp_ops) 4573 { 4574 uint32_t i; 4575 4576 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4577 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4578 } 4579 4580 RTE_INIT(eth_dev_init_cb_lists) 4581 { 4582 uint16_t i; 4583 4584 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4585 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4586 } 4587 4588 int 4589 rte_eth_dev_callback_register(uint16_t port_id, 4590 enum rte_eth_event_type event, 4591 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4592 { 4593 struct rte_eth_dev *dev; 4594 struct rte_eth_dev_callback *user_cb; 4595 uint16_t next_port; 4596 uint16_t last_port; 4597 4598 if (cb_fn == NULL) { 4599 RTE_ETHDEV_LOG(ERR, 4600 "Cannot register ethdev port %u callback from NULL\n", 4601 port_id); 4602 return -EINVAL; 4603 } 4604 4605 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4606 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4607 return -EINVAL; 4608 } 4609 4610 if (port_id == RTE_ETH_ALL) { 4611 next_port = 0; 4612 last_port = RTE_MAX_ETHPORTS - 1; 4613 } else { 4614 next_port = last_port = port_id; 4615 } 4616 4617 rte_spinlock_lock(ð_dev_cb_lock); 4618 4619 do { 4620 dev = &rte_eth_devices[next_port]; 4621 4622 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4623 if (user_cb->cb_fn == cb_fn && 4624 user_cb->cb_arg == cb_arg && 4625 user_cb->event == event) { 4626 break; 4627 } 4628 } 4629 4630 /* create a new callback. 
 */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}

		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return 0;
}

int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot unregister ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return ret;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * for secondary process, at that point we expect device
	 * to be already 'usable', so shared data and all function pointers
	 * for fast-path devops have to be setup properly inside rte_eth_dev.
4752 */ 4753 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4754 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4755 4756 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4757 4758 dev->state = RTE_ETH_DEV_ATTACHED; 4759 } 4760 4761 int 4762 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4763 { 4764 uint32_t vec; 4765 struct rte_eth_dev *dev; 4766 struct rte_intr_handle *intr_handle; 4767 uint16_t qid; 4768 int rc; 4769 4770 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4771 dev = &rte_eth_devices[port_id]; 4772 4773 if (!dev->intr_handle) { 4774 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4775 return -ENOTSUP; 4776 } 4777 4778 intr_handle = dev->intr_handle; 4779 if (!intr_handle->intr_vec) { 4780 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4781 return -EPERM; 4782 } 4783 4784 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4785 vec = intr_handle->intr_vec[qid]; 4786 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4787 if (rc && rc != -EEXIST) { 4788 RTE_ETHDEV_LOG(ERR, 4789 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 4790 port_id, qid, op, epfd, vec); 4791 } 4792 } 4793 4794 return 0; 4795 } 4796 4797 int 4798 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4799 { 4800 struct rte_intr_handle *intr_handle; 4801 struct rte_eth_dev *dev; 4802 unsigned int efd_idx; 4803 uint32_t vec; 4804 int fd; 4805 4806 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4807 dev = &rte_eth_devices[port_id]; 4808 4809 if (queue_id >= dev->data->nb_rx_queues) { 4810 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4811 return -1; 4812 } 4813 4814 if (!dev->intr_handle) { 4815 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 4816 return -1; 4817 } 4818 4819 intr_handle = dev->intr_handle; 4820 if (!intr_handle->intr_vec) { 4821 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 4822 return -1; 4823 } 4824 4825 vec = intr_handle->intr_vec[queue_id]; 4826 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4827 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4828 fd = intr_handle->efds[efd_idx]; 4829 4830 return fd; 4831 } 4832 4833 static inline int 4834 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4835 const char *ring_name) 4836 { 4837 return snprintf(name, len, "eth_p%d_q%d_%s", 4838 port_id, queue_id, ring_name); 4839 } 4840 4841 const struct rte_memzone * 4842 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4843 uint16_t queue_id, size_t size, unsigned align, 4844 int socket_id) 4845 { 4846 char z_name[RTE_MEMZONE_NAMESIZE]; 4847 const struct rte_memzone *mz; 4848 int rc; 4849 4850 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4851 queue_id, ring_name); 4852 if (rc >= RTE_MEMZONE_NAMESIZE) { 4853 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4854 rte_errno = ENAMETOOLONG; 4855 return NULL; 4856 } 4857 4858 mz = rte_memzone_lookup(z_name); 4859 if (mz) { 4860 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4861 size > mz->len || 4862 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4863 RTE_ETHDEV_LOG(ERR, 4864 "memzone %s does not justify the requested attributes\n", 4865 mz->name); 4866 return NULL; 4867 } 4868 4869 return mz; 4870 } 4871 4872 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4873 RTE_MEMZONE_IOVA_CONTIG, align); 4874 } 4875 4876 int 4877 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4878 uint16_t queue_id) 4879 { 4880 char z_name[RTE_MEMZONE_NAMESIZE]; 4881 const struct rte_memzone *mz; 4882 int rc = 0; 4883 4884 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4885 queue_id, ring_name); 4886 if (rc >= RTE_MEMZONE_NAMESIZE) { 4887 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4888 return -ENAMETOOLONG; 4889 } 4890 4891 mz = rte_memzone_lookup(z_name); 4892 if (mz) 4893 rc = rte_memzone_free(mz); 4894 else 4895 rc = -ENOENT; 4896 4897 return rc; 4898 } 4899 4900 int 4901 rte_eth_dev_create(struct rte_device *device, const char *name, 4902 size_t priv_data_size, 4903 ethdev_bus_specific_init ethdev_bus_specific_init, 4904 void *bus_init_params, 4905 ethdev_init_t ethdev_init, void *init_params) 4906 { 4907 struct rte_eth_dev *ethdev; 4908 int retval; 4909 4910 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4911 4912 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4913 ethdev = rte_eth_dev_allocate(name); 4914 if (!ethdev) 4915 return -ENODEV; 4916 4917 if (priv_data_size) { 4918 ethdev->data->dev_private = rte_zmalloc_socket( 4919 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4920 device->numa_node); 4921 4922 if (!ethdev->data->dev_private) { 4923 RTE_ETHDEV_LOG(ERR, 4924 "failed to allocate private data\n"); 4925 retval = -ENOMEM; 4926 goto probe_failed; 4927 } 4928 } 4929 } else { 4930 ethdev = rte_eth_dev_attach_secondary(name); 4931 if (!ethdev) { 4932 RTE_ETHDEV_LOG(ERR, 4933 "secondary process attach failed, ethdev doesn't exist\n"); 4934 return -ENODEV; 4935 } 4936 } 4937 4938 ethdev->device = device; 4939 4940 if (ethdev_bus_specific_init) { 4941 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4942 if (retval) { 4943 RTE_ETHDEV_LOG(ERR, 4944 "ethdev bus specific initialisation failed\n"); 4945 goto probe_failed; 4946 } 4947 } 4948 4949 retval = ethdev_init(ethdev, init_params); 4950 if (retval) { 4951 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 4952 goto probe_failed; 4953 } 4954 4955 rte_eth_dev_probing_finish(ethdev); 4956 4957 return retval; 4958 4959 probe_failed: 4960 
rte_eth_dev_release_port(ethdev); 4961 return retval; 4962 } 4963 4964 int 4965 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 4966 ethdev_uninit_t ethdev_uninit) 4967 { 4968 int ret; 4969 4970 ethdev = rte_eth_dev_allocated(ethdev->data->name); 4971 if (!ethdev) 4972 return -ENODEV; 4973 4974 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 4975 4976 ret = ethdev_uninit(ethdev); 4977 if (ret) 4978 return ret; 4979 4980 return rte_eth_dev_release_port(ethdev); 4981 } 4982 4983 int 4984 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4985 int epfd, int op, void *data) 4986 { 4987 uint32_t vec; 4988 struct rte_eth_dev *dev; 4989 struct rte_intr_handle *intr_handle; 4990 int rc; 4991 4992 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4993 dev = &rte_eth_devices[port_id]; 4994 4995 if (queue_id >= dev->data->nb_rx_queues) { 4996 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 4997 return -EINVAL; 4998 } 4999 5000 if (!dev->intr_handle) { 5001 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n"); 5002 return -ENOTSUP; 5003 } 5004 5005 intr_handle = dev->intr_handle; 5006 if (!intr_handle->intr_vec) { 5007 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n"); 5008 return -EPERM; 5009 } 5010 5011 vec = intr_handle->intr_vec[queue_id]; 5012 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5013 if (rc && rc != -EEXIST) { 5014 RTE_ETHDEV_LOG(ERR, 5015 "p %u q %u rx ctl error op %d epfd %d vec %u\n", 5016 port_id, queue_id, op, epfd, vec); 5017 return rc; 5018 } 5019 5020 return 0; 5021 } 5022 5023 int 5024 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5025 uint16_t queue_id) 5026 { 5027 struct rte_eth_dev *dev; 5028 int ret; 5029 5030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5031 dev = &rte_eth_devices[port_id]; 5032 5033 ret = eth_dev_validate_rx_queue(dev, queue_id); 5034 if (ret != 0) 5035 return ret; 5036 5037 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5038 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5039 } 5040 5041 int 5042 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5043 uint16_t queue_id) 5044 { 5045 struct rte_eth_dev *dev; 5046 int ret; 5047 5048 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5049 dev = &rte_eth_devices[port_id]; 5050 5051 ret = eth_dev_validate_rx_queue(dev, queue_id); 5052 if (ret != 0) 5053 return ret; 5054 5055 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5056 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5057 } 5058 5059 5060 const struct rte_eth_rxtx_callback * 5061 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5062 rte_rx_callback_fn fn, void *user_param) 5063 { 5064 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5065 rte_errno = ENOTSUP; 5066 return NULL; 5067 #endif 5068 struct rte_eth_dev *dev; 5069 5070 /* check input parameters */ 5071 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5072 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5073 rte_errno = EINVAL; 5074 return NULL; 5075 } 5076 dev = &rte_eth_devices[port_id]; 5077 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5078 rte_errno = EINVAL; 5079 return NULL; 5080 } 5081 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5082 5083 if (cb == NULL) { 5084 rte_errno = ENOMEM; 5085 return NULL; 5086 } 5087 5088 cb->fn.rx = fn; 5089 cb->param = user_param; 5090 5091 rte_spinlock_lock(ð_dev_rx_cb_lock); 5092 /* Add the callbacks in fifo order. 
 */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	/* Stores to cb->fn, cb->param and cb->next should complete before
	 * cb is visible to data plane threads.
	 */
	__atomic_store_n(
		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
		cb, __ATOMIC_RELEASE);
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list.
*/ 5273 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5274 ret = 0; 5275 break; 5276 } 5277 } 5278 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5279 5280 return ret; 5281 } 5282 5283 int 5284 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5285 struct rte_eth_rxq_info *qinfo) 5286 { 5287 struct rte_eth_dev *dev; 5288 5289 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5290 dev = &rte_eth_devices[port_id]; 5291 5292 if (queue_id >= dev->data->nb_rx_queues) { 5293 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5294 return -EINVAL; 5295 } 5296 5297 if (qinfo == NULL) { 5298 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5299 port_id, queue_id); 5300 return -EINVAL; 5301 } 5302 5303 if (dev->data->rx_queues == NULL || 5304 dev->data->rx_queues[queue_id] == NULL) { 5305 RTE_ETHDEV_LOG(ERR, 5306 "Rx queue %"PRIu16" of device with port_id=%" 5307 PRIu16" has not been setup\n", 5308 queue_id, port_id); 5309 return -EINVAL; 5310 } 5311 5312 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5313 RTE_ETHDEV_LOG(INFO, 5314 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5315 queue_id, port_id); 5316 return -EINVAL; 5317 } 5318 5319 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5320 5321 memset(qinfo, 0, sizeof(*qinfo)); 5322 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5323 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5324 5325 return 0; 5326 } 5327 5328 int 5329 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5330 struct rte_eth_txq_info *qinfo) 5331 { 5332 struct rte_eth_dev *dev; 5333 5334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5335 dev = &rte_eth_devices[port_id]; 5336 5337 if (queue_id >= dev->data->nb_tx_queues) { 5338 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5339 return -EINVAL; 5340 } 5341 5342 if (qinfo == NULL) { 5343 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5344 port_id, queue_id); 5345 return -EINVAL; 5346 } 5347 5348 if (dev->data->tx_queues == NULL || 5349 dev->data->tx_queues[queue_id] == NULL) { 5350 RTE_ETHDEV_LOG(ERR, 5351 "Tx queue %"PRIu16" of device with port_id=%" 5352 PRIu16" has not been setup\n", 5353 queue_id, port_id); 5354 return -EINVAL; 5355 } 5356 5357 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5358 RTE_ETHDEV_LOG(INFO, 5359 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5360 queue_id, port_id); 5361 return -EINVAL; 5362 } 5363 5364 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5365 5366 memset(qinfo, 0, sizeof(*qinfo)); 5367 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5368 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5369 5370 return 0; 5371 } 5372 5373 int 5374 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5375 struct rte_eth_burst_mode *mode) 5376 { 5377 struct rte_eth_dev *dev; 5378 5379 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5380 dev = &rte_eth_devices[port_id]; 5381 5382 if (queue_id >= dev->data->nb_rx_queues) { 5383 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); 5384 return -EINVAL; 5385 } 5386 5387 if (mode == NULL) { 5388 RTE_ETHDEV_LOG(ERR, 5389 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5390 port_id, queue_id); 5391 return -EINVAL; 5392 } 5393 5394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5395 memset(mode, 0, sizeof(*mode)); 5396 return eth_err(port_id, 5397 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5398 } 5399 5400 int 5401 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5402 struct rte_eth_burst_mode *mode) 5403 { 5404 struct rte_eth_dev *dev; 5405 5406 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5407 dev = &rte_eth_devices[port_id]; 5408 5409 if (queue_id >= dev->data->nb_tx_queues) { 5410 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); 5411 return -EINVAL; 5412 } 5413 5414 if (mode == NULL) { 5415 RTE_ETHDEV_LOG(ERR, 5416 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5417 port_id, queue_id); 5418 return -EINVAL; 5419 } 5420 5421 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5422 memset(mode, 0, sizeof(*mode)); 5423 return eth_err(port_id, 5424 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5425 } 5426 5427 int 5428 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5429 struct rte_power_monitor_cond *pmc) 5430 { 5431 struct rte_eth_dev *dev; 5432 5433 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5434 dev = &rte_eth_devices[port_id]; 5435 5436 if (queue_id >= dev->data->nb_rx_queues) { 5437 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5438 return -EINVAL; 5439 } 5440 5441 if (pmc == NULL) { 5442 RTE_ETHDEV_LOG(ERR, 5443 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5444 port_id, queue_id); 5445 return -EINVAL; 5446 } 5447 5448 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5449 return eth_err(port_id, 5450 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5451 } 5452 5453 int 5454 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5455 struct rte_ether_addr *mc_addr_set, 5456 uint32_t nb_mc_addr) 5457 { 5458 struct rte_eth_dev *dev; 5459 5460 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5461 dev = &rte_eth_devices[port_id]; 5462 5463 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5464 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5465 mc_addr_set, nb_mc_addr)); 5466 } 5467 5468 int 5469 rte_eth_timesync_enable(uint16_t port_id) 5470 { 5471 struct rte_eth_dev *dev; 5472 5473 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5474 dev = &rte_eth_devices[port_id]; 5475 5476 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5477 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5478 } 5479 5480 int 5481 rte_eth_timesync_disable(uint16_t port_id) 5482 { 5483 struct rte_eth_dev *dev; 5484 5485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5486 dev = &rte_eth_devices[port_id]; 5487 5488 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5489 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5490 } 5491 5492 int 5493 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5494 uint32_t flags) 5495 { 5496 struct rte_eth_dev *dev; 5497 5498 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5499 dev = &rte_eth_devices[port_id]; 5500 5501 if (timestamp == NULL) { 5502 RTE_ETHDEV_LOG(ERR, 5503 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5504 port_id); 5505 return -EINVAL; 5506 } 5507 5508 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5509 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5510 (dev, timestamp, flags)); 5511 } 5512 5513 int 5514 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5515 struct timespec *timestamp) 5516 { 5517 struct rte_eth_dev *dev; 5518 5519 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5520 dev = &rte_eth_devices[port_id]; 5521 5522 if (timestamp == NULL) { 5523 RTE_ETHDEV_LOG(ERR, 5524 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5525 port_id); 5526 return -EINVAL; 5527 } 5528 5529 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5530 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5531 (dev, timestamp)); 5532 } 5533 5534 int 5535 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5536 { 5537 struct rte_eth_dev *dev; 5538 5539 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5540 dev = &rte_eth_devices[port_id]; 5541 5542 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5543 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5544 } 5545 5546 int 5547 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5548 { 5549 struct rte_eth_dev *dev; 5550 5551 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5552 dev = &rte_eth_devices[port_id]; 5553 5554 if (timestamp == NULL) { 5555 RTE_ETHDEV_LOG(ERR, 5556 "Cannot read ethdev port %u timesync time to NULL\n", 5557 port_id); 5558 return -EINVAL; 5559 } 5560 5561 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5562 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5563 timestamp)); 5564 } 5565 5566 int 5567 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5568 { 5569 struct rte_eth_dev *dev; 5570 5571 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5572 dev = &rte_eth_devices[port_id]; 5573 5574 if (timestamp == NULL) { 5575 RTE_ETHDEV_LOG(ERR, 5576 "Cannot write ethdev port %u timesync from NULL time\n", 5577 port_id); 5578 return -EINVAL; 5579 } 5580 5581 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5582 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5583 timestamp)); 5584 } 5585 5586 int 5587 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5588 { 5589 struct rte_eth_dev *dev; 5590 5591 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5592 dev = &rte_eth_devices[port_id]; 5593 5594 if (clock == NULL) { 5595 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5596 port_id); 5597 return -EINVAL; 5598 } 5599 5600 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5601 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5602 } 5603 5604 int 5605 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5606 { 5607 struct rte_eth_dev *dev; 5608 5609 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5610 dev = &rte_eth_devices[port_id]; 5611 5612 if (info == NULL) { 5613 RTE_ETHDEV_LOG(ERR, 5614 "Cannot get ethdev port %u register info to NULL\n", 5615 port_id); 5616 return -EINVAL; 5617 } 5618 5619 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5620 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5621 } 5622 5623 int 5624 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5625 { 5626 struct rte_eth_dev *dev; 5627 5628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5629 dev = &rte_eth_devices[port_id]; 5630 5631 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5632 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5633 } 5634 5635 int 5636 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5637 { 5638 struct rte_eth_dev *dev; 5639 5640 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5641 dev = &rte_eth_devices[port_id]; 5642 5643 if (info == NULL) { 5644 RTE_ETHDEV_LOG(ERR, 5645 "Cannot get ethdev port %u EEPROM info to NULL\n", 5646 port_id); 5647 return -EINVAL; 5648 } 5649 5650 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5651 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5652 } 5653 5654 int 5655 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5656 { 5657 struct rte_eth_dev *dev; 5658 5659 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5660 dev = &rte_eth_devices[port_id]; 5661 5662 if (info == NULL) { 5663 RTE_ETHDEV_LOG(ERR, 5664 "Cannot set ethdev port %u EEPROM from NULL info\n", 5665 port_id); 5666 return -EINVAL; 5667 } 5668 5669 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5670 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5671 } 5672 5673 int 5674 rte_eth_dev_get_module_info(uint16_t port_id, 5675 struct rte_eth_dev_module_info *modinfo) 5676 { 5677 struct rte_eth_dev *dev; 5678 5679 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5680 dev = &rte_eth_devices[port_id]; 5681 5682 if (modinfo == NULL) { 5683 RTE_ETHDEV_LOG(ERR, 5684 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5685 port_id); 5686 return -EINVAL; 5687 } 5688 5689 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5690 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5691 } 5692 5693 int 5694 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5695 struct rte_dev_eeprom_info *info) 5696 { 5697 struct rte_eth_dev *dev; 5698 5699 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5700 dev = &rte_eth_devices[port_id]; 5701 5702 if (info == NULL) { 5703 RTE_ETHDEV_LOG(ERR, 5704 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5705 port_id); 5706 return -EINVAL; 5707 } 5708 5709 if (info->data == NULL) { 5710 RTE_ETHDEV_LOG(ERR, 5711 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5712 port_id); 5713 return -EINVAL; 5714 } 5715 5716 if (info->length == 0) { 5717 RTE_ETHDEV_LOG(ERR, 5718 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5719 port_id); 5720 return -EINVAL; 5721 } 5722 5723 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5724 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5725 } 5726 5727 int 5728 rte_eth_dev_get_dcb_info(uint16_t port_id, 5729 struct rte_eth_dcb_info *dcb_info) 5730 { 5731 struct rte_eth_dev *dev; 5732 5733 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5734 dev = &rte_eth_devices[port_id]; 5735 5736 if (dcb_info == NULL) { 5737 RTE_ETHDEV_LOG(ERR, 5738 "Cannot get ethdev port %u DCB info to NULL\n", 5739 port_id); 5740 return -EINVAL; 5741 } 5742 5743 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5744 5745 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5746 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5747 } 5748 5749 static void 5750 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5751 const struct rte_eth_desc_lim *desc_lim) 5752 { 5753 if (desc_lim->nb_align != 0) 5754 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5755 5756 if (desc_lim->nb_max != 0) 5757 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5758 5759 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5760 } 5761 5762 int 5763 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5764 uint16_t *nb_rx_desc, 5765 uint16_t *nb_tx_desc) 5766 { 5767 struct rte_eth_dev_info dev_info; 5768 int ret; 5769 5770 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5771 5772 ret = rte_eth_dev_info_get(port_id, &dev_info); 5773 if (ret != 0) 5774 return ret; 5775 5776 if (nb_rx_desc != NULL) 5777 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5778 5779 if (nb_tx_desc != NULL) 5780 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5781 5782 return 0; 5783 } 5784 5785 int 5786 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5787 struct rte_eth_hairpin_cap *cap) 5788 { 5789 struct rte_eth_dev *dev; 5790 5791 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5792 dev = &rte_eth_devices[port_id]; 5793 5794 if (cap == NULL) { 5795 RTE_ETHDEV_LOG(ERR, 5796 "Cannot get ethdev port %u hairpin capability to NULL\n", 5797 port_id); 5798 return -EINVAL; 5799 } 5800 5801 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5802 memset(cap, 0, sizeof(*cap)); 5803 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5804 } 5805 5806 int 5807 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5808 { 5809 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5810 return 1; 5811 return 0; 5812 } 5813 5814 int 5815 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5816 { 5817 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5818 return 1; 5819 return 0; 5820 } 5821 5822 int 5823 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5824 { 5825 struct rte_eth_dev *dev; 5826 5827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5828 dev = &rte_eth_devices[port_id]; 5829 5830 if (pool == NULL) { 5831 RTE_ETHDEV_LOG(ERR, 5832 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5833 port_id); 5834 return -EINVAL; 5835 } 5836 5837 if (*dev->dev_ops->pool_ops_supported == NULL) 5838 return 1; /* all pools are supported */ 5839 5840 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5841 } 5842 5843 /** 5844 * A set of values to describe the possible states of a switch domain. 5845 */ 5846 enum rte_eth_switch_domain_state { 5847 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5848 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5849 }; 5850 5851 /** 5852 * Array of switch domains available for allocation. Array is sized to 5853 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5854 * ethdev ports in a single process. 
5855 */ 5856 static struct rte_eth_dev_switch { 5857 enum rte_eth_switch_domain_state state; 5858 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5859 5860 int 5861 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5862 { 5863 uint16_t i; 5864 5865 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5866 5867 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5868 if (eth_dev_switch_domains[i].state == 5869 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5870 eth_dev_switch_domains[i].state = 5871 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5872 *domain_id = i; 5873 return 0; 5874 } 5875 } 5876 5877 return -ENOSPC; 5878 } 5879 5880 int 5881 rte_eth_switch_domain_free(uint16_t domain_id) 5882 { 5883 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5884 domain_id >= RTE_MAX_ETHPORTS) 5885 return -EINVAL; 5886 5887 if (eth_dev_switch_domains[domain_id].state != 5888 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5889 return -EINVAL; 5890 5891 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5892 5893 return 0; 5894 } 5895 5896 static int 5897 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5898 { 5899 int state; 5900 struct rte_kvargs_pair *pair; 5901 char *letter; 5902 5903 arglist->str = strdup(str_in); 5904 if (arglist->str == NULL) 5905 return -ENOMEM; 5906 5907 letter = arglist->str; 5908 state = 0; 5909 arglist->count = 0; 5910 pair = &arglist->pairs[0]; 5911 while (1) { 5912 switch (state) { 5913 case 0: /* Initial */ 5914 if (*letter == '=') 5915 return -EINVAL; 5916 else if (*letter == '\0') 5917 return 0; 5918 5919 state = 1; 5920 pair->key = letter; 5921 /* fall-thru */ 5922 5923 case 1: /* Parsing key */ 5924 if (*letter == '=') { 5925 *letter = '\0'; 5926 pair->value = letter + 1; 5927 state = 2; 5928 } else if (*letter == ',' || *letter == '\0') 5929 return -EINVAL; 5930 break; 5931 5932 5933 case 2: /* Parsing value */ 5934 if (*letter == '[') 5935 state = 3; 5936 else if (*letter == ',') { 5937 *letter = '\0'; 5938 arglist->count++; 5939 pair = &arglist->pairs[arglist->count]; 5940 state = 0; 5941 } else if (*letter == '\0') { 5942 letter--; 5943 arglist->count++; 5944 pair = &arglist->pairs[arglist->count]; 5945 state = 0; 5946 } 5947 break; 5948 5949 case 3: /* Parsing list */ 5950 if (*letter == ']') 5951 state = 2; 5952 else if (*letter == '\0') 5953 return -EINVAL; 5954 break; 5955 } 5956 letter++; 5957 } 5958 } 5959 5960 int 5961 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 5962 { 5963 struct rte_kvargs args; 5964 struct rte_kvargs_pair *pair; 5965 unsigned int i; 5966 int result = 0; 5967 5968 memset(eth_da, 0, sizeof(*eth_da)); 5969 5970 result = eth_dev_devargs_tokenise(&args, dargs); 5971 if (result < 0) 5972 goto parse_cleanup; 5973 5974 for (i = 0; i < args.count; i++) { 5975 pair = &args.pairs[i]; 5976 if (strcmp("representor", pair->key) == 0) { 5977 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 5978 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 5979 dargs); 5980 result = -1; 5981 goto parse_cleanup; 5982 } 5983 result = rte_eth_devargs_parse_representor_ports( 5984 pair->value, eth_da); 5985 if (result < 0) 5986 goto parse_cleanup; 5987 } 5988 } 5989 5990 parse_cleanup: 5991 if (args.str) 5992 free(args.str); 5993 5994 return result; 5995 } 5996 5997 int 5998 rte_eth_representor_id_get(uint16_t port_id, 5999 enum rte_eth_representor_type type, 6000 int controller, int pf, int representor_port, 6001 uint16_t *repr_id) 6002 { 6003 int ret, n, count; 6004 uint32_t i; 6005 struct rte_eth_representor_info *info = NULL; 6006 size_t 
size; 6007 6008 if (type == RTE_ETH_REPRESENTOR_NONE) 6009 return 0; 6010 if (repr_id == NULL) 6011 return -EINVAL; 6012 6013 /* Get PMD representor range info. */ 6014 ret = rte_eth_representor_info_get(port_id, NULL); 6015 if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && 6016 controller == -1 && pf == -1) { 6017 /* Direct mapping for legacy VF representor. */ 6018 *repr_id = representor_port; 6019 return 0; 6020 } else if (ret < 0) { 6021 return ret; 6022 } 6023 n = ret; 6024 size = sizeof(*info) + n * sizeof(info->ranges[0]); 6025 info = calloc(1, size); 6026 if (info == NULL) 6027 return -ENOMEM; 6028 info->nb_ranges_alloc = n; 6029 ret = rte_eth_representor_info_get(port_id, info); 6030 if (ret < 0) 6031 goto out; 6032 6033 /* Default controller and pf to caller. */ 6034 if (controller == -1) 6035 controller = info->controller; 6036 if (pf == -1) 6037 pf = info->pf; 6038 6039 /* Locate representor ID. */ 6040 ret = -ENOENT; 6041 for (i = 0; i < info->nb_ranges; ++i) { 6042 if (info->ranges[i].type != type) 6043 continue; 6044 if (info->ranges[i].controller != controller) 6045 continue; 6046 if (info->ranges[i].id_end < info->ranges[i].id_base) { 6047 RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", 6048 port_id, info->ranges[i].id_base, 6049 info->ranges[i].id_end, i); 6050 continue; 6051 6052 } 6053 count = info->ranges[i].id_end - info->ranges[i].id_base + 1; 6054 switch (info->ranges[i].type) { 6055 case RTE_ETH_REPRESENTOR_PF: 6056 if (pf < info->ranges[i].pf || 6057 pf >= info->ranges[i].pf + count) 6058 continue; 6059 *repr_id = info->ranges[i].id_base + 6060 (pf - info->ranges[i].pf); 6061 ret = 0; 6062 goto out; 6063 case RTE_ETH_REPRESENTOR_VF: 6064 if (info->ranges[i].pf != pf) 6065 continue; 6066 if (representor_port < info->ranges[i].vf || 6067 representor_port >= info->ranges[i].vf + count) 6068 continue; 6069 *repr_id = info->ranges[i].id_base + 6070 (representor_port - info->ranges[i].vf); 6071 ret = 0; 6072 goto out; 6073 case RTE_ETH_REPRESENTOR_SF: 6074 if (info->ranges[i].pf != pf) 6075 continue; 6076 if (representor_port < info->ranges[i].sf || 6077 representor_port >= info->ranges[i].sf + count) 6078 continue; 6079 *repr_id = info->ranges[i].id_base + 6080 (representor_port - info->ranges[i].sf); 6081 ret = 0; 6082 goto out; 6083 default: 6084 break; 6085 } 6086 } 6087 out: 6088 free(info); 6089 return ret; 6090 } 6091 6092 static int 6093 eth_dev_handle_port_list(const char *cmd __rte_unused, 6094 const char *params __rte_unused, 6095 struct rte_tel_data *d) 6096 { 6097 int port_id; 6098 6099 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6100 RTE_ETH_FOREACH_DEV(port_id) 6101 rte_tel_data_add_array_int(d, port_id); 6102 return 0; 6103 } 6104 6105 static void 6106 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6107 const char *stat_name) 6108 { 6109 int q; 6110 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6111 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 6112 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6113 rte_tel_data_add_array_u64(q_data, q_stats[q]); 6114 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6115 } 6116 6117 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 6118 6119 static int 6120 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6121 const char *params, 6122 struct rte_tel_data *d) 6123 { 6124 struct rte_eth_stats stats; 6125 int port_id, ret; 6126 6127 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6128 
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	/* names and values were copied into d, release the single buffer */
	free(eth_xstats);
	return 0;
}

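/*
 * Illustrative note (not part of the upstream file): the xstats handler above
 * sizes one allocation for both the values and the names.  The layout is
 *
 *     eth_xstats:   [ struct rte_eth_xstat      * num_xstats ]
 *     xstat_names:  [ struct rte_eth_xstat_name * num_xstats ]  (follows)
 *
 * so xstat_names is simply &eth_xstats[num_xstats] reinterpreted as the name
 * type, and a single free() releases both arrays on every exit path.
 */
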
6240 "full-duplex" : "half-duplex"); 6241 return 0; 6242 } 6243 6244 static int 6245 eth_dev_handle_port_info(const char *cmd __rte_unused, 6246 const char *params, 6247 struct rte_tel_data *d) 6248 { 6249 struct rte_tel_data *rxq_state, *txq_state; 6250 char mac_addr[RTE_ETHER_ADDR_LEN]; 6251 struct rte_eth_dev *eth_dev; 6252 char *end_param; 6253 int port_id, i; 6254 6255 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6256 return -1; 6257 6258 port_id = strtoul(params, &end_param, 0); 6259 if (*end_param != '\0') 6260 RTE_ETHDEV_LOG(NOTICE, 6261 "Extra parameters passed to ethdev telemetry command, ignoring"); 6262 6263 if (!rte_eth_dev_is_valid_port(port_id)) 6264 return -EINVAL; 6265 6266 eth_dev = &rte_eth_devices[port_id]; 6267 if (!eth_dev) 6268 return -EINVAL; 6269 6270 rxq_state = rte_tel_data_alloc(); 6271 if (!rxq_state) 6272 return -ENOMEM; 6273 6274 txq_state = rte_tel_data_alloc(); 6275 if (!txq_state) 6276 return -ENOMEM; 6277 6278 rte_tel_data_start_dict(d); 6279 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6280 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6281 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6282 eth_dev->data->nb_rx_queues); 6283 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6284 eth_dev->data->nb_tx_queues); 6285 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6286 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6287 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6288 eth_dev->data->min_rx_buf_size); 6289 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6290 eth_dev->data->rx_mbuf_alloc_failed); 6291 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6292 eth_dev->data->mac_addrs->addr_bytes[0], 6293 eth_dev->data->mac_addrs->addr_bytes[1], 6294 eth_dev->data->mac_addrs->addr_bytes[2], 6295 eth_dev->data->mac_addrs->addr_bytes[3], 6296 eth_dev->data->mac_addrs->addr_bytes[4], 6297 eth_dev->data->mac_addrs->addr_bytes[5]); 6298 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6299 rte_tel_data_add_dict_int(d, "promiscuous", 6300 eth_dev->data->promiscuous); 6301 rte_tel_data_add_dict_int(d, "scattered_rx", 6302 eth_dev->data->scattered_rx); 6303 rte_tel_data_add_dict_int(d, "all_multicast", 6304 eth_dev->data->all_multicast); 6305 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6306 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6307 rte_tel_data_add_dict_int(d, "dev_configured", 6308 eth_dev->data->dev_configured); 6309 6310 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6311 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6312 rte_tel_data_add_array_int(rxq_state, 6313 eth_dev->data->rx_queue_state[i]); 6314 6315 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6316 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6317 rte_tel_data_add_array_int(txq_state, 6318 eth_dev->data->tx_queue_state[i]); 6319 6320 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6321 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6322 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6323 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6324 rte_tel_data_add_dict_int(d, "rx_offloads", 6325 eth_dev->data->dev_conf.rxmode.offloads); 6326 rte_tel_data_add_dict_int(d, "tx_offloads", 6327 eth_dev->data->dev_conf.txmode.offloads); 6328 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6329 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6330 6331 return 0; 6332 } 6333 
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
			cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
			peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
			direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (id=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

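/*
 * Illustrative sketch (not part of the upstream file):
 * rte_eth_rx_metadata_negotiate() must be called before
 * rte_eth_dev_configure(), and 'features' is both input and output.  An
 * application that wants flow mark metadata in received mbufs might do,
 * assuming port_id names a valid, not-yet-configured port:
 *
 *     uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                         RTE_ETH_RX_METADATA_USER_MARK;
 *     int ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *     if (ret == 0)
 *             ; // 'features' now holds the subset the PMD agreed to deliver
 *     else if (ret == -ENOTSUP)
 *             ; // PMD does not negotiate; legacy behaviour applies
 */
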
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}

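/*
 * Illustrative usage (not part of the upstream file): once a primary process
 * is running, the commands registered above can be queried over the telemetry
 * socket, for example with the dpdk-telemetry.py client shipped with DPDK.
 * A hypothetical session (port identifiers and counter values invented):
 *
 *     --> /ethdev/list
 *     {"/ethdev/list": [0, 1]}
 *     --> /ethdev/stats,0
 *     {"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 *
 * The integer after the comma is passed to the handler as 'params'.
 */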