1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <ctype.h> 6 #include <errno.h> 7 #include <inttypes.h> 8 #include <stdbool.h> 9 #include <stdint.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <sys/queue.h> 13 14 #include <rte_byteorder.h> 15 #include <rte_log.h> 16 #include <rte_debug.h> 17 #include <rte_interrupts.h> 18 #include <rte_memory.h> 19 #include <rte_memcpy.h> 20 #include <rte_memzone.h> 21 #include <rte_launch.h> 22 #include <rte_eal.h> 23 #include <rte_per_lcore.h> 24 #include <rte_lcore.h> 25 #include <rte_branch_prediction.h> 26 #include <rte_common.h> 27 #include <rte_mempool.h> 28 #include <rte_malloc.h> 29 #include <rte_mbuf.h> 30 #include <rte_errno.h> 31 #include <rte_spinlock.h> 32 #include <rte_string_fns.h> 33 #include <rte_kvargs.h> 34 #include <rte_class.h> 35 #include <rte_ether.h> 36 #include <rte_telemetry.h> 37 38 #include "rte_ethdev_trace.h" 39 #include "rte_ethdev.h" 40 #include "ethdev_driver.h" 41 #include "ethdev_profile.h" 42 #include "ethdev_private.h" 43 44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; 45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; 46 47 /* public fast-path API */ 48 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS]; 49 50 /* spinlock for eth device callbacks */ 51 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; 52 53 /* spinlock for add/remove Rx callbacks */ 54 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; 55 56 /* spinlock for add/remove Tx callbacks */ 57 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; 58 59 /* spinlock for shared data allocation */ 60 static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 61 62 /* store statistics names and its offset in stats structure */ 63 struct rte_eth_xstats_name_off { 64 char name[RTE_ETH_XSTATS_NAME_SIZE]; 65 unsigned offset; 66 }; 67 68 /* Shared memory between primary and secondary processes. 
*/ 69 static struct { 70 uint64_t next_owner_id; 71 rte_spinlock_t ownership_lock; 72 struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; 73 } *eth_dev_shared_data; 74 75 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = { 76 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 77 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 78 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 79 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 80 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 81 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 82 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 83 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 84 rx_nombuf)}, 85 }; 86 87 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings) 88 89 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = { 90 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 91 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 92 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 93 }; 94 95 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings) 96 97 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = { 98 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 99 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 100 }; 101 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings) 102 103 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 104 { RTE_ETH_RX_OFFLOAD_##_name, #_name } 105 106 static const struct { 107 uint64_t offload; 108 const char *name; 109 } eth_dev_rx_offload_names[] = { 110 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 111 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 112 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 113 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 114 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 115 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 116 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 117 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 118 RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT), 119 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 120 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 121 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 122 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 123 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 124 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 125 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 126 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 127 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 128 RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT), 129 }; 130 131 #undef RTE_RX_OFFLOAD_BIT2STR 132 #undef RTE_ETH_RX_OFFLOAD_BIT2STR 133 134 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 135 { RTE_ETH_TX_OFFLOAD_##_name, #_name } 136 137 static const struct { 138 uint64_t offload; 139 const char *name; 140 } eth_dev_tx_offload_names[] = { 141 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 142 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 143 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 144 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 145 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 146 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 147 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 148 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 149 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 150 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 151 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 152 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 153 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 154 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 155 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 156 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 157 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 158 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 159 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 160 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 161 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 162 
RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP), 163 }; 164 165 #undef RTE_TX_OFFLOAD_BIT2STR 166 167 static const struct { 168 uint64_t offload; 169 const char *name; 170 } rte_eth_dev_capa_names[] = { 171 {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"}, 172 {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"}, 173 {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"}, 174 {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"}, 175 {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"}, 176 }; 177 178 /** 179 * The user application callback description. 180 * 181 * It contains callback address to be registered by user application, 182 * the pointer to the parameters for callback, and the event type. 183 */ 184 struct rte_eth_dev_callback { 185 TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ 186 rte_eth_dev_cb_fn cb_fn; /**< Callback address */ 187 void *cb_arg; /**< Parameter for callback */ 188 void *ret_param; /**< Return parameter */ 189 enum rte_eth_event_type event; /**< Interrupt event type */ 190 uint32_t active; /**< Callback is executing */ 191 }; 192 193 enum { 194 STAT_QMAP_TX = 0, 195 STAT_QMAP_RX 196 }; 197 198 int 199 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 200 { 201 int ret; 202 struct rte_devargs devargs; 203 const char *bus_param_key; 204 char *bus_str = NULL; 205 char *cls_str = NULL; 206 int str_size; 207 208 if (iter == NULL) { 209 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n"); 210 return -EINVAL; 211 } 212 213 if (devargs_str == NULL) { 214 RTE_ETHDEV_LOG(ERR, 215 "Cannot initialize iterator from NULL device description string\n"); 216 return -EINVAL; 217 } 218 219 memset(iter, 0, sizeof(*iter)); 220 memset(&devargs, 0, sizeof(devargs)); 221 222 /* 223 * The devargs string may use various syntaxes: 224 * - 0000:08:00.0,representor=[1-3] 225 * - pci:0000:06:00.0,representor=[0,5] 226 * - class=eth,mac=00:11:22:33:44:55 227 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 228 */ 229 230 /* 231 * Handle pure class filter (i.e. without any bus-level argument), 232 * from future new syntax. 233 * rte_devargs_parse() is not yet supporting the new syntax, 234 * that's why this simple case is temporarily parsed here. 235 */ 236 #define iter_anybus_str "class=eth," 237 if (strncmp(devargs_str, iter_anybus_str, 238 strlen(iter_anybus_str)) == 0) { 239 iter->cls_str = devargs_str + strlen(iter_anybus_str); 240 goto end; 241 } 242 243 /* Split bus, device and parameters. */ 244 ret = rte_devargs_parse(&devargs, devargs_str); 245 if (ret != 0) 246 goto error; 247 248 /* 249 * Assume parameters of old syntax can match only at ethdev level. 250 * Extra parameters will be ignored, thanks to "+" prefix. 251 */ 252 str_size = strlen(devargs.args) + 2; 253 cls_str = malloc(str_size); 254 if (cls_str == NULL) { 255 ret = -ENOMEM; 256 goto error; 257 } 258 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 259 if (ret != str_size - 1) { 260 ret = -EINVAL; 261 goto error; 262 } 263 iter->cls_str = cls_str; 264 265 iter->bus = devargs.bus; 266 if (iter->bus->dev_iterate == NULL) { 267 ret = -ENOTSUP; 268 goto error; 269 } 270 271 /* Convert bus args to new syntax for use with new API dev_iterate. 
*/ 272 if ((strcmp(iter->bus->name, "vdev") == 0) || 273 (strcmp(iter->bus->name, "fslmc") == 0) || 274 (strcmp(iter->bus->name, "dpaa_bus") == 0)) { 275 bus_param_key = "name"; 276 } else if (strcmp(iter->bus->name, "pci") == 0) { 277 bus_param_key = "addr"; 278 } else { 279 ret = -ENOTSUP; 280 goto error; 281 } 282 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 283 bus_str = malloc(str_size); 284 if (bus_str == NULL) { 285 ret = -ENOMEM; 286 goto error; 287 } 288 ret = snprintf(bus_str, str_size, "%s=%s", 289 bus_param_key, devargs.name); 290 if (ret != str_size - 1) { 291 ret = -EINVAL; 292 goto error; 293 } 294 iter->bus_str = bus_str; 295 296 end: 297 iter->cls = rte_class_find_by_name("eth"); 298 rte_devargs_reset(&devargs); 299 return 0; 300 301 error: 302 if (ret == -ENOTSUP) 303 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 304 iter->bus->name); 305 rte_devargs_reset(&devargs); 306 free(bus_str); 307 free(cls_str); 308 return ret; 309 } 310 311 uint16_t 312 rte_eth_iterator_next(struct rte_dev_iterator *iter) 313 { 314 if (iter == NULL) { 315 RTE_ETHDEV_LOG(ERR, 316 "Cannot get next device from NULL iterator\n"); 317 return RTE_MAX_ETHPORTS; 318 } 319 320 if (iter->cls == NULL) /* invalid ethdev iterator */ 321 return RTE_MAX_ETHPORTS; 322 323 do { /* loop to try all matching rte_device */ 324 /* If not pure ethdev filter and */ 325 if (iter->bus != NULL && 326 /* not in middle of rte_eth_dev iteration, */ 327 iter->class_device == NULL) { 328 /* get next rte_device to try. */ 329 iter->device = iter->bus->dev_iterate( 330 iter->device, iter->bus_str, iter); 331 if (iter->device == NULL) 332 break; /* no more rte_device candidate */ 333 } 334 /* A device is matching bus part, need to check ethdev part. */ 335 iter->class_device = iter->cls->dev_iterate( 336 iter->class_device, iter->cls_str, iter); 337 if (iter->class_device != NULL) 338 return eth_dev_to_id(iter->class_device); /* match */ 339 } while (iter->bus != NULL); /* need to try next rte_device */ 340 341 /* No more ethdev port to iterate. */ 342 rte_eth_iterator_cleanup(iter); 343 return RTE_MAX_ETHPORTS; 344 } 345 346 void 347 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 348 { 349 if (iter == NULL) { 350 RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n"); 351 return; 352 } 353 354 if (iter->bus_str == NULL) 355 return; /* nothing to free in pure class filter */ 356 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 357 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 358 memset(iter, 0, sizeof(*iter)); 359 } 360 361 uint16_t 362 rte_eth_find_next(uint16_t port_id) 363 { 364 while (port_id < RTE_MAX_ETHPORTS && 365 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) 366 port_id++; 367 368 if (port_id >= RTE_MAX_ETHPORTS) 369 return RTE_MAX_ETHPORTS; 370 371 return port_id; 372 } 373 374 /* 375 * Macro to iterate over all valid ports for internal usage. 376 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 
377 */ 378 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 379 for (port_id = rte_eth_find_next(0); \ 380 port_id < RTE_MAX_ETHPORTS; \ 381 port_id = rte_eth_find_next(port_id + 1)) 382 383 uint16_t 384 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 385 { 386 port_id = rte_eth_find_next(port_id); 387 while (port_id < RTE_MAX_ETHPORTS && 388 rte_eth_devices[port_id].device != parent) 389 port_id = rte_eth_find_next(port_id + 1); 390 391 return port_id; 392 } 393 394 uint16_t 395 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 396 { 397 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 398 return rte_eth_find_next_of(port_id, 399 rte_eth_devices[ref_port_id].device); 400 } 401 402 static void 403 eth_dev_shared_data_prepare(void) 404 { 405 const unsigned flags = 0; 406 const struct rte_memzone *mz; 407 408 rte_spinlock_lock(ð_dev_shared_data_lock); 409 410 if (eth_dev_shared_data == NULL) { 411 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 412 /* Allocate port data and ownership shared memory. */ 413 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, 414 sizeof(*eth_dev_shared_data), 415 rte_socket_id(), flags); 416 } else 417 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); 418 if (mz == NULL) 419 rte_panic("Cannot allocate ethdev shared data\n"); 420 421 eth_dev_shared_data = mz->addr; 422 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 423 eth_dev_shared_data->next_owner_id = 424 RTE_ETH_DEV_NO_OWNER + 1; 425 rte_spinlock_init(ð_dev_shared_data->ownership_lock); 426 memset(eth_dev_shared_data->data, 0, 427 sizeof(eth_dev_shared_data->data)); 428 } 429 } 430 431 rte_spinlock_unlock(ð_dev_shared_data_lock); 432 } 433 434 static bool 435 eth_dev_is_allocated(const struct rte_eth_dev *ethdev) 436 { 437 return ethdev->data->name[0] != '\0'; 438 } 439 440 static struct rte_eth_dev * 441 eth_dev_allocated(const char *name) 442 { 443 uint16_t i; 444 445 RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); 446 447 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 448 if (rte_eth_devices[i].data != NULL && 449 strcmp(rte_eth_devices[i].data->name, name) == 0) 450 return &rte_eth_devices[i]; 451 } 452 return NULL; 453 } 454 455 struct rte_eth_dev * 456 rte_eth_dev_allocated(const char *name) 457 { 458 struct rte_eth_dev *ethdev; 459 460 eth_dev_shared_data_prepare(); 461 462 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 463 464 ethdev = eth_dev_allocated(name); 465 466 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 467 468 return ethdev; 469 } 470 471 static uint16_t 472 eth_dev_find_free_port(void) 473 { 474 uint16_t i; 475 476 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 477 /* Using shared name field to find a free port. 
*/ 478 if (eth_dev_shared_data->data[i].name[0] == '\0') { 479 RTE_ASSERT(rte_eth_devices[i].state == 480 RTE_ETH_DEV_UNUSED); 481 return i; 482 } 483 } 484 return RTE_MAX_ETHPORTS; 485 } 486 487 static struct rte_eth_dev * 488 eth_dev_get(uint16_t port_id) 489 { 490 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; 491 492 eth_dev->data = ð_dev_shared_data->data[port_id]; 493 494 return eth_dev; 495 } 496 497 struct rte_eth_dev * 498 rte_eth_dev_allocate(const char *name) 499 { 500 uint16_t port_id; 501 struct rte_eth_dev *eth_dev = NULL; 502 size_t name_len; 503 504 name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); 505 if (name_len == 0) { 506 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); 507 return NULL; 508 } 509 510 if (name_len >= RTE_ETH_NAME_MAX_LEN) { 511 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); 512 return NULL; 513 } 514 515 eth_dev_shared_data_prepare(); 516 517 /* Synchronize port creation between primary and secondary threads. */ 518 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 519 520 if (eth_dev_allocated(name) != NULL) { 521 RTE_ETHDEV_LOG(ERR, 522 "Ethernet device with name %s already allocated\n", 523 name); 524 goto unlock; 525 } 526 527 port_id = eth_dev_find_free_port(); 528 if (port_id == RTE_MAX_ETHPORTS) { 529 RTE_ETHDEV_LOG(ERR, 530 "Reached maximum number of Ethernet ports\n"); 531 goto unlock; 532 } 533 534 eth_dev = eth_dev_get(port_id); 535 strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); 536 eth_dev->data->port_id = port_id; 537 eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS; 538 eth_dev->data->mtu = RTE_ETHER_MTU; 539 pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL); 540 541 unlock: 542 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 543 544 return eth_dev; 545 } 546 547 /* 548 * Attach to a port already registered by the primary process, which 549 * makes sure that the same device would have the same port ID both 550 * in the primary and secondary process. 551 */ 552 struct rte_eth_dev * 553 rte_eth_dev_attach_secondary(const char *name) 554 { 555 uint16_t i; 556 struct rte_eth_dev *eth_dev = NULL; 557 558 eth_dev_shared_data_prepare(); 559 560 /* Synchronize port attachment to primary port creation and release. 
*/ 561 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 562 563 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 564 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) 565 break; 566 } 567 if (i == RTE_MAX_ETHPORTS) { 568 RTE_ETHDEV_LOG(ERR, 569 "Device %s is not driven by the primary process\n", 570 name); 571 } else { 572 eth_dev = eth_dev_get(i); 573 RTE_ASSERT(eth_dev->data->port_id == i); 574 } 575 576 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 577 return eth_dev; 578 } 579 580 int 581 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 582 { 583 if (eth_dev == NULL) 584 return -EINVAL; 585 586 eth_dev_shared_data_prepare(); 587 588 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 589 rte_eth_dev_callback_process(eth_dev, 590 RTE_ETH_EVENT_DESTROY, NULL); 591 592 eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); 593 594 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 595 596 eth_dev->state = RTE_ETH_DEV_UNUSED; 597 eth_dev->device = NULL; 598 eth_dev->process_private = NULL; 599 eth_dev->intr_handle = NULL; 600 eth_dev->rx_pkt_burst = NULL; 601 eth_dev->tx_pkt_burst = NULL; 602 eth_dev->tx_pkt_prepare = NULL; 603 eth_dev->rx_queue_count = NULL; 604 eth_dev->rx_descriptor_status = NULL; 605 eth_dev->tx_descriptor_status = NULL; 606 eth_dev->dev_ops = NULL; 607 608 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 609 rte_free(eth_dev->data->rx_queues); 610 rte_free(eth_dev->data->tx_queues); 611 rte_free(eth_dev->data->mac_addrs); 612 rte_free(eth_dev->data->hash_mac_addrs); 613 rte_free(eth_dev->data->dev_private); 614 pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); 615 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 616 } 617 618 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 619 620 return 0; 621 } 622 623 int 624 rte_eth_dev_is_valid_port(uint16_t port_id) 625 { 626 if (port_id >= RTE_MAX_ETHPORTS || 627 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 628 return 0; 629 else 630 return 1; 631 } 632 633 static int 634 eth_is_valid_owner_id(uint64_t owner_id) 635 { 636 if (owner_id == RTE_ETH_DEV_NO_OWNER || 637 eth_dev_shared_data->next_owner_id <= owner_id) 638 return 0; 639 return 1; 640 } 641 642 uint64_t 643 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 644 { 645 port_id = rte_eth_find_next(port_id); 646 while (port_id < RTE_MAX_ETHPORTS && 647 rte_eth_devices[port_id].data->owner.id != owner_id) 648 port_id = rte_eth_find_next(port_id + 1); 649 650 return port_id; 651 } 652 653 int 654 rte_eth_dev_owner_new(uint64_t *owner_id) 655 { 656 if (owner_id == NULL) { 657 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 658 return -EINVAL; 659 } 660 661 eth_dev_shared_data_prepare(); 662 663 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 664 665 *owner_id = eth_dev_shared_data->next_owner_id++; 666 667 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 668 return 0; 669 } 670 671 static int 672 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 673 const struct rte_eth_dev_owner *new_owner) 674 { 675 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 676 struct rte_eth_dev_owner *port_owner; 677 678 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 679 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 680 port_id); 681 return -ENODEV; 682 } 683 684 if (new_owner == NULL) { 685 RTE_ETHDEV_LOG(ERR, 686 "Cannot set ethdev port %u owner from NULL owner\n", 687 port_id); 688 return -EINVAL; 689 } 690 691 if 
(!eth_is_valid_owner_id(new_owner->id) && 692 !eth_is_valid_owner_id(old_owner_id)) { 693 RTE_ETHDEV_LOG(ERR, 694 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 695 old_owner_id, new_owner->id); 696 return -EINVAL; 697 } 698 699 port_owner = &rte_eth_devices[port_id].data->owner; 700 if (port_owner->id != old_owner_id) { 701 RTE_ETHDEV_LOG(ERR, 702 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 703 port_id, port_owner->name, port_owner->id); 704 return -EPERM; 705 } 706 707 /* can not truncate (same structure) */ 708 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 709 710 port_owner->id = new_owner->id; 711 712 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 713 port_id, new_owner->name, new_owner->id); 714 715 return 0; 716 } 717 718 int 719 rte_eth_dev_owner_set(const uint16_t port_id, 720 const struct rte_eth_dev_owner *owner) 721 { 722 int ret; 723 724 eth_dev_shared_data_prepare(); 725 726 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 727 728 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 729 730 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 731 return ret; 732 } 733 734 int 735 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 736 { 737 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 738 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 739 int ret; 740 741 eth_dev_shared_data_prepare(); 742 743 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 744 745 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 746 747 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 748 return ret; 749 } 750 751 int 752 rte_eth_dev_owner_delete(const uint64_t owner_id) 753 { 754 uint16_t port_id; 755 int ret = 0; 756 757 eth_dev_shared_data_prepare(); 758 759 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 760 761 if (eth_is_valid_owner_id(owner_id)) { 762 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { 763 struct rte_eth_dev_data *data = 764 rte_eth_devices[port_id].data; 765 if (data != NULL && data->owner.id == owner_id) 766 memset(&data->owner, 0, 767 sizeof(struct rte_eth_dev_owner)); 768 } 769 RTE_ETHDEV_LOG(NOTICE, 770 "All port owners owned by %016"PRIx64" identifier have removed\n", 771 owner_id); 772 } else { 773 RTE_ETHDEV_LOG(ERR, 774 "Invalid owner ID=%016"PRIx64"\n", 775 owner_id); 776 ret = -EINVAL; 777 } 778 779 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 780 781 return ret; 782 } 783 784 int 785 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 786 { 787 struct rte_eth_dev *ethdev; 788 789 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 790 ethdev = &rte_eth_devices[port_id]; 791 792 if (!eth_dev_is_allocated(ethdev)) { 793 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 794 port_id); 795 return -ENODEV; 796 } 797 798 if (owner == NULL) { 799 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 800 port_id); 801 return -EINVAL; 802 } 803 804 eth_dev_shared_data_prepare(); 805 806 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 807 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 808 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 809 810 return 0; 811 } 812 813 int 814 rte_eth_dev_socket_id(uint16_t port_id) 815 { 816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 817 return rte_eth_devices[port_id].data->numa_node; 818 } 819 820 void * 821 rte_eth_dev_get_sec_ctx(uint16_t port_id) 822 { 823 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 
824 return rte_eth_devices[port_id].security_ctx; 825 } 826 827 uint16_t 828 rte_eth_dev_count_avail(void) 829 { 830 uint16_t p; 831 uint16_t count; 832 833 count = 0; 834 835 RTE_ETH_FOREACH_DEV(p) 836 count++; 837 838 return count; 839 } 840 841 uint16_t 842 rte_eth_dev_count_total(void) 843 { 844 uint16_t port, count = 0; 845 846 RTE_ETH_FOREACH_VALID_DEV(port) 847 count++; 848 849 return count; 850 } 851 852 int 853 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 854 { 855 char *tmp; 856 857 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 858 859 if (name == NULL) { 860 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 861 port_id); 862 return -EINVAL; 863 } 864 865 /* shouldn't check 'rte_eth_devices[i].data', 866 * because it might be overwritten by VDEV PMD */ 867 tmp = eth_dev_shared_data->data[port_id].name; 868 strcpy(name, tmp); 869 return 0; 870 } 871 872 int 873 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 874 { 875 uint16_t pid; 876 877 if (name == NULL) { 878 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 879 return -EINVAL; 880 } 881 882 if (port_id == NULL) { 883 RTE_ETHDEV_LOG(ERR, 884 "Cannot get port ID to NULL for %s\n", name); 885 return -EINVAL; 886 } 887 888 RTE_ETH_FOREACH_VALID_DEV(pid) 889 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 890 *port_id = pid; 891 return 0; 892 } 893 894 return -ENODEV; 895 } 896 897 struct rte_eth_dev * 898 rte_eth_dev_get_by_name(const char *name) 899 { 900 uint16_t pid; 901 902 if (rte_eth_dev_get_port_by_name(name, &pid)) 903 return NULL; 904 905 return &rte_eth_devices[pid]; 906 } 907 908 static int 909 eth_err(uint16_t port_id, int ret) 910 { 911 if (ret == 0) 912 return 0; 913 if (rte_eth_dev_is_removed(port_id)) 914 return -EIO; 915 return ret; 916 } 917 918 static void 919 eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) 920 { 921 void **rxq = dev->data->rx_queues; 922 923 if (rxq[qid] == NULL) 924 return; 925 926 if (dev->dev_ops->rx_queue_release != NULL) 927 (*dev->dev_ops->rx_queue_release)(dev, qid); 928 rxq[qid] = NULL; 929 } 930 931 static void 932 eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) 933 { 934 void **txq = dev->data->tx_queues; 935 936 if (txq[qid] == NULL) 937 return; 938 939 if (dev->dev_ops->tx_queue_release != NULL) 940 (*dev->dev_ops->tx_queue_release)(dev, qid); 941 txq[qid] = NULL; 942 } 943 944 static int 945 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 946 { 947 uint16_t old_nb_queues = dev->data->nb_rx_queues; 948 unsigned i; 949 950 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 951 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 952 sizeof(dev->data->rx_queues[0]) * 953 RTE_MAX_QUEUES_PER_PORT, 954 RTE_CACHE_LINE_SIZE); 955 if (dev->data->rx_queues == NULL) { 956 dev->data->nb_rx_queues = 0; 957 return -(ENOMEM); 958 } 959 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 960 for (i = nb_queues; i < old_nb_queues; i++) 961 eth_dev_rxq_release(dev, i); 962 963 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 964 for (i = nb_queues; i < old_nb_queues; i++) 965 eth_dev_rxq_release(dev, i); 966 967 rte_free(dev->data->rx_queues); 968 dev->data->rx_queues = NULL; 969 } 970 dev->data->nb_rx_queues = nb_queues; 971 return 0; 972 } 973 974 static int 975 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 976 { 977 uint16_t port_id; 978 979 if (rx_queue_id >= 
dev->data->nb_rx_queues) { 980 port_id = dev->data->port_id; 981 RTE_ETHDEV_LOG(ERR, 982 "Invalid Rx queue_id=%u of device with port_id=%u\n", 983 rx_queue_id, port_id); 984 return -EINVAL; 985 } 986 987 if (dev->data->rx_queues[rx_queue_id] == NULL) { 988 port_id = dev->data->port_id; 989 RTE_ETHDEV_LOG(ERR, 990 "Queue %u of device with port_id=%u has not been setup\n", 991 rx_queue_id, port_id); 992 return -EINVAL; 993 } 994 995 return 0; 996 } 997 998 static int 999 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 1000 { 1001 uint16_t port_id; 1002 1003 if (tx_queue_id >= dev->data->nb_tx_queues) { 1004 port_id = dev->data->port_id; 1005 RTE_ETHDEV_LOG(ERR, 1006 "Invalid Tx queue_id=%u of device with port_id=%u\n", 1007 tx_queue_id, port_id); 1008 return -EINVAL; 1009 } 1010 1011 if (dev->data->tx_queues[tx_queue_id] == NULL) { 1012 port_id = dev->data->port_id; 1013 RTE_ETHDEV_LOG(ERR, 1014 "Queue %u of device with port_id=%u has not been setup\n", 1015 tx_queue_id, port_id); 1016 return -EINVAL; 1017 } 1018 1019 return 0; 1020 } 1021 1022 int 1023 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 1024 { 1025 struct rte_eth_dev *dev; 1026 int ret; 1027 1028 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1029 dev = &rte_eth_devices[port_id]; 1030 1031 if (!dev->data->dev_started) { 1032 RTE_ETHDEV_LOG(ERR, 1033 "Port %u must be started before start any queue\n", 1034 port_id); 1035 return -EINVAL; 1036 } 1037 1038 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1039 if (ret != 0) 1040 return ret; 1041 1042 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 1043 1044 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1045 RTE_ETHDEV_LOG(INFO, 1046 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1047 rx_queue_id, port_id); 1048 return -EINVAL; 1049 } 1050 1051 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1052 RTE_ETHDEV_LOG(INFO, 1053 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1054 rx_queue_id, port_id); 1055 return 0; 1056 } 1057 1058 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 1059 } 1060 1061 int 1062 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 1063 { 1064 struct rte_eth_dev *dev; 1065 int ret; 1066 1067 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1068 dev = &rte_eth_devices[port_id]; 1069 1070 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1071 if (ret != 0) 1072 return ret; 1073 1074 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 1075 1076 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1077 RTE_ETHDEV_LOG(INFO, 1078 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1079 rx_queue_id, port_id); 1080 return -EINVAL; 1081 } 1082 1083 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1084 RTE_ETHDEV_LOG(INFO, 1085 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1086 rx_queue_id, port_id); 1087 return 0; 1088 } 1089 1090 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 1091 } 1092 1093 int 1094 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 1095 { 1096 struct rte_eth_dev *dev; 1097 int ret; 1098 1099 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1100 dev = &rte_eth_devices[port_id]; 1101 1102 if (!dev->data->dev_started) { 1103 RTE_ETHDEV_LOG(ERR, 1104 "Port %u must be started before start any queue\n", 1105 port_id); 
1106 return -EINVAL; 1107 } 1108 1109 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1110 if (ret != 0) 1111 return ret; 1112 1113 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 1114 1115 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1116 RTE_ETHDEV_LOG(INFO, 1117 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1118 tx_queue_id, port_id); 1119 return -EINVAL; 1120 } 1121 1122 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1123 RTE_ETHDEV_LOG(INFO, 1124 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1125 tx_queue_id, port_id); 1126 return 0; 1127 } 1128 1129 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 1130 } 1131 1132 int 1133 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 1134 { 1135 struct rte_eth_dev *dev; 1136 int ret; 1137 1138 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1139 dev = &rte_eth_devices[port_id]; 1140 1141 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1142 if (ret != 0) 1143 return ret; 1144 1145 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1146 1147 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1148 RTE_ETHDEV_LOG(INFO, 1149 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1150 tx_queue_id, port_id); 1151 return -EINVAL; 1152 } 1153 1154 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1155 RTE_ETHDEV_LOG(INFO, 1156 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1157 tx_queue_id, port_id); 1158 return 0; 1159 } 1160 1161 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1162 } 1163 1164 static int 1165 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1166 { 1167 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1168 unsigned i; 1169 1170 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1171 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1172 sizeof(dev->data->tx_queues[0]) * 1173 RTE_MAX_QUEUES_PER_PORT, 1174 RTE_CACHE_LINE_SIZE); 1175 if (dev->data->tx_queues == NULL) { 1176 dev->data->nb_tx_queues = 0; 1177 return -(ENOMEM); 1178 } 1179 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1180 for (i = nb_queues; i < old_nb_queues; i++) 1181 eth_dev_txq_release(dev, i); 1182 1183 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1184 for (i = nb_queues; i < old_nb_queues; i++) 1185 eth_dev_txq_release(dev, i); 1186 1187 rte_free(dev->data->tx_queues); 1188 dev->data->tx_queues = NULL; 1189 } 1190 dev->data->nb_tx_queues = nb_queues; 1191 return 0; 1192 } 1193 1194 uint32_t 1195 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1196 { 1197 switch (speed) { 1198 case RTE_ETH_SPEED_NUM_10M: 1199 return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 1200 case RTE_ETH_SPEED_NUM_100M: 1201 return duplex ? 
RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 1202 case RTE_ETH_SPEED_NUM_1G: 1203 return RTE_ETH_LINK_SPEED_1G; 1204 case RTE_ETH_SPEED_NUM_2_5G: 1205 return RTE_ETH_LINK_SPEED_2_5G; 1206 case RTE_ETH_SPEED_NUM_5G: 1207 return RTE_ETH_LINK_SPEED_5G; 1208 case RTE_ETH_SPEED_NUM_10G: 1209 return RTE_ETH_LINK_SPEED_10G; 1210 case RTE_ETH_SPEED_NUM_20G: 1211 return RTE_ETH_LINK_SPEED_20G; 1212 case RTE_ETH_SPEED_NUM_25G: 1213 return RTE_ETH_LINK_SPEED_25G; 1214 case RTE_ETH_SPEED_NUM_40G: 1215 return RTE_ETH_LINK_SPEED_40G; 1216 case RTE_ETH_SPEED_NUM_50G: 1217 return RTE_ETH_LINK_SPEED_50G; 1218 case RTE_ETH_SPEED_NUM_56G: 1219 return RTE_ETH_LINK_SPEED_56G; 1220 case RTE_ETH_SPEED_NUM_100G: 1221 return RTE_ETH_LINK_SPEED_100G; 1222 case RTE_ETH_SPEED_NUM_200G: 1223 return RTE_ETH_LINK_SPEED_200G; 1224 default: 1225 return 0; 1226 } 1227 } 1228 1229 const char * 1230 rte_eth_dev_rx_offload_name(uint64_t offload) 1231 { 1232 const char *name = "UNKNOWN"; 1233 unsigned int i; 1234 1235 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1236 if (offload == eth_dev_rx_offload_names[i].offload) { 1237 name = eth_dev_rx_offload_names[i].name; 1238 break; 1239 } 1240 } 1241 1242 return name; 1243 } 1244 1245 const char * 1246 rte_eth_dev_tx_offload_name(uint64_t offload) 1247 { 1248 const char *name = "UNKNOWN"; 1249 unsigned int i; 1250 1251 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1252 if (offload == eth_dev_tx_offload_names[i].offload) { 1253 name = eth_dev_tx_offload_names[i].name; 1254 break; 1255 } 1256 } 1257 1258 return name; 1259 } 1260 1261 const char * 1262 rte_eth_dev_capability_name(uint64_t capability) 1263 { 1264 const char *name = "UNKNOWN"; 1265 unsigned int i; 1266 1267 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 1268 if (capability == rte_eth_dev_capa_names[i].offload) { 1269 name = rte_eth_dev_capa_names[i].name; 1270 break; 1271 } 1272 } 1273 1274 return name; 1275 } 1276 1277 static inline int 1278 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1279 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1280 { 1281 int ret = 0; 1282 1283 if (dev_info_size == 0) { 1284 if (config_size != max_rx_pkt_len) { 1285 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1286 " %u != %u is not allowed\n", 1287 port_id, config_size, max_rx_pkt_len); 1288 ret = -EINVAL; 1289 } 1290 } else if (config_size > dev_info_size) { 1291 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1292 "> max allowed value %u\n", port_id, config_size, 1293 dev_info_size); 1294 ret = -EINVAL; 1295 } else if (config_size < RTE_ETHER_MIN_LEN) { 1296 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1297 "< min allowed value %u\n", port_id, config_size, 1298 (unsigned int)RTE_ETHER_MIN_LEN); 1299 ret = -EINVAL; 1300 } 1301 return ret; 1302 } 1303 1304 /* 1305 * Validate offloads that are requested through rte_eth_dev_configure against 1306 * the offloads successfully set by the Ethernet device. 1307 * 1308 * @param port_id 1309 * The port identifier of the Ethernet device. 1310 * @param req_offloads 1311 * The offloads that have been requested through `rte_eth_dev_configure`. 1312 * @param set_offloads 1313 * The offloads successfully set by the Ethernet device. 1314 * @param offload_type 1315 * The offload type i.e. Rx/Tx string. 1316 * @param offload_name 1317 * The function that prints the offload name. 1318 * @return 1319 * - (0) if validation successful. 1320 * - (-EINVAL) if requested offload has been silently disabled. 
1321 * 1322 */ 1323 static int 1324 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1325 uint64_t set_offloads, const char *offload_type, 1326 const char *(*offload_name)(uint64_t)) 1327 { 1328 uint64_t offloads_diff = req_offloads ^ set_offloads; 1329 uint64_t offload; 1330 int ret = 0; 1331 1332 while (offloads_diff != 0) { 1333 /* Check if any offload is requested but not enabled. */ 1334 offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); 1335 if (offload & req_offloads) { 1336 RTE_ETHDEV_LOG(ERR, 1337 "Port %u failed to enable %s offload %s\n", 1338 port_id, offload_type, offload_name(offload)); 1339 ret = -EINVAL; 1340 } 1341 1342 /* Check if offload couldn't be disabled. */ 1343 if (offload & set_offloads) { 1344 RTE_ETHDEV_LOG(DEBUG, 1345 "Port %u %s offload %s is not requested but enabled\n", 1346 port_id, offload_type, offload_name(offload)); 1347 } 1348 1349 offloads_diff &= ~offload; 1350 } 1351 1352 return ret; 1353 } 1354 1355 static uint32_t 1356 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1357 { 1358 uint32_t overhead_len; 1359 1360 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1361 overhead_len = max_rx_pktlen - max_mtu; 1362 else 1363 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1364 1365 return overhead_len; 1366 } 1367 1368 /* rte_eth_dev_info_get() should be called prior to this function */ 1369 static int 1370 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1371 uint16_t mtu) 1372 { 1373 uint32_t overhead_len; 1374 uint32_t frame_size; 1375 1376 if (mtu < dev_info->min_mtu) { 1377 RTE_ETHDEV_LOG(ERR, 1378 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1379 mtu, dev_info->min_mtu, port_id); 1380 return -EINVAL; 1381 } 1382 if (mtu > dev_info->max_mtu) { 1383 RTE_ETHDEV_LOG(ERR, 1384 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1385 mtu, dev_info->max_mtu, port_id); 1386 return -EINVAL; 1387 } 1388 1389 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1390 dev_info->max_mtu); 1391 frame_size = mtu + overhead_len; 1392 if (frame_size < RTE_ETHER_MIN_LEN) { 1393 RTE_ETHDEV_LOG(ERR, 1394 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1395 frame_size, RTE_ETHER_MIN_LEN, port_id); 1396 return -EINVAL; 1397 } 1398 1399 if (frame_size > dev_info->max_rx_pktlen) { 1400 RTE_ETHDEV_LOG(ERR, 1401 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1402 frame_size, dev_info->max_rx_pktlen, port_id); 1403 return -EINVAL; 1404 } 1405 1406 return 0; 1407 } 1408 1409 int 1410 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1411 const struct rte_eth_conf *dev_conf) 1412 { 1413 struct rte_eth_dev *dev; 1414 struct rte_eth_dev_info dev_info; 1415 struct rte_eth_conf orig_conf; 1416 int diag; 1417 int ret; 1418 uint16_t old_mtu; 1419 1420 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1421 dev = &rte_eth_devices[port_id]; 1422 1423 if (dev_conf == NULL) { 1424 RTE_ETHDEV_LOG(ERR, 1425 "Cannot configure ethdev port %u from NULL config\n", 1426 port_id); 1427 return -EINVAL; 1428 } 1429 1430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1431 1432 if (dev->data->dev_started) { 1433 RTE_ETHDEV_LOG(ERR, 1434 "Port %u must be stopped to allow configuration\n", 1435 port_id); 1436 return -EBUSY; 1437 } 1438 1439 /* 1440 * Ensure that "dev_configured" is always 0 each time prepare to do 1441 * dev_configure() to avoid any non-anticipated behaviour. 1442 * And set to 1 when dev_configure() is executed successfully. 
1443 */ 1444 dev->data->dev_configured = 0; 1445 1446 /* Store original config, as rollback required on failure */ 1447 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1448 1449 /* 1450 * Copy the dev_conf parameter into the dev structure. 1451 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1452 */ 1453 if (dev_conf != &dev->data->dev_conf) 1454 memcpy(&dev->data->dev_conf, dev_conf, 1455 sizeof(dev->data->dev_conf)); 1456 1457 /* Backup mtu for rollback */ 1458 old_mtu = dev->data->mtu; 1459 1460 ret = rte_eth_dev_info_get(port_id, &dev_info); 1461 if (ret != 0) 1462 goto rollback; 1463 1464 /* If number of queues specified by application for both Rx and Tx is 1465 * zero, use driver preferred values. This cannot be done individually 1466 * as it is valid for either Tx or Rx (but not both) to be zero. 1467 * If driver does not provide any preferred valued, fall back on 1468 * EAL defaults. 1469 */ 1470 if (nb_rx_q == 0 && nb_tx_q == 0) { 1471 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1472 if (nb_rx_q == 0) 1473 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1474 nb_tx_q = dev_info.default_txportconf.nb_queues; 1475 if (nb_tx_q == 0) 1476 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1477 } 1478 1479 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1480 RTE_ETHDEV_LOG(ERR, 1481 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1482 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1483 ret = -EINVAL; 1484 goto rollback; 1485 } 1486 1487 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1488 RTE_ETHDEV_LOG(ERR, 1489 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1490 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1491 ret = -EINVAL; 1492 goto rollback; 1493 } 1494 1495 /* 1496 * Check that the numbers of Rx and Tx queues are not greater 1497 * than the maximum number of Rx and Tx queues supported by the 1498 * configured device. 1499 */ 1500 if (nb_rx_q > dev_info.max_rx_queues) { 1501 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1502 port_id, nb_rx_q, dev_info.max_rx_queues); 1503 ret = -EINVAL; 1504 goto rollback; 1505 } 1506 1507 if (nb_tx_q > dev_info.max_tx_queues) { 1508 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1509 port_id, nb_tx_q, dev_info.max_tx_queues); 1510 ret = -EINVAL; 1511 goto rollback; 1512 } 1513 1514 /* Check that the device supports requested interrupts */ 1515 if ((dev_conf->intr_conf.lsc == 1) && 1516 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1517 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1518 dev->device->driver->name); 1519 ret = -EINVAL; 1520 goto rollback; 1521 } 1522 if ((dev_conf->intr_conf.rmv == 1) && 1523 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1524 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1525 dev->device->driver->name); 1526 ret = -EINVAL; 1527 goto rollback; 1528 } 1529 1530 if (dev_conf->rxmode.mtu == 0) 1531 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1532 1533 ret = eth_dev_validate_mtu(port_id, &dev_info, 1534 dev->data->dev_conf.rxmode.mtu); 1535 if (ret != 0) 1536 goto rollback; 1537 1538 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1539 1540 /* 1541 * If LRO is enabled, check that the maximum aggregated packet 1542 * size is supported by the configured device. 
1543 */ 1544 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1545 uint32_t max_rx_pktlen; 1546 uint32_t overhead_len; 1547 1548 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1549 dev_info.max_mtu); 1550 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1551 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1552 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1553 ret = eth_dev_check_lro_pkt_size(port_id, 1554 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1555 max_rx_pktlen, 1556 dev_info.max_lro_pkt_size); 1557 if (ret != 0) 1558 goto rollback; 1559 } 1560 1561 /* Any requested offloading must be within its device capabilities */ 1562 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1563 dev_conf->rxmode.offloads) { 1564 RTE_ETHDEV_LOG(ERR, 1565 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1566 "capabilities 0x%"PRIx64" in %s()\n", 1567 port_id, dev_conf->rxmode.offloads, 1568 dev_info.rx_offload_capa, 1569 __func__); 1570 ret = -EINVAL; 1571 goto rollback; 1572 } 1573 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1574 dev_conf->txmode.offloads) { 1575 RTE_ETHDEV_LOG(ERR, 1576 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1577 "capabilities 0x%"PRIx64" in %s()\n", 1578 port_id, dev_conf->txmode.offloads, 1579 dev_info.tx_offload_capa, 1580 __func__); 1581 ret = -EINVAL; 1582 goto rollback; 1583 } 1584 1585 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1586 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1587 1588 /* Check that device supports requested rss hash functions. */ 1589 if ((dev_info.flow_type_rss_offloads | 1590 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1591 dev_info.flow_type_rss_offloads) { 1592 RTE_ETHDEV_LOG(ERR, 1593 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1594 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1595 dev_info.flow_type_rss_offloads); 1596 ret = -EINVAL; 1597 goto rollback; 1598 } 1599 1600 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1601 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1602 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1603 RTE_ETHDEV_LOG(ERR, 1604 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1605 port_id, 1606 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1607 ret = -EINVAL; 1608 goto rollback; 1609 } 1610 1611 /* 1612 * Setup new number of Rx/Tx queues and reconfigure device. 1613 */ 1614 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1615 if (diag != 0) { 1616 RTE_ETHDEV_LOG(ERR, 1617 "Port%u eth_dev_rx_queue_config = %d\n", 1618 port_id, diag); 1619 ret = diag; 1620 goto rollback; 1621 } 1622 1623 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1624 if (diag != 0) { 1625 RTE_ETHDEV_LOG(ERR, 1626 "Port%u eth_dev_tx_queue_config = %d\n", 1627 port_id, diag); 1628 eth_dev_rx_queue_config(dev, 0); 1629 ret = diag; 1630 goto rollback; 1631 } 1632 1633 diag = (*dev->dev_ops->dev_configure)(dev); 1634 if (diag != 0) { 1635 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1636 port_id, diag); 1637 ret = eth_err(port_id, diag); 1638 goto reset_queues; 1639 } 1640 1641 /* Initialize Rx profiling if enabled at compilation time. 
*/ 1642 diag = __rte_eth_dev_profile_init(port_id, dev); 1643 if (diag != 0) { 1644 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1645 port_id, diag); 1646 ret = eth_err(port_id, diag); 1647 goto reset_queues; 1648 } 1649 1650 /* Validate Rx offloads. */ 1651 diag = eth_dev_validate_offloads(port_id, 1652 dev_conf->rxmode.offloads, 1653 dev->data->dev_conf.rxmode.offloads, "Rx", 1654 rte_eth_dev_rx_offload_name); 1655 if (diag != 0) { 1656 ret = diag; 1657 goto reset_queues; 1658 } 1659 1660 /* Validate Tx offloads. */ 1661 diag = eth_dev_validate_offloads(port_id, 1662 dev_conf->txmode.offloads, 1663 dev->data->dev_conf.txmode.offloads, "Tx", 1664 rte_eth_dev_tx_offload_name); 1665 if (diag != 0) { 1666 ret = diag; 1667 goto reset_queues; 1668 } 1669 1670 dev->data->dev_configured = 1; 1671 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1672 return 0; 1673 reset_queues: 1674 eth_dev_rx_queue_config(dev, 0); 1675 eth_dev_tx_queue_config(dev, 0); 1676 rollback: 1677 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1678 if (old_mtu != dev->data->mtu) 1679 dev->data->mtu = old_mtu; 1680 1681 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1682 return ret; 1683 } 1684 1685 void 1686 rte_eth_dev_internal_reset(struct rte_eth_dev *dev) 1687 { 1688 if (dev->data->dev_started) { 1689 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1690 dev->data->port_id); 1691 return; 1692 } 1693 1694 eth_dev_rx_queue_config(dev, 0); 1695 eth_dev_tx_queue_config(dev, 0); 1696 1697 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1698 } 1699 1700 static void 1701 eth_dev_mac_restore(struct rte_eth_dev *dev, 1702 struct rte_eth_dev_info *dev_info) 1703 { 1704 struct rte_ether_addr *addr; 1705 uint16_t i; 1706 uint32_t pool = 0; 1707 uint64_t pool_mask; 1708 1709 /* replay MAC address configuration including default MAC */ 1710 addr = &dev->data->mac_addrs[0]; 1711 if (*dev->dev_ops->mac_addr_set != NULL) 1712 (*dev->dev_ops->mac_addr_set)(dev, addr); 1713 else if (*dev->dev_ops->mac_addr_add != NULL) 1714 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1715 1716 if (*dev->dev_ops->mac_addr_add != NULL) { 1717 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1718 addr = &dev->data->mac_addrs[i]; 1719 1720 /* skip zero address */ 1721 if (rte_is_zero_ether_addr(addr)) 1722 continue; 1723 1724 pool = 0; 1725 pool_mask = dev->data->mac_pool_sel[i]; 1726 1727 do { 1728 if (pool_mask & UINT64_C(1)) 1729 (*dev->dev_ops->mac_addr_add)(dev, 1730 addr, i, pool); 1731 pool_mask >>= 1; 1732 pool++; 1733 } while (pool_mask); 1734 } 1735 } 1736 } 1737 1738 static int 1739 eth_dev_config_restore(struct rte_eth_dev *dev, 1740 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1741 { 1742 int ret; 1743 1744 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1745 eth_dev_mac_restore(dev, dev_info); 1746 1747 /* replay promiscuous configuration */ 1748 /* 1749 * use callbacks directly since we don't need port_id check and 1750 * would like to bypass the same value set 1751 */ 1752 if (rte_eth_promiscuous_get(port_id) == 1 && 1753 *dev->dev_ops->promiscuous_enable != NULL) { 1754 ret = eth_err(port_id, 1755 (*dev->dev_ops->promiscuous_enable)(dev)); 1756 if (ret != 0 && ret != -ENOTSUP) { 1757 RTE_ETHDEV_LOG(ERR, 1758 "Failed to enable promiscuous mode for device (port %u): %s\n", 1759 port_id, rte_strerror(-ret)); 1760 return ret; 1761 } 1762 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1763 
*dev->dev_ops->promiscuous_disable != NULL) { 1764 ret = eth_err(port_id, 1765 (*dev->dev_ops->promiscuous_disable)(dev)); 1766 if (ret != 0 && ret != -ENOTSUP) { 1767 RTE_ETHDEV_LOG(ERR, 1768 "Failed to disable promiscuous mode for device (port %u): %s\n", 1769 port_id, rte_strerror(-ret)); 1770 return ret; 1771 } 1772 } 1773 1774 /* replay all multicast configuration */ 1775 /* 1776 * use callbacks directly since we don't need port_id check and 1777 * would like to bypass the same value set 1778 */ 1779 if (rte_eth_allmulticast_get(port_id) == 1 && 1780 *dev->dev_ops->allmulticast_enable != NULL) { 1781 ret = eth_err(port_id, 1782 (*dev->dev_ops->allmulticast_enable)(dev)); 1783 if (ret != 0 && ret != -ENOTSUP) { 1784 RTE_ETHDEV_LOG(ERR, 1785 "Failed to enable allmulticast mode for device (port %u): %s\n", 1786 port_id, rte_strerror(-ret)); 1787 return ret; 1788 } 1789 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1790 *dev->dev_ops->allmulticast_disable != NULL) { 1791 ret = eth_err(port_id, 1792 (*dev->dev_ops->allmulticast_disable)(dev)); 1793 if (ret != 0 && ret != -ENOTSUP) { 1794 RTE_ETHDEV_LOG(ERR, 1795 "Failed to disable allmulticast mode for device (port %u): %s\n", 1796 port_id, rte_strerror(-ret)); 1797 return ret; 1798 } 1799 } 1800 1801 return 0; 1802 } 1803 1804 int 1805 rte_eth_dev_start(uint16_t port_id) 1806 { 1807 struct rte_eth_dev *dev; 1808 struct rte_eth_dev_info dev_info; 1809 int diag; 1810 int ret, ret_stop; 1811 1812 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1813 dev = &rte_eth_devices[port_id]; 1814 1815 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1816 1817 if (dev->data->dev_configured == 0) { 1818 RTE_ETHDEV_LOG(INFO, 1819 "Device with port_id=%"PRIu16" is not configured.\n", 1820 port_id); 1821 return -EINVAL; 1822 } 1823 1824 if (dev->data->dev_started != 0) { 1825 RTE_ETHDEV_LOG(INFO, 1826 "Device with port_id=%"PRIu16" already started\n", 1827 port_id); 1828 return 0; 1829 } 1830 1831 ret = rte_eth_dev_info_get(port_id, &dev_info); 1832 if (ret != 0) 1833 return ret; 1834 1835 /* Lets restore MAC now if device does not support live change */ 1836 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1837 eth_dev_mac_restore(dev, &dev_info); 1838 1839 diag = (*dev->dev_ops->dev_start)(dev); 1840 if (diag == 0) 1841 dev->data->dev_started = 1; 1842 else 1843 return eth_err(port_id, diag); 1844 1845 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1846 if (ret != 0) { 1847 RTE_ETHDEV_LOG(ERR, 1848 "Error during restoring configuration for device (port %u): %s\n", 1849 port_id, rte_strerror(-ret)); 1850 ret_stop = rte_eth_dev_stop(port_id); 1851 if (ret_stop != 0) { 1852 RTE_ETHDEV_LOG(ERR, 1853 "Failed to stop device (port %u): %s\n", 1854 port_id, rte_strerror(-ret_stop)); 1855 } 1856 1857 return ret; 1858 } 1859 1860 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1861 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1862 (*dev->dev_ops->link_update)(dev, 0); 1863 } 1864 1865 /* expose selection of PMD fast-path functions */ 1866 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1867 1868 rte_ethdev_trace_start(port_id); 1869 return 0; 1870 } 1871 1872 int 1873 rte_eth_dev_stop(uint16_t port_id) 1874 { 1875 struct rte_eth_dev *dev; 1876 int ret; 1877 1878 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1879 dev = &rte_eth_devices[port_id]; 1880 1881 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); 1882 1883 if (dev->data->dev_started == 0) { 1884 RTE_ETHDEV_LOG(INFO, 1885 "Device with 
port_id=%"PRIu16" already stopped\n", 1886 port_id); 1887 return 0; 1888 } 1889 1890 /* point fast-path functions to dummy ones */ 1891 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1892 1893 dev->data->dev_started = 0; 1894 ret = (*dev->dev_ops->dev_stop)(dev); 1895 rte_ethdev_trace_stop(port_id, ret); 1896 1897 return ret; 1898 } 1899 1900 int 1901 rte_eth_dev_set_link_up(uint16_t port_id) 1902 { 1903 struct rte_eth_dev *dev; 1904 1905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1906 dev = &rte_eth_devices[port_id]; 1907 1908 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1909 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1910 } 1911 1912 int 1913 rte_eth_dev_set_link_down(uint16_t port_id) 1914 { 1915 struct rte_eth_dev *dev; 1916 1917 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1918 dev = &rte_eth_devices[port_id]; 1919 1920 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1921 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1922 } 1923 1924 int 1925 rte_eth_dev_close(uint16_t port_id) 1926 { 1927 struct rte_eth_dev *dev; 1928 int firsterr, binerr; 1929 int *lasterr = &firsterr; 1930 1931 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1932 dev = &rte_eth_devices[port_id]; 1933 1934 if (dev->data->dev_started) { 1935 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1936 port_id); 1937 return -EINVAL; 1938 } 1939 1940 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1941 *lasterr = (*dev->dev_ops->dev_close)(dev); 1942 if (*lasterr != 0) 1943 lasterr = &binerr; 1944 1945 rte_ethdev_trace_close(port_id); 1946 *lasterr = rte_eth_dev_release_port(dev); 1947 1948 return firsterr; 1949 } 1950 1951 int 1952 rte_eth_dev_reset(uint16_t port_id) 1953 { 1954 struct rte_eth_dev *dev; 1955 int ret; 1956 1957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1958 dev = &rte_eth_devices[port_id]; 1959 1960 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1961 1962 ret = rte_eth_dev_stop(port_id); 1963 if (ret != 0) { 1964 RTE_ETHDEV_LOG(ERR, 1965 "Failed to stop device (port %u) before reset: %s - ignore\n", 1966 port_id, rte_strerror(-ret)); 1967 } 1968 ret = dev->dev_ops->dev_reset(dev); 1969 1970 return eth_err(port_id, ret); 1971 } 1972 1973 int 1974 rte_eth_dev_is_removed(uint16_t port_id) 1975 { 1976 struct rte_eth_dev *dev; 1977 int ret; 1978 1979 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1980 dev = &rte_eth_devices[port_id]; 1981 1982 if (dev->state == RTE_ETH_DEV_REMOVED) 1983 return 1; 1984 1985 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1986 1987 ret = dev->dev_ops->is_removed(dev); 1988 if (ret != 0) 1989 /* Device is physically removed. */ 1990 dev->state = RTE_ETH_DEV_REMOVED; 1991 1992 return ret; 1993 } 1994 1995 static int 1996 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1997 uint16_t n_seg, uint32_t *mbp_buf_size, 1998 const struct rte_eth_dev_info *dev_info) 1999 { 2000 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 2001 struct rte_mempool *mp_first; 2002 uint32_t offset_mask; 2003 uint16_t seg_idx; 2004 2005 if (n_seg > seg_capa->max_nseg) { 2006 RTE_ETHDEV_LOG(ERR, 2007 "Requested Rx segments %u exceed supported %u\n", 2008 n_seg, seg_capa->max_nseg); 2009 return -EINVAL; 2010 } 2011 /* 2012 * Check the sizes and offsets against buffer sizes 2013 * for each segment specified in extended configuration. 
2014 */ 2015 mp_first = rx_seg[0].mp; 2016 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 2017 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 2018 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 2019 uint32_t length = rx_seg[seg_idx].length; 2020 uint32_t offset = rx_seg[seg_idx].offset; 2021 2022 if (mpl == NULL) { 2023 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 2024 return -EINVAL; 2025 } 2026 if (seg_idx != 0 && mp_first != mpl && 2027 seg_capa->multi_pools == 0) { 2028 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 2029 return -ENOTSUP; 2030 } 2031 if (offset != 0) { 2032 if (seg_capa->offset_allowed == 0) { 2033 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 2034 return -ENOTSUP; 2035 } 2036 if (offset & offset_mask) { 2037 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2038 offset, 2039 seg_capa->offset_align_log2); 2040 return -EINVAL; 2041 } 2042 } 2043 if (mpl->private_data_size < 2044 sizeof(struct rte_pktmbuf_pool_private)) { 2045 RTE_ETHDEV_LOG(ERR, 2046 "%s private_data_size %u < %u\n", 2047 mpl->name, mpl->private_data_size, 2048 (unsigned int)sizeof 2049 (struct rte_pktmbuf_pool_private)); 2050 return -ENOSPC; 2051 } 2052 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2053 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2054 length = length != 0 ? length : *mbp_buf_size; 2055 if (*mbp_buf_size < length + offset) { 2056 RTE_ETHDEV_LOG(ERR, 2057 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2058 mpl->name, *mbp_buf_size, 2059 length + offset, length, offset); 2060 return -EINVAL; 2061 } 2062 } 2063 return 0; 2064 } 2065 2066 int 2067 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2068 uint16_t nb_rx_desc, unsigned int socket_id, 2069 const struct rte_eth_rxconf *rx_conf, 2070 struct rte_mempool *mp) 2071 { 2072 int ret; 2073 uint32_t mbp_buf_size; 2074 struct rte_eth_dev *dev; 2075 struct rte_eth_dev_info dev_info; 2076 struct rte_eth_rxconf local_conf; 2077 2078 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2079 dev = &rte_eth_devices[port_id]; 2080 2081 if (rx_queue_id >= dev->data->nb_rx_queues) { 2082 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2083 return -EINVAL; 2084 } 2085 2086 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2087 2088 ret = rte_eth_dev_info_get(port_id, &dev_info); 2089 if (ret != 0) 2090 return ret; 2091 2092 if (mp != NULL) { 2093 /* Single pool configuration check. */ 2094 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2095 RTE_ETHDEV_LOG(ERR, 2096 "Ambiguous segment configuration\n"); 2097 return -EINVAL; 2098 } 2099 /* 2100 * Check the size of the mbuf data buffer, this value 2101 * must be provided in the private data of the memory pool. 2102 * First check that the memory pool(s) has a valid private data. 
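	 *
	 * A pool created with rte_pktmbuf_pool_create() already carries
	 * the required rte_pktmbuf_pool_private area, e.g. (illustrative
	 * values only):
	 *
	 *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
	 *                                RTE_MBUF_DEFAULT_BUF_SIZE,
	 *                                rte_socket_id());
	 *
	 * Pools assembled by hand with rte_mempool_create() must reserve
	 * at least sizeof(struct rte_pktmbuf_pool_private) of private
	 * data, otherwise the check below fails with -ENOSPC.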
2103 */ 2104 if (mp->private_data_size < 2105 sizeof(struct rte_pktmbuf_pool_private)) { 2106 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2107 mp->name, mp->private_data_size, 2108 (unsigned int) 2109 sizeof(struct rte_pktmbuf_pool_private)); 2110 return -ENOSPC; 2111 } 2112 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2113 if (mbp_buf_size < dev_info.min_rx_bufsize + 2114 RTE_PKTMBUF_HEADROOM) { 2115 RTE_ETHDEV_LOG(ERR, 2116 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2117 mp->name, mbp_buf_size, 2118 RTE_PKTMBUF_HEADROOM + 2119 dev_info.min_rx_bufsize, 2120 RTE_PKTMBUF_HEADROOM, 2121 dev_info.min_rx_bufsize); 2122 return -EINVAL; 2123 } 2124 } else { 2125 const struct rte_eth_rxseg_split *rx_seg; 2126 uint16_t n_seg; 2127 2128 /* Extended multi-segment configuration check. */ 2129 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2130 RTE_ETHDEV_LOG(ERR, 2131 "Memory pool is null and no extended configuration provided\n"); 2132 return -EINVAL; 2133 } 2134 2135 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2136 n_seg = rx_conf->rx_nseg; 2137 2138 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2139 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2140 &mbp_buf_size, 2141 &dev_info); 2142 if (ret != 0) 2143 return ret; 2144 } else { 2145 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2146 return -EINVAL; 2147 } 2148 } 2149 2150 /* Use default specified by driver, if nb_rx_desc is zero */ 2151 if (nb_rx_desc == 0) { 2152 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2153 /* If driver default is also zero, fall back on EAL default */ 2154 if (nb_rx_desc == 0) 2155 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2156 } 2157 2158 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2159 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2160 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2161 2162 RTE_ETHDEV_LOG(ERR, 2163 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2164 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2165 dev_info.rx_desc_lim.nb_min, 2166 dev_info.rx_desc_lim.nb_align); 2167 return -EINVAL; 2168 } 2169 2170 if (dev->data->dev_started && 2171 !(dev_info.dev_capa & 2172 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2173 return -EBUSY; 2174 2175 if (dev->data->dev_started && 2176 (dev->data->rx_queue_state[rx_queue_id] != 2177 RTE_ETH_QUEUE_STATE_STOPPED)) 2178 return -EBUSY; 2179 2180 eth_dev_rxq_release(dev, rx_queue_id); 2181 2182 if (rx_conf == NULL) 2183 rx_conf = &dev_info.default_rxconf; 2184 2185 local_conf = *rx_conf; 2186 2187 /* 2188 * If an offloading has already been enabled in 2189 * rte_eth_dev_configure(), it has been enabled on all queues, 2190 * so there is no need to enable it in this queue again. 2191 * The local_conf.offloads input to underlying PMD only carries 2192 * those offloadings which are only enabled on this queue and 2193 * not enabled on all queues. 2194 */ 2195 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2196 2197 /* 2198 * New added offloadings for this queue are those not enabled in 2199 * rte_eth_dev_configure() and they must be per-queue type. 2200 * A pure per-port offloading can't be enabled on a queue while 2201 * disabled on another queue. A pure per-port offloading can't 2202 * be enabled for any queue as new added one if it hasn't been 2203 * enabled in rte_eth_dev_configure(). 
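	 *
	 * For example (illustrative): if rte_eth_dev_configure() enabled
	 * RTE_ETH_RX_OFFLOAD_RSS_HASH port-wide and this rx_conf
	 * additionally requests RTE_ETH_RX_OFFLOAD_SCATTER, only SCATTER
	 * remains in local_conf.offloads after the masking above, and it
	 * must then appear in dev_info.rx_queue_offload_capa for the
	 * check below to pass.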
2204 */ 2205 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2206 local_conf.offloads) { 2207 RTE_ETHDEV_LOG(ERR, 2208 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2209 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2210 port_id, rx_queue_id, local_conf.offloads, 2211 dev_info.rx_queue_offload_capa, 2212 __func__); 2213 return -EINVAL; 2214 } 2215 2216 if (local_conf.share_group > 0 && 2217 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2218 RTE_ETHDEV_LOG(ERR, 2219 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2220 port_id, rx_queue_id, local_conf.share_group); 2221 return -EINVAL; 2222 } 2223 2224 /* 2225 * If LRO is enabled, check that the maximum aggregated packet 2226 * size is supported by the configured device. 2227 */ 2228 /* Get the real Ethernet overhead length */ 2229 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2230 uint32_t overhead_len; 2231 uint32_t max_rx_pktlen; 2232 int ret; 2233 2234 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2235 dev_info.max_mtu); 2236 max_rx_pktlen = dev->data->mtu + overhead_len; 2237 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2238 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2239 ret = eth_dev_check_lro_pkt_size(port_id, 2240 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2241 max_rx_pktlen, 2242 dev_info.max_lro_pkt_size); 2243 if (ret != 0) 2244 return ret; 2245 } 2246 2247 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2248 socket_id, &local_conf, mp); 2249 if (!ret) { 2250 if (!dev->data->min_rx_buf_size || 2251 dev->data->min_rx_buf_size > mbp_buf_size) 2252 dev->data->min_rx_buf_size = mbp_buf_size; 2253 } 2254 2255 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2256 rx_conf, ret); 2257 return eth_err(port_id, ret); 2258 } 2259 2260 int 2261 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2262 uint16_t nb_rx_desc, 2263 const struct rte_eth_hairpin_conf *conf) 2264 { 2265 int ret; 2266 struct rte_eth_dev *dev; 2267 struct rte_eth_hairpin_cap cap; 2268 int i; 2269 int count; 2270 2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2272 dev = &rte_eth_devices[port_id]; 2273 2274 if (rx_queue_id >= dev->data->nb_rx_queues) { 2275 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2276 return -EINVAL; 2277 } 2278 2279 if (conf == NULL) { 2280 RTE_ETHDEV_LOG(ERR, 2281 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2282 port_id); 2283 return -EINVAL; 2284 } 2285 2286 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2287 if (ret != 0) 2288 return ret; 2289 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2290 -ENOTSUP); 2291 /* if nb_rx_desc is zero use max number of desc from the driver. 
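	 *
	 * Typical usage (illustrative sketch, not the only valid
	 * sequence): query the limits with
	 * rte_eth_dev_hairpin_capability_get(), then request e.g. one
	 * peer per queue:
	 *
	 *   struct rte_eth_hairpin_conf hconf = { .peer_count = 1 };
	 *   hconf.peers[0].port = peer_port_id;
	 *   hconf.peers[0].queue = peer_queue_id;
	 *   ret = rte_eth_rx_hairpin_queue_setup(port_id, queue_id, 0, &hconf);
	 *
	 * Passing 0 descriptors selects cap.max_nb_desc as done below;
	 * when hconf.manual_bind is set, the two ports must additionally
	 * be coupled with rte_eth_hairpin_bind() after both are started.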
 */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->tx_queue_state[tx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_tx_desc is zero, use max number of desc from the driver.
 */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
			       " from Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
			       size_t len, uint32_t direction)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (peer_ports == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (len == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
				-ENOTSUP);

	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
						      len, direction);
	if (ret < 0)
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2578 port_id, direction ? "Rx" : "Tx"); 2579 2580 return ret; 2581 } 2582 2583 void 2584 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2585 void *userdata __rte_unused) 2586 { 2587 rte_pktmbuf_free_bulk(pkts, unsent); 2588 } 2589 2590 void 2591 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2592 void *userdata) 2593 { 2594 uint64_t *count = userdata; 2595 2596 rte_pktmbuf_free_bulk(pkts, unsent); 2597 *count += unsent; 2598 } 2599 2600 int 2601 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2602 buffer_tx_error_fn cbfn, void *userdata) 2603 { 2604 if (buffer == NULL) { 2605 RTE_ETHDEV_LOG(ERR, 2606 "Cannot set Tx buffer error callback to NULL buffer\n"); 2607 return -EINVAL; 2608 } 2609 2610 buffer->error_callback = cbfn; 2611 buffer->error_userdata = userdata; 2612 return 0; 2613 } 2614 2615 int 2616 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2617 { 2618 int ret = 0; 2619 2620 if (buffer == NULL) { 2621 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2622 return -EINVAL; 2623 } 2624 2625 buffer->size = size; 2626 if (buffer->error_callback == NULL) { 2627 ret = rte_eth_tx_buffer_set_err_callback( 2628 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2629 } 2630 2631 return ret; 2632 } 2633 2634 int 2635 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2636 { 2637 struct rte_eth_dev *dev; 2638 int ret; 2639 2640 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2641 dev = &rte_eth_devices[port_id]; 2642 2643 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2644 2645 /* Call driver to free pending mbufs. */ 2646 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2647 free_cnt); 2648 return eth_err(port_id, ret); 2649 } 2650 2651 int 2652 rte_eth_promiscuous_enable(uint16_t port_id) 2653 { 2654 struct rte_eth_dev *dev; 2655 int diag = 0; 2656 2657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2658 dev = &rte_eth_devices[port_id]; 2659 2660 if (dev->data->promiscuous == 1) 2661 return 0; 2662 2663 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2664 2665 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2666 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2667 2668 return eth_err(port_id, diag); 2669 } 2670 2671 int 2672 rte_eth_promiscuous_disable(uint16_t port_id) 2673 { 2674 struct rte_eth_dev *dev; 2675 int diag = 0; 2676 2677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2678 dev = &rte_eth_devices[port_id]; 2679 2680 if (dev->data->promiscuous == 0) 2681 return 0; 2682 2683 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2684 2685 dev->data->promiscuous = 0; 2686 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2687 if (diag != 0) 2688 dev->data->promiscuous = 1; 2689 2690 return eth_err(port_id, diag); 2691 } 2692 2693 int 2694 rte_eth_promiscuous_get(uint16_t port_id) 2695 { 2696 struct rte_eth_dev *dev; 2697 2698 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2699 dev = &rte_eth_devices[port_id]; 2700 2701 return dev->data->promiscuous; 2702 } 2703 2704 int 2705 rte_eth_allmulticast_enable(uint16_t port_id) 2706 { 2707 struct rte_eth_dev *dev; 2708 int diag; 2709 2710 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2711 dev = &rte_eth_devices[port_id]; 2712 2713 if (dev->data->all_multicast == 1) 2714 return 0; 2715 2716 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2717 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2718 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2719 2720 return eth_err(port_id, diag); 2721 } 2722 2723 int 2724 rte_eth_allmulticast_disable(uint16_t port_id) 2725 { 2726 struct rte_eth_dev *dev; 2727 int diag; 2728 2729 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2730 dev = &rte_eth_devices[port_id]; 2731 2732 if (dev->data->all_multicast == 0) 2733 return 0; 2734 2735 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2736 dev->data->all_multicast = 0; 2737 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2738 if (diag != 0) 2739 dev->data->all_multicast = 1; 2740 2741 return eth_err(port_id, diag); 2742 } 2743 2744 int 2745 rte_eth_allmulticast_get(uint16_t port_id) 2746 { 2747 struct rte_eth_dev *dev; 2748 2749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2750 dev = &rte_eth_devices[port_id]; 2751 2752 return dev->data->all_multicast; 2753 } 2754 2755 int 2756 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2757 { 2758 struct rte_eth_dev *dev; 2759 2760 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2761 dev = &rte_eth_devices[port_id]; 2762 2763 if (eth_link == NULL) { 2764 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2765 port_id); 2766 return -EINVAL; 2767 } 2768 2769 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2770 rte_eth_linkstatus_get(dev, eth_link); 2771 else { 2772 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2773 (*dev->dev_ops->link_update)(dev, 1); 2774 *eth_link = dev->data->dev_link; 2775 } 2776 2777 return 0; 2778 } 2779 2780 int 2781 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2782 { 2783 struct rte_eth_dev *dev; 2784 2785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2786 dev = &rte_eth_devices[port_id]; 2787 2788 if (eth_link == NULL) { 2789 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2790 port_id); 2791 return -EINVAL; 2792 } 2793 2794 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2795 rte_eth_linkstatus_get(dev, eth_link); 2796 else { 2797 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2798 (*dev->dev_ops->link_update)(dev, 0); 2799 *eth_link = dev->data->dev_link; 2800 } 2801 2802 return 0; 2803 } 2804 2805 const 
char * 2806 rte_eth_link_speed_to_str(uint32_t link_speed) 2807 { 2808 switch (link_speed) { 2809 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2810 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2811 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2812 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2813 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2814 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2815 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2816 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2817 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2818 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2819 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2820 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2821 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2822 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2823 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2824 default: return "Invalid"; 2825 } 2826 } 2827 2828 int 2829 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2830 { 2831 if (str == NULL) { 2832 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2833 return -EINVAL; 2834 } 2835 2836 if (len == 0) { 2837 RTE_ETHDEV_LOG(ERR, 2838 "Cannot convert link to string with zero size\n"); 2839 return -EINVAL; 2840 } 2841 2842 if (eth_link == NULL) { 2843 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2844 return -EINVAL; 2845 } 2846 2847 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2848 return snprintf(str, len, "Link down"); 2849 else 2850 return snprintf(str, len, "Link up at %s %s %s", 2851 rte_eth_link_speed_to_str(eth_link->link_speed), 2852 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2853 "FDX" : "HDX", 2854 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2855 "Autoneg" : "Fixed"); 2856 } 2857 2858 int 2859 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2860 { 2861 struct rte_eth_dev *dev; 2862 2863 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2864 dev = &rte_eth_devices[port_id]; 2865 2866 if (stats == NULL) { 2867 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2868 port_id); 2869 return -EINVAL; 2870 } 2871 2872 memset(stats, 0, sizeof(*stats)); 2873 2874 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2875 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2876 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2877 } 2878 2879 int 2880 rte_eth_stats_reset(uint16_t port_id) 2881 { 2882 struct rte_eth_dev *dev; 2883 int ret; 2884 2885 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2886 dev = &rte_eth_devices[port_id]; 2887 2888 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2889 ret = (*dev->dev_ops->stats_reset)(dev); 2890 if (ret != 0) 2891 return eth_err(port_id, ret); 2892 2893 dev->data->rx_mbuf_alloc_failed = 0; 2894 2895 return 0; 2896 } 2897 2898 static inline int 2899 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2900 { 2901 uint16_t nb_rxqs, nb_txqs; 2902 int count; 2903 2904 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2905 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2906 2907 count = RTE_NB_STATS; 2908 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2909 count += nb_rxqs * RTE_NB_RXQ_STATS; 2910 count += nb_txqs * RTE_NB_TXQ_STATS; 2911 } 2912 2913 return count; 2914 } 2915 2916 static int 2917 eth_dev_get_xstats_count(uint16_t port_id) 2918 { 2919 struct rte_eth_dev *dev; 2920 int count; 2921 2922 
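	/*
	 * Roughly speaking, the total returned here is the basic-stats
	 * count from eth_dev_get_xstats_basic_count() plus whatever the
	 * driver advertises when its xstats_get_names callback is probed
	 * with a NULL array below. For instance (illustrative only), a
	 * port with 2 Rx and 2 Tx queues and the
	 * RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS flag set contributes
	 * RTE_NB_STATS + 2 * RTE_NB_RXQ_STATS + 2 * RTE_NB_TXQ_STATS
	 * basic entries before any driver-specific counters are added.
	 */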
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2923 dev = &rte_eth_devices[port_id]; 2924 if (dev->dev_ops->xstats_get_names != NULL) { 2925 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2926 if (count < 0) 2927 return eth_err(port_id, count); 2928 } else 2929 count = 0; 2930 2931 2932 count += eth_dev_get_xstats_basic_count(dev); 2933 2934 return count; 2935 } 2936 2937 int 2938 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2939 uint64_t *id) 2940 { 2941 int cnt_xstats, idx_xstat; 2942 2943 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2944 2945 if (xstat_name == NULL) { 2946 RTE_ETHDEV_LOG(ERR, 2947 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2948 port_id); 2949 return -ENOMEM; 2950 } 2951 2952 if (id == NULL) { 2953 RTE_ETHDEV_LOG(ERR, 2954 "Cannot get ethdev port %u xstats ID to NULL\n", 2955 port_id); 2956 return -ENOMEM; 2957 } 2958 2959 /* Get count */ 2960 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2961 if (cnt_xstats < 0) { 2962 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2963 return -ENODEV; 2964 } 2965 2966 /* Get id-name lookup table */ 2967 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2968 2969 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2970 port_id, xstats_names, cnt_xstats, NULL)) { 2971 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2972 return -1; 2973 } 2974 2975 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2976 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2977 *id = idx_xstat; 2978 return 0; 2979 }; 2980 } 2981 2982 return -EINVAL; 2983 } 2984 2985 /* retrieve basic stats names */ 2986 static int 2987 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2988 struct rte_eth_xstat_name *xstats_names) 2989 { 2990 int cnt_used_entries = 0; 2991 uint32_t idx, id_queue; 2992 uint16_t num_q; 2993 2994 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2995 strlcpy(xstats_names[cnt_used_entries].name, 2996 eth_dev_stats_strings[idx].name, 2997 sizeof(xstats_names[0].name)); 2998 cnt_used_entries++; 2999 } 3000 3001 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3002 return cnt_used_entries; 3003 3004 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3005 for (id_queue = 0; id_queue < num_q; id_queue++) { 3006 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3007 snprintf(xstats_names[cnt_used_entries].name, 3008 sizeof(xstats_names[0].name), 3009 "rx_q%u_%s", 3010 id_queue, eth_dev_rxq_stats_strings[idx].name); 3011 cnt_used_entries++; 3012 } 3013 3014 } 3015 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3016 for (id_queue = 0; id_queue < num_q; id_queue++) { 3017 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3018 snprintf(xstats_names[cnt_used_entries].name, 3019 sizeof(xstats_names[0].name), 3020 "tx_q%u_%s", 3021 id_queue, eth_dev_txq_stats_strings[idx].name); 3022 cnt_used_entries++; 3023 } 3024 } 3025 return cnt_used_entries; 3026 } 3027 3028 /* retrieve ethdev extended statistics names */ 3029 int 3030 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3031 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3032 uint64_t *ids) 3033 { 3034 struct rte_eth_xstat_name *xstats_names_copy; 3035 unsigned int no_basic_stat_requested = 1; 3036 unsigned int no_ext_stat_requested = 1; 3037 unsigned int expected_entries; 3038 unsigned int basic_count; 3039 struct rte_eth_dev *dev; 3040 unsigned int i; 3041 int ret; 3042 3043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3044 
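	/*
	 * Callers usually size their buffers with a first probing call
	 * (illustrative sketch only):
	 *
	 *   int n = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	 *   struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
	 *   rte_eth_xstats_get_names_by_id(port_id, names, n, NULL);
	 *
	 * which mirrors what rte_eth_xstats_get_id_by_name() above does
	 * with a variable-length array.
	 */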
dev = &rte_eth_devices[port_id]; 3045 3046 basic_count = eth_dev_get_xstats_basic_count(dev); 3047 ret = eth_dev_get_xstats_count(port_id); 3048 if (ret < 0) 3049 return ret; 3050 expected_entries = (unsigned int)ret; 3051 3052 /* Return max number of stats if no ids given */ 3053 if (!ids) { 3054 if (!xstats_names) 3055 return expected_entries; 3056 else if (xstats_names && size < expected_entries) 3057 return expected_entries; 3058 } 3059 3060 if (ids && !xstats_names) 3061 return -EINVAL; 3062 3063 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3064 uint64_t ids_copy[size]; 3065 3066 for (i = 0; i < size; i++) { 3067 if (ids[i] < basic_count) { 3068 no_basic_stat_requested = 0; 3069 break; 3070 } 3071 3072 /* 3073 * Convert ids to xstats ids that PMD knows. 3074 * ids known by user are basic + extended stats. 3075 */ 3076 ids_copy[i] = ids[i] - basic_count; 3077 } 3078 3079 if (no_basic_stat_requested) 3080 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3081 ids_copy, xstats_names, size); 3082 } 3083 3084 /* Retrieve all stats */ 3085 if (!ids) { 3086 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3087 expected_entries); 3088 if (num_stats < 0 || num_stats > (int)expected_entries) 3089 return num_stats; 3090 else 3091 return expected_entries; 3092 } 3093 3094 xstats_names_copy = calloc(expected_entries, 3095 sizeof(struct rte_eth_xstat_name)); 3096 3097 if (!xstats_names_copy) { 3098 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3099 return -ENOMEM; 3100 } 3101 3102 if (ids) { 3103 for (i = 0; i < size; i++) { 3104 if (ids[i] >= basic_count) { 3105 no_ext_stat_requested = 0; 3106 break; 3107 } 3108 } 3109 } 3110 3111 /* Fill xstats_names_copy structure */ 3112 if (ids && no_ext_stat_requested) { 3113 eth_basic_stats_get_names(dev, xstats_names_copy); 3114 } else { 3115 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3116 expected_entries); 3117 if (ret < 0) { 3118 free(xstats_names_copy); 3119 return ret; 3120 } 3121 } 3122 3123 /* Filter stats */ 3124 for (i = 0; i < size; i++) { 3125 if (ids[i] >= expected_entries) { 3126 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3127 free(xstats_names_copy); 3128 return -1; 3129 } 3130 xstats_names[i] = xstats_names_copy[ids[i]]; 3131 } 3132 3133 free(xstats_names_copy); 3134 return size; 3135 } 3136 3137 int 3138 rte_eth_xstats_get_names(uint16_t port_id, 3139 struct rte_eth_xstat_name *xstats_names, 3140 unsigned int size) 3141 { 3142 struct rte_eth_dev *dev; 3143 int cnt_used_entries; 3144 int cnt_expected_entries; 3145 int cnt_driver_entries; 3146 3147 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3148 if (xstats_names == NULL || cnt_expected_entries < 0 || 3149 (int)size < cnt_expected_entries) 3150 return cnt_expected_entries; 3151 3152 /* port_id checked in eth_dev_get_xstats_count() */ 3153 dev = &rte_eth_devices[port_id]; 3154 3155 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3156 3157 if (dev->dev_ops->xstats_get_names != NULL) { 3158 /* If there are any driver-specific xstats, append them 3159 * to end of list. 
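		 * The resulting name table is therefore laid out as
		 * [0 .. basic_count - 1] generic ethdev statistics followed
		 * by [basic_count .. basic_count + driver_count - 1]
		 * PMD-specific ones, and the xstats IDs reported to
		 * applications are simply these array indices (see
		 * rte_eth_xstats_get() further below).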
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}


static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
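			 * For example, with basic_count equal to N an
			 * application id of N + 5 refers to the 6th
			 * driver-specific counter, so the value passed to
			 * the PMD callback below is simply 5.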
3276 */ 3277 ids_copy[i] = ids[i] - basic_count; 3278 } 3279 3280 if (no_basic_stat_requested) 3281 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3282 values, size); 3283 } 3284 3285 if (ids) { 3286 for (i = 0; i < size; i++) { 3287 if (ids[i] >= basic_count) { 3288 no_ext_stat_requested = 0; 3289 break; 3290 } 3291 } 3292 } 3293 3294 /* Fill the xstats structure */ 3295 if (ids && no_ext_stat_requested) 3296 ret = eth_basic_stats_get(port_id, xstats); 3297 else 3298 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3299 3300 if (ret < 0) 3301 return ret; 3302 num_xstats_filled = (unsigned int)ret; 3303 3304 /* Return all stats */ 3305 if (!ids) { 3306 for (i = 0; i < num_xstats_filled; i++) 3307 values[i] = xstats[i].value; 3308 return expected_entries; 3309 } 3310 3311 /* Filter stats */ 3312 for (i = 0; i < size; i++) { 3313 if (ids[i] >= expected_entries) { 3314 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3315 return -1; 3316 } 3317 values[i] = xstats[ids[i]].value; 3318 } 3319 return size; 3320 } 3321 3322 int 3323 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3324 unsigned int n) 3325 { 3326 struct rte_eth_dev *dev; 3327 unsigned int count = 0, i; 3328 signed int xcount = 0; 3329 uint16_t nb_rxqs, nb_txqs; 3330 int ret; 3331 3332 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3333 dev = &rte_eth_devices[port_id]; 3334 3335 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3336 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3337 3338 /* Return generic statistics */ 3339 count = RTE_NB_STATS; 3340 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3341 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3342 3343 /* implemented by the driver */ 3344 if (dev->dev_ops->xstats_get != NULL) { 3345 /* Retrieve the xstats from the driver at the end of the 3346 * xstats struct. 3347 */ 3348 xcount = (*dev->dev_ops->xstats_get)(dev, 3349 xstats ? xstats + count : NULL, 3350 (n > count) ? 
n - count : 0); 3351 3352 if (xcount < 0) 3353 return eth_err(port_id, xcount); 3354 } 3355 3356 if (n < count + xcount || xstats == NULL) 3357 return count + xcount; 3358 3359 /* now fill the xstats structure */ 3360 ret = eth_basic_stats_get(port_id, xstats); 3361 if (ret < 0) 3362 return ret; 3363 count = ret; 3364 3365 for (i = 0; i < count; i++) 3366 xstats[i].id = i; 3367 /* add an offset to driver-specific stats */ 3368 for ( ; i < count + xcount; i++) 3369 xstats[i].id += count; 3370 3371 return count + xcount; 3372 } 3373 3374 /* reset ethdev extended statistics */ 3375 int 3376 rte_eth_xstats_reset(uint16_t port_id) 3377 { 3378 struct rte_eth_dev *dev; 3379 3380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3381 dev = &rte_eth_devices[port_id]; 3382 3383 /* implemented by the driver */ 3384 if (dev->dev_ops->xstats_reset != NULL) 3385 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3386 3387 /* fallback to default */ 3388 return rte_eth_stats_reset(port_id); 3389 } 3390 3391 static int 3392 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3393 uint8_t stat_idx, uint8_t is_rx) 3394 { 3395 struct rte_eth_dev *dev; 3396 3397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3398 dev = &rte_eth_devices[port_id]; 3399 3400 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3401 return -EINVAL; 3402 3403 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3404 return -EINVAL; 3405 3406 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3407 return -EINVAL; 3408 3409 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3410 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3411 } 3412 3413 int 3414 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3415 uint8_t stat_idx) 3416 { 3417 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3418 tx_queue_id, 3419 stat_idx, STAT_QMAP_TX)); 3420 } 3421 3422 int 3423 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3424 uint8_t stat_idx) 3425 { 3426 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3427 rx_queue_id, 3428 stat_idx, STAT_QMAP_RX)); 3429 } 3430 3431 int 3432 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3433 { 3434 struct rte_eth_dev *dev; 3435 3436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3437 dev = &rte_eth_devices[port_id]; 3438 3439 if (fw_version == NULL && fw_size > 0) { 3440 RTE_ETHDEV_LOG(ERR, 3441 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3442 port_id); 3443 return -EINVAL; 3444 } 3445 3446 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3447 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3448 fw_version, fw_size)); 3449 } 3450 3451 int 3452 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3453 { 3454 struct rte_eth_dev *dev; 3455 const struct rte_eth_desc_lim lim = { 3456 .nb_max = UINT16_MAX, 3457 .nb_min = 0, 3458 .nb_align = 1, 3459 .nb_seg_max = UINT16_MAX, 3460 .nb_mtu_seg_max = UINT16_MAX, 3461 }; 3462 int diag; 3463 3464 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3465 dev = &rte_eth_devices[port_id]; 3466 3467 if (dev_info == NULL) { 3468 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3469 port_id); 3470 return -EINVAL; 3471 } 3472 3473 /* 3474 * Init dev_info before port_id check since caller does not have 3475 * return status and does not know if get is successful or not. 
3476 */ 3477 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3478 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3479 3480 dev_info->rx_desc_lim = lim; 3481 dev_info->tx_desc_lim = lim; 3482 dev_info->device = dev->device; 3483 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3484 RTE_ETHER_CRC_LEN; 3485 dev_info->max_mtu = UINT16_MAX; 3486 3487 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3488 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3489 if (diag != 0) { 3490 /* Cleanup already filled in device information */ 3491 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3492 return eth_err(port_id, diag); 3493 } 3494 3495 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3496 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3497 RTE_MAX_QUEUES_PER_PORT); 3498 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3499 RTE_MAX_QUEUES_PER_PORT); 3500 3501 dev_info->driver_name = dev->device->driver->name; 3502 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3503 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3504 3505 dev_info->dev_flags = &dev->data->dev_flags; 3506 3507 return 0; 3508 } 3509 3510 int 3511 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3512 { 3513 struct rte_eth_dev *dev; 3514 3515 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3516 dev = &rte_eth_devices[port_id]; 3517 3518 if (dev_conf == NULL) { 3519 RTE_ETHDEV_LOG(ERR, 3520 "Cannot get ethdev port %u configuration to NULL\n", 3521 port_id); 3522 return -EINVAL; 3523 } 3524 3525 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3526 3527 return 0; 3528 } 3529 3530 int 3531 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3532 uint32_t *ptypes, int num) 3533 { 3534 int i, j; 3535 struct rte_eth_dev *dev; 3536 const uint32_t *all_ptypes; 3537 3538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3539 dev = &rte_eth_devices[port_id]; 3540 3541 if (ptypes == NULL && num > 0) { 3542 RTE_ETHDEV_LOG(ERR, 3543 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3544 port_id); 3545 return -EINVAL; 3546 } 3547 3548 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3549 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3550 3551 if (!all_ptypes) 3552 return 0; 3553 3554 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3555 if (all_ptypes[i] & ptype_mask) { 3556 if (j < num) 3557 ptypes[j] = all_ptypes[i]; 3558 j++; 3559 } 3560 3561 return j; 3562 } 3563 3564 int 3565 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3566 uint32_t *set_ptypes, unsigned int num) 3567 { 3568 const uint32_t valid_ptype_masks[] = { 3569 RTE_PTYPE_L2_MASK, 3570 RTE_PTYPE_L3_MASK, 3571 RTE_PTYPE_L4_MASK, 3572 RTE_PTYPE_TUNNEL_MASK, 3573 RTE_PTYPE_INNER_L2_MASK, 3574 RTE_PTYPE_INNER_L3_MASK, 3575 RTE_PTYPE_INNER_L4_MASK, 3576 }; 3577 const uint32_t *all_ptypes; 3578 struct rte_eth_dev *dev; 3579 uint32_t unused_mask; 3580 unsigned int i, j; 3581 int ret; 3582 3583 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3584 dev = &rte_eth_devices[port_id]; 3585 3586 if (num > 0 && set_ptypes == NULL) { 3587 RTE_ETHDEV_LOG(ERR, 3588 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3589 port_id); 3590 return -EINVAL; 3591 } 3592 3593 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3594 *dev->dev_ops->dev_ptypes_set == NULL) { 3595 ret = 0; 3596 goto 
ptype_unknown; 3597 } 3598 3599 if (ptype_mask == 0) { 3600 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3601 ptype_mask); 3602 goto ptype_unknown; 3603 } 3604 3605 unused_mask = ptype_mask; 3606 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3607 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3608 if (mask && mask != valid_ptype_masks[i]) { 3609 ret = -EINVAL; 3610 goto ptype_unknown; 3611 } 3612 unused_mask &= ~valid_ptype_masks[i]; 3613 } 3614 3615 if (unused_mask) { 3616 ret = -EINVAL; 3617 goto ptype_unknown; 3618 } 3619 3620 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3621 if (all_ptypes == NULL) { 3622 ret = 0; 3623 goto ptype_unknown; 3624 } 3625 3626 /* 3627 * Accommodate as many set_ptypes as possible. If the supplied 3628 * set_ptypes array is insufficient fill it partially. 3629 */ 3630 for (i = 0, j = 0; set_ptypes != NULL && 3631 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3632 if (ptype_mask & all_ptypes[i]) { 3633 if (j < num - 1) { 3634 set_ptypes[j] = all_ptypes[i]; 3635 j++; 3636 continue; 3637 } 3638 break; 3639 } 3640 } 3641 3642 if (set_ptypes != NULL && j < num) 3643 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3644 3645 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3646 3647 ptype_unknown: 3648 if (num > 0) 3649 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3650 3651 return ret; 3652 } 3653 3654 int 3655 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3656 unsigned int num) 3657 { 3658 int32_t ret; 3659 struct rte_eth_dev *dev; 3660 struct rte_eth_dev_info dev_info; 3661 3662 if (ma == NULL) { 3663 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3664 return -EINVAL; 3665 } 3666 3667 /* will check for us that port_id is a valid one */ 3668 ret = rte_eth_dev_info_get(port_id, &dev_info); 3669 if (ret != 0) 3670 return ret; 3671 3672 dev = &rte_eth_devices[port_id]; 3673 num = RTE_MIN(dev_info.max_mac_addrs, num); 3674 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3675 3676 return num; 3677 } 3678 3679 int 3680 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3681 { 3682 struct rte_eth_dev *dev; 3683 3684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3685 dev = &rte_eth_devices[port_id]; 3686 3687 if (mac_addr == NULL) { 3688 RTE_ETHDEV_LOG(ERR, 3689 "Cannot get ethdev port %u MAC address to NULL\n", 3690 port_id); 3691 return -EINVAL; 3692 } 3693 3694 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3695 3696 return 0; 3697 } 3698 3699 int 3700 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3701 { 3702 struct rte_eth_dev *dev; 3703 3704 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3705 dev = &rte_eth_devices[port_id]; 3706 3707 if (mtu == NULL) { 3708 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3709 port_id); 3710 return -EINVAL; 3711 } 3712 3713 *mtu = dev->data->mtu; 3714 return 0; 3715 } 3716 3717 int 3718 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3719 { 3720 int ret; 3721 struct rte_eth_dev_info dev_info; 3722 struct rte_eth_dev *dev; 3723 3724 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3725 dev = &rte_eth_devices[port_id]; 3726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3727 3728 /* 3729 * Check if the device supports dev_infos_get, if it does not 3730 * skip min_mtu/max_mtu validation here as this requires values 3731 * that are populated within the call to rte_eth_dev_info_get() 3732 * which relies on dev->dev_ops->dev_infos_get. 
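	 *
	 * When the driver does provide dev_infos_get, the helper
	 * eth_dev_validate_mtu() called below rejects values outside the
	 * [dev_info.min_mtu, dev_info.max_mtu] range reported by the
	 * driver; for instance (illustrative values only), a driver
	 * reporting min_mtu = RTE_ETHER_MIN_MTU and max_mtu = 9600
	 * accepts rte_eth_dev_set_mtu(port_id, 1500) but rejects a
	 * request for 9601.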
3733 */ 3734 if (*dev->dev_ops->dev_infos_get != NULL) { 3735 ret = rte_eth_dev_info_get(port_id, &dev_info); 3736 if (ret != 0) 3737 return ret; 3738 3739 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3740 if (ret != 0) 3741 return ret; 3742 } 3743 3744 if (dev->data->dev_configured == 0) { 3745 RTE_ETHDEV_LOG(ERR, 3746 "Port %u must be configured before MTU set\n", 3747 port_id); 3748 return -EINVAL; 3749 } 3750 3751 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3752 if (ret == 0) 3753 dev->data->mtu = mtu; 3754 3755 return eth_err(port_id, ret); 3756 } 3757 3758 int 3759 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3760 { 3761 struct rte_eth_dev *dev; 3762 int ret; 3763 3764 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3765 dev = &rte_eth_devices[port_id]; 3766 3767 if (!(dev->data->dev_conf.rxmode.offloads & 3768 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3769 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3770 port_id); 3771 return -ENOSYS; 3772 } 3773 3774 if (vlan_id > 4095) { 3775 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3776 port_id, vlan_id); 3777 return -EINVAL; 3778 } 3779 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3780 3781 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3782 if (ret == 0) { 3783 struct rte_vlan_filter_conf *vfc; 3784 int vidx; 3785 int vbit; 3786 3787 vfc = &dev->data->vlan_filter_conf; 3788 vidx = vlan_id / 64; 3789 vbit = vlan_id % 64; 3790 3791 if (on) 3792 vfc->ids[vidx] |= RTE_BIT64(vbit); 3793 else 3794 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3795 } 3796 3797 return eth_err(port_id, ret); 3798 } 3799 3800 int 3801 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3802 int on) 3803 { 3804 struct rte_eth_dev *dev; 3805 3806 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3807 dev = &rte_eth_devices[port_id]; 3808 3809 if (rx_queue_id >= dev->data->nb_rx_queues) { 3810 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3811 return -EINVAL; 3812 } 3813 3814 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3815 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3816 3817 return 0; 3818 } 3819 3820 int 3821 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3822 enum rte_vlan_type vlan_type, 3823 uint16_t tpid) 3824 { 3825 struct rte_eth_dev *dev; 3826 3827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3828 dev = &rte_eth_devices[port_id]; 3829 3830 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3831 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3832 tpid)); 3833 } 3834 3835 int 3836 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3837 { 3838 struct rte_eth_dev_info dev_info; 3839 struct rte_eth_dev *dev; 3840 int ret = 0; 3841 int mask = 0; 3842 int cur, org = 0; 3843 uint64_t orig_offloads; 3844 uint64_t dev_offloads; 3845 uint64_t new_offloads; 3846 3847 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3848 dev = &rte_eth_devices[port_id]; 3849 3850 /* save original values in case of failure */ 3851 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3852 dev_offloads = orig_offloads; 3853 3854 /* check which option changed by application */ 3855 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3856 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3857 if (cur != org) { 3858 if (cur) 3859 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3860 else 3861 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3862 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3863 } 3864 3865 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3866 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3867 if (cur != org) { 3868 if (cur) 3869 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3870 else 3871 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3872 mask |= RTE_ETH_VLAN_FILTER_MASK; 3873 } 3874 3875 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3876 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3877 if (cur != org) { 3878 if (cur) 3879 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3880 else 3881 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3882 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3883 } 3884 3885 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3886 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3887 if (cur != org) { 3888 if (cur) 3889 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3890 else 3891 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3892 mask |= RTE_ETH_QINQ_STRIP_MASK; 3893 } 3894 3895 /*no change*/ 3896 if (mask == 0) 3897 return ret; 3898 3899 ret = rte_eth_dev_info_get(port_id, &dev_info); 3900 if (ret != 0) 3901 return ret; 3902 3903 /* Rx VLAN offloading must be within its device capabilities */ 3904 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3905 new_offloads = dev_offloads & ~orig_offloads; 3906 RTE_ETHDEV_LOG(ERR, 3907 "Ethdev port_id=%u requested new added VLAN offloads " 3908 "0x%" PRIx64 " must be within Rx offloads capabilities " 3909 "0x%" PRIx64 " in %s()\n", 3910 port_id, new_offloads, dev_info.rx_offload_capa, 3911 __func__); 3912 return -EINVAL; 3913 } 3914 3915 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3916 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3917 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3918 if (ret) { 3919 /* hit an error restore original values */ 3920 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3921 } 3922 3923 return eth_err(port_id, ret); 3924 } 3925 3926 int 3927 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3928 { 3929 struct rte_eth_dev *dev; 3930 uint64_t *dev_offloads; 3931 int ret = 0; 3932 3933 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3934 dev = &rte_eth_devices[port_id]; 3935 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3936 3937 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3938 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3939 3940 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3941 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3942 3943 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3944 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3945 3946 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3947 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3948 3949 return ret; 3950 } 3951 3952 int 3953 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3954 { 3955 struct rte_eth_dev *dev; 3956 3957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3958 dev = &rte_eth_devices[port_id]; 3959 3960 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3961 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3962 } 3963 3964 int 3965 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3966 { 3967 struct rte_eth_dev *dev; 3968 3969 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3970 dev = &rte_eth_devices[port_id]; 3971 3972 if (fc_conf == NULL) { 3973 RTE_ETHDEV_LOG(ERR, 3974 "Cannot get ethdev port %u flow control config to NULL\n", 3975 port_id); 3976 return -EINVAL; 3977 } 3978 3979 
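	/*
	 * A typical caller does a read-modify-write round trip
	 * (illustrative sketch only):
	 *
	 *   struct rte_eth_fc_conf fc;
	 *
	 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
	 *       fc.mode = RTE_ETH_FC_FULL;
	 *       ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
	 *   }
	 *
	 * so the structure is cleared here before the driver fills in
	 * the fields it supports.
	 */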
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3980 memset(fc_conf, 0, sizeof(*fc_conf)); 3981 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3982 } 3983 3984 int 3985 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3986 { 3987 struct rte_eth_dev *dev; 3988 3989 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3990 dev = &rte_eth_devices[port_id]; 3991 3992 if (fc_conf == NULL) { 3993 RTE_ETHDEV_LOG(ERR, 3994 "Cannot set ethdev port %u flow control from NULL config\n", 3995 port_id); 3996 return -EINVAL; 3997 } 3998 3999 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4000 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4001 return -EINVAL; 4002 } 4003 4004 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 4005 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4006 } 4007 4008 int 4009 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4010 struct rte_eth_pfc_conf *pfc_conf) 4011 { 4012 struct rte_eth_dev *dev; 4013 4014 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4015 dev = &rte_eth_devices[port_id]; 4016 4017 if (pfc_conf == NULL) { 4018 RTE_ETHDEV_LOG(ERR, 4019 "Cannot set ethdev port %u priority flow control from NULL config\n", 4020 port_id); 4021 return -EINVAL; 4022 } 4023 4024 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4025 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4026 return -EINVAL; 4027 } 4028 4029 /* High water, low water validation are device specific */ 4030 if (*dev->dev_ops->priority_flow_ctrl_set) 4031 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4032 (dev, pfc_conf)); 4033 return -ENOTSUP; 4034 } 4035 4036 static int 4037 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4038 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4039 { 4040 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4041 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4042 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4043 RTE_ETHDEV_LOG(ERR, 4044 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4045 pfc_queue_conf->rx_pause.tx_qid, 4046 dev_info->nb_tx_queues); 4047 return -EINVAL; 4048 } 4049 4050 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4051 RTE_ETHDEV_LOG(ERR, 4052 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4053 pfc_queue_conf->rx_pause.tc, tc_max); 4054 return -EINVAL; 4055 } 4056 } 4057 4058 return 0; 4059 } 4060 4061 static int 4062 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4063 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4064 { 4065 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4066 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4067 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4068 RTE_ETHDEV_LOG(ERR, 4069 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4070 pfc_queue_conf->tx_pause.rx_qid, 4071 dev_info->nb_rx_queues); 4072 return -EINVAL; 4073 } 4074 4075 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4076 RTE_ETHDEV_LOG(ERR, 4077 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4078 pfc_queue_conf->tx_pause.tc, tc_max); 4079 return -EINVAL; 4080 } 4081 } 4082 4083 return 0; 4084 } 4085 4086 int 4087 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4088 struct rte_eth_pfc_queue_info *pfc_queue_info) 4089 { 4090 struct rte_eth_dev *dev; 4091 4092 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4093 dev 
= &rte_eth_devices[port_id]; 4094 4095 if (pfc_queue_info == NULL) { 4096 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4097 port_id); 4098 return -EINVAL; 4099 } 4100 4101 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4102 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4103 (dev, pfc_queue_info)); 4104 return -ENOTSUP; 4105 } 4106 4107 int 4108 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4109 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4110 { 4111 struct rte_eth_pfc_queue_info pfc_info; 4112 struct rte_eth_dev_info dev_info; 4113 struct rte_eth_dev *dev; 4114 int ret; 4115 4116 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4117 dev = &rte_eth_devices[port_id]; 4118 4119 if (pfc_queue_conf == NULL) { 4120 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4121 port_id); 4122 return -EINVAL; 4123 } 4124 4125 ret = rte_eth_dev_info_get(port_id, &dev_info); 4126 if (ret != 0) 4127 return ret; 4128 4129 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4130 if (ret != 0) 4131 return ret; 4132 4133 if (pfc_info.tc_max == 0) { 4134 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4135 port_id); 4136 return -ENOTSUP; 4137 } 4138 4139 /* Check requested mode supported or not */ 4140 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4141 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4142 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4143 port_id); 4144 return -EINVAL; 4145 } 4146 4147 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4148 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4149 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4150 port_id); 4151 return -EINVAL; 4152 } 4153 4154 /* Validate Rx pause parameters */ 4155 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4156 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4157 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4158 pfc_queue_conf); 4159 if (ret != 0) 4160 return ret; 4161 } 4162 4163 /* Validate Tx pause parameters */ 4164 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4165 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4166 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4167 pfc_queue_conf); 4168 if (ret != 0) 4169 return ret; 4170 } 4171 4172 if (*dev->dev_ops->priority_flow_ctrl_queue_config) 4173 return eth_err(port_id, 4174 (*dev->dev_ops->priority_flow_ctrl_queue_config)( 4175 dev, pfc_queue_conf)); 4176 return -ENOTSUP; 4177 } 4178 4179 static int 4180 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4181 uint16_t reta_size) 4182 { 4183 uint16_t i, num; 4184 4185 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4186 for (i = 0; i < num; i++) { 4187 if (reta_conf[i].mask) 4188 return 0; 4189 } 4190 4191 return -EINVAL; 4192 } 4193 4194 static int 4195 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4196 uint16_t reta_size, 4197 uint16_t max_rxq) 4198 { 4199 uint16_t i, idx, shift; 4200 4201 if (max_rxq == 0) { 4202 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4203 return -EINVAL; 4204 } 4205 4206 for (i = 0; i < reta_size; i++) { 4207 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4208 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4209 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4210 (reta_conf[idx].reta[shift] >= max_rxq)) { 4211 RTE_ETHDEV_LOG(ERR, 4212 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4213 idx, shift, 4214 reta_conf[idx].reta[shift], max_rxq); 4215 return 
-EINVAL; 4216 } 4217 } 4218 4219 return 0; 4220 } 4221 4222 int 4223 rte_eth_dev_rss_reta_update(uint16_t port_id, 4224 struct rte_eth_rss_reta_entry64 *reta_conf, 4225 uint16_t reta_size) 4226 { 4227 struct rte_eth_dev *dev; 4228 int ret; 4229 4230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4231 dev = &rte_eth_devices[port_id]; 4232 4233 if (reta_conf == NULL) { 4234 RTE_ETHDEV_LOG(ERR, 4235 "Cannot update ethdev port %u RSS RETA to NULL\n", 4236 port_id); 4237 return -EINVAL; 4238 } 4239 4240 if (reta_size == 0) { 4241 RTE_ETHDEV_LOG(ERR, 4242 "Cannot update ethdev port %u RSS RETA with zero size\n", 4243 port_id); 4244 return -EINVAL; 4245 } 4246 4247 /* Check mask bits */ 4248 ret = eth_check_reta_mask(reta_conf, reta_size); 4249 if (ret < 0) 4250 return ret; 4251 4252 /* Check entry value */ 4253 ret = eth_check_reta_entry(reta_conf, reta_size, 4254 dev->data->nb_rx_queues); 4255 if (ret < 0) 4256 return ret; 4257 4258 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4259 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4260 reta_size)); 4261 } 4262 4263 int 4264 rte_eth_dev_rss_reta_query(uint16_t port_id, 4265 struct rte_eth_rss_reta_entry64 *reta_conf, 4266 uint16_t reta_size) 4267 { 4268 struct rte_eth_dev *dev; 4269 int ret; 4270 4271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4272 dev = &rte_eth_devices[port_id]; 4273 4274 if (reta_conf == NULL) { 4275 RTE_ETHDEV_LOG(ERR, 4276 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4277 port_id); 4278 return -EINVAL; 4279 } 4280 4281 /* Check mask bits */ 4282 ret = eth_check_reta_mask(reta_conf, reta_size); 4283 if (ret < 0) 4284 return ret; 4285 4286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4287 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4288 reta_size)); 4289 } 4290 4291 int 4292 rte_eth_dev_rss_hash_update(uint16_t port_id, 4293 struct rte_eth_rss_conf *rss_conf) 4294 { 4295 struct rte_eth_dev *dev; 4296 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4297 int ret; 4298 4299 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4300 dev = &rte_eth_devices[port_id]; 4301 4302 if (rss_conf == NULL) { 4303 RTE_ETHDEV_LOG(ERR, 4304 "Cannot update ethdev port %u RSS hash from NULL config\n", 4305 port_id); 4306 return -EINVAL; 4307 } 4308 4309 ret = rte_eth_dev_info_get(port_id, &dev_info); 4310 if (ret != 0) 4311 return ret; 4312 4313 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4314 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4315 dev_info.flow_type_rss_offloads) { 4316 RTE_ETHDEV_LOG(ERR, 4317 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4318 port_id, rss_conf->rss_hf, 4319 dev_info.flow_type_rss_offloads); 4320 return -EINVAL; 4321 } 4322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4323 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4324 rss_conf)); 4325 } 4326 4327 int 4328 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4329 struct rte_eth_rss_conf *rss_conf) 4330 { 4331 struct rte_eth_dev *dev; 4332 4333 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4334 dev = &rte_eth_devices[port_id]; 4335 4336 if (rss_conf == NULL) { 4337 RTE_ETHDEV_LOG(ERR, 4338 "Cannot get ethdev port %u RSS hash config to NULL\n", 4339 port_id); 4340 return -EINVAL; 4341 } 4342 4343 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4344 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4345 
rss_conf)); 4346 } 4347 4348 int 4349 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4350 struct rte_eth_udp_tunnel *udp_tunnel) 4351 { 4352 struct rte_eth_dev *dev; 4353 4354 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4355 dev = &rte_eth_devices[port_id]; 4356 4357 if (udp_tunnel == NULL) { 4358 RTE_ETHDEV_LOG(ERR, 4359 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4360 port_id); 4361 return -EINVAL; 4362 } 4363 4364 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4365 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4366 return -EINVAL; 4367 } 4368 4369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4370 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4371 udp_tunnel)); 4372 } 4373 4374 int 4375 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4376 struct rte_eth_udp_tunnel *udp_tunnel) 4377 { 4378 struct rte_eth_dev *dev; 4379 4380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4381 dev = &rte_eth_devices[port_id]; 4382 4383 if (udp_tunnel == NULL) { 4384 RTE_ETHDEV_LOG(ERR, 4385 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4386 port_id); 4387 return -EINVAL; 4388 } 4389 4390 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4391 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4392 return -EINVAL; 4393 } 4394 4395 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4396 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4397 udp_tunnel)); 4398 } 4399 4400 int 4401 rte_eth_led_on(uint16_t port_id) 4402 { 4403 struct rte_eth_dev *dev; 4404 4405 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4406 dev = &rte_eth_devices[port_id]; 4407 4408 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4409 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4410 } 4411 4412 int 4413 rte_eth_led_off(uint16_t port_id) 4414 { 4415 struct rte_eth_dev *dev; 4416 4417 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4418 dev = &rte_eth_devices[port_id]; 4419 4420 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4421 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4422 } 4423 4424 int 4425 rte_eth_fec_get_capability(uint16_t port_id, 4426 struct rte_eth_fec_capa *speed_fec_capa, 4427 unsigned int num) 4428 { 4429 struct rte_eth_dev *dev; 4430 int ret; 4431 4432 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4433 dev = &rte_eth_devices[port_id]; 4434 4435 if (speed_fec_capa == NULL && num > 0) { 4436 RTE_ETHDEV_LOG(ERR, 4437 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4438 port_id); 4439 return -EINVAL; 4440 } 4441 4442 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4443 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4444 4445 return ret; 4446 } 4447 4448 int 4449 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4450 { 4451 struct rte_eth_dev *dev; 4452 4453 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4454 dev = &rte_eth_devices[port_id]; 4455 4456 if (fec_capa == NULL) { 4457 RTE_ETHDEV_LOG(ERR, 4458 "Cannot get ethdev port %u current FEC mode to NULL\n", 4459 port_id); 4460 return -EINVAL; 4461 } 4462 4463 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4464 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4465 } 4466 4467 int 4468 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4469 { 4470 struct rte_eth_dev *dev; 4471 4472 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4473 
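	/*
	 * Look up the device and delegate the requested FEC mode(s) to the
	 * driver; fec_set is optional, so -ENOTSUP is returned when missing.
	 */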
dev = &rte_eth_devices[port_id]; 4474 4475 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4476 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4477 } 4478 4479 /* 4480 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4481 * an empty spot. 4482 */ 4483 static int 4484 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4485 { 4486 struct rte_eth_dev_info dev_info; 4487 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4488 unsigned i; 4489 int ret; 4490 4491 ret = rte_eth_dev_info_get(port_id, &dev_info); 4492 if (ret != 0) 4493 return -1; 4494 4495 for (i = 0; i < dev_info.max_mac_addrs; i++) 4496 if (memcmp(addr, &dev->data->mac_addrs[i], 4497 RTE_ETHER_ADDR_LEN) == 0) 4498 return i; 4499 4500 return -1; 4501 } 4502 4503 static const struct rte_ether_addr null_mac_addr; 4504 4505 int 4506 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4507 uint32_t pool) 4508 { 4509 struct rte_eth_dev *dev; 4510 int index; 4511 uint64_t pool_mask; 4512 int ret; 4513 4514 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4515 dev = &rte_eth_devices[port_id]; 4516 4517 if (addr == NULL) { 4518 RTE_ETHDEV_LOG(ERR, 4519 "Cannot add ethdev port %u MAC address from NULL address\n", 4520 port_id); 4521 return -EINVAL; 4522 } 4523 4524 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4525 4526 if (rte_is_zero_ether_addr(addr)) { 4527 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4528 port_id); 4529 return -EINVAL; 4530 } 4531 if (pool >= RTE_ETH_64_POOLS) { 4532 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4533 return -EINVAL; 4534 } 4535 4536 index = eth_dev_get_mac_addr_index(port_id, addr); 4537 if (index < 0) { 4538 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4539 if (index < 0) { 4540 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4541 port_id); 4542 return -ENOSPC; 4543 } 4544 } else { 4545 pool_mask = dev->data->mac_pool_sel[index]; 4546 4547 /* Check if both MAC address and pool is already there, and do nothing */ 4548 if (pool_mask & RTE_BIT64(pool)) 4549 return 0; 4550 } 4551 4552 /* Update NIC */ 4553 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4554 4555 if (ret == 0) { 4556 /* Update address in NIC data structure */ 4557 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4558 4559 /* Update pool bitmap in NIC data structure */ 4560 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4561 } 4562 4563 return eth_err(port_id, ret); 4564 } 4565 4566 int 4567 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4568 { 4569 struct rte_eth_dev *dev; 4570 int index; 4571 4572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4573 dev = &rte_eth_devices[port_id]; 4574 4575 if (addr == NULL) { 4576 RTE_ETHDEV_LOG(ERR, 4577 "Cannot remove ethdev port %u MAC address from NULL address\n", 4578 port_id); 4579 return -EINVAL; 4580 } 4581 4582 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4583 4584 index = eth_dev_get_mac_addr_index(port_id, addr); 4585 if (index == 0) { 4586 RTE_ETHDEV_LOG(ERR, 4587 "Port %u: Cannot remove default MAC address\n", 4588 port_id); 4589 return -EADDRINUSE; 4590 } else if (index < 0) 4591 return 0; /* Do nothing if address wasn't found */ 4592 4593 /* Update NIC */ 4594 (*dev->dev_ops->mac_addr_remove)(dev, index); 4595 4596 /* Update address in NIC data structure */ 4597 rte_ether_addr_copy(&null_mac_addr, 
&dev->data->mac_addrs[index]); 4598 4599 /* reset pool bitmap */ 4600 dev->data->mac_pool_sel[index] = 0; 4601 4602 return 0; 4603 } 4604 4605 int 4606 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4607 { 4608 struct rte_eth_dev *dev; 4609 int ret; 4610 4611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4612 dev = &rte_eth_devices[port_id]; 4613 4614 if (addr == NULL) { 4615 RTE_ETHDEV_LOG(ERR, 4616 "Cannot set ethdev port %u default MAC address from NULL address\n", 4617 port_id); 4618 return -EINVAL; 4619 } 4620 4621 if (!rte_is_valid_assigned_ether_addr(addr)) 4622 return -EINVAL; 4623 4624 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4625 4626 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4627 if (ret < 0) 4628 return ret; 4629 4630 /* Update default address in NIC data structure */ 4631 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4632 4633 return 0; 4634 } 4635 4636 4637 /* 4638 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4639 * an empty spot. 4640 */ 4641 static int 4642 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4643 const struct rte_ether_addr *addr) 4644 { 4645 struct rte_eth_dev_info dev_info; 4646 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4647 unsigned i; 4648 int ret; 4649 4650 ret = rte_eth_dev_info_get(port_id, &dev_info); 4651 if (ret != 0) 4652 return -1; 4653 4654 if (!dev->data->hash_mac_addrs) 4655 return -1; 4656 4657 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4658 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4659 RTE_ETHER_ADDR_LEN) == 0) 4660 return i; 4661 4662 return -1; 4663 } 4664 4665 int 4666 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4667 uint8_t on) 4668 { 4669 int index; 4670 int ret; 4671 struct rte_eth_dev *dev; 4672 4673 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4674 dev = &rte_eth_devices[port_id]; 4675 4676 if (addr == NULL) { 4677 RTE_ETHDEV_LOG(ERR, 4678 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4679 port_id); 4680 return -EINVAL; 4681 } 4682 4683 if (rte_is_zero_ether_addr(addr)) { 4684 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4685 port_id); 4686 return -EINVAL; 4687 } 4688 4689 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4690 /* Check if it's already there, and do nothing */ 4691 if ((index >= 0) && on) 4692 return 0; 4693 4694 if (index < 0) { 4695 if (!on) { 4696 RTE_ETHDEV_LOG(ERR, 4697 "Port %u: the MAC address was not set in UTA\n", 4698 port_id); 4699 return -EINVAL; 4700 } 4701 4702 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4703 if (index < 0) { 4704 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4705 port_id); 4706 return -ENOSPC; 4707 } 4708 } 4709 4710 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4711 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4712 if (ret == 0) { 4713 /* Update address in NIC data structure */ 4714 if (on) 4715 rte_ether_addr_copy(addr, 4716 &dev->data->hash_mac_addrs[index]); 4717 else 4718 rte_ether_addr_copy(&null_mac_addr, 4719 &dev->data->hash_mac_addrs[index]); 4720 } 4721 4722 return eth_err(port_id, ret); 4723 } 4724 4725 int 4726 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4727 { 4728 struct rte_eth_dev *dev; 4729 4730 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4731 dev = &rte_eth_devices[port_id]; 4732 4733 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4734 
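	/*
	 * Hand the request to the driver; eth_err() converts the driver's
	 * error to -EIO if the device has been unplugged in the meantime.
	 */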
return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4735 on)); 4736 } 4737 4738 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4739 uint16_t tx_rate) 4740 { 4741 struct rte_eth_dev *dev; 4742 struct rte_eth_dev_info dev_info; 4743 struct rte_eth_link link; 4744 int ret; 4745 4746 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4747 dev = &rte_eth_devices[port_id]; 4748 4749 ret = rte_eth_dev_info_get(port_id, &dev_info); 4750 if (ret != 0) 4751 return ret; 4752 4753 link = dev->data->dev_link; 4754 4755 if (queue_idx > dev_info.max_tx_queues) { 4756 RTE_ETHDEV_LOG(ERR, 4757 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4758 port_id, queue_idx); 4759 return -EINVAL; 4760 } 4761 4762 if (tx_rate > link.link_speed) { 4763 RTE_ETHDEV_LOG(ERR, 4764 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4765 tx_rate, link.link_speed); 4766 return -EINVAL; 4767 } 4768 4769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4770 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4771 queue_idx, tx_rate)); 4772 } 4773 4774 RTE_INIT(eth_dev_init_fp_ops) 4775 { 4776 uint32_t i; 4777 4778 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4779 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4780 } 4781 4782 RTE_INIT(eth_dev_init_cb_lists) 4783 { 4784 uint16_t i; 4785 4786 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4787 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4788 } 4789 4790 int 4791 rte_eth_dev_callback_register(uint16_t port_id, 4792 enum rte_eth_event_type event, 4793 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4794 { 4795 struct rte_eth_dev *dev; 4796 struct rte_eth_dev_callback *user_cb; 4797 uint16_t next_port; 4798 uint16_t last_port; 4799 4800 if (cb_fn == NULL) { 4801 RTE_ETHDEV_LOG(ERR, 4802 "Cannot register ethdev port %u callback from NULL\n", 4803 port_id); 4804 return -EINVAL; 4805 } 4806 4807 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4808 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4809 return -EINVAL; 4810 } 4811 4812 if (port_id == RTE_ETH_ALL) { 4813 next_port = 0; 4814 last_port = RTE_MAX_ETHPORTS - 1; 4815 } else { 4816 next_port = last_port = port_id; 4817 } 4818 4819 rte_spinlock_lock(ð_dev_cb_lock); 4820 4821 do { 4822 dev = &rte_eth_devices[next_port]; 4823 4824 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4825 if (user_cb->cb_fn == cb_fn && 4826 user_cb->cb_arg == cb_arg && 4827 user_cb->event == event) { 4828 break; 4829 } 4830 } 4831 4832 /* create a new callback. 
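		 * On allocation failure, any callbacks already registered for
		 * earlier ports in the RTE_ETH_ALL case are unregistered again
		 * before returning -ENOMEM.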
*/ 4833 if (user_cb == NULL) { 4834 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4835 sizeof(struct rte_eth_dev_callback), 0); 4836 if (user_cb != NULL) { 4837 user_cb->cb_fn = cb_fn; 4838 user_cb->cb_arg = cb_arg; 4839 user_cb->event = event; 4840 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4841 user_cb, next); 4842 } else { 4843 rte_spinlock_unlock(ð_dev_cb_lock); 4844 rte_eth_dev_callback_unregister(port_id, event, 4845 cb_fn, cb_arg); 4846 return -ENOMEM; 4847 } 4848 4849 } 4850 } while (++next_port <= last_port); 4851 4852 rte_spinlock_unlock(ð_dev_cb_lock); 4853 return 0; 4854 } 4855 4856 int 4857 rte_eth_dev_callback_unregister(uint16_t port_id, 4858 enum rte_eth_event_type event, 4859 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4860 { 4861 int ret; 4862 struct rte_eth_dev *dev; 4863 struct rte_eth_dev_callback *cb, *next; 4864 uint16_t next_port; 4865 uint16_t last_port; 4866 4867 if (cb_fn == NULL) { 4868 RTE_ETHDEV_LOG(ERR, 4869 "Cannot unregister ethdev port %u callback from NULL\n", 4870 port_id); 4871 return -EINVAL; 4872 } 4873 4874 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4875 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4876 return -EINVAL; 4877 } 4878 4879 if (port_id == RTE_ETH_ALL) { 4880 next_port = 0; 4881 last_port = RTE_MAX_ETHPORTS - 1; 4882 } else { 4883 next_port = last_port = port_id; 4884 } 4885 4886 rte_spinlock_lock(ð_dev_cb_lock); 4887 4888 do { 4889 dev = &rte_eth_devices[next_port]; 4890 ret = 0; 4891 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4892 cb = next) { 4893 4894 next = TAILQ_NEXT(cb, next); 4895 4896 if (cb->cb_fn != cb_fn || cb->event != event || 4897 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4898 continue; 4899 4900 /* 4901 * if this callback is not executing right now, 4902 * then remove it. 4903 */ 4904 if (cb->active == 0) { 4905 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4906 rte_free(cb); 4907 } else { 4908 ret = -EAGAIN; 4909 } 4910 } 4911 } while (++next_port <= last_port); 4912 4913 rte_spinlock_unlock(ð_dev_cb_lock); 4914 return ret; 4915 } 4916 4917 int 4918 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4919 enum rte_eth_event_type event, void *ret_param) 4920 { 4921 struct rte_eth_dev_callback *cb_lst; 4922 struct rte_eth_dev_callback dev_cb; 4923 int rc = 0; 4924 4925 rte_spinlock_lock(ð_dev_cb_lock); 4926 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4927 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4928 continue; 4929 dev_cb = *cb_lst; 4930 cb_lst->active = 1; 4931 if (ret_param != NULL) 4932 dev_cb.ret_param = ret_param; 4933 4934 rte_spinlock_unlock(ð_dev_cb_lock); 4935 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4936 dev_cb.cb_arg, dev_cb.ret_param); 4937 rte_spinlock_lock(ð_dev_cb_lock); 4938 cb_lst->active = 0; 4939 } 4940 rte_spinlock_unlock(ð_dev_cb_lock); 4941 return rc; 4942 } 4943 4944 void 4945 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4946 { 4947 if (dev == NULL) 4948 return; 4949 4950 /* 4951 * for secondary process, at that point we expect device 4952 * to be already 'usable', so shared data and all function pointers 4953 * for fast-path devops have to be setup properly inside rte_eth_dev. 
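	 * (For the primary process the fast-path ops are installed later,
	 * when the port is started.)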
4954 */ 4955 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4956 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4957 4958 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4959 4960 dev->state = RTE_ETH_DEV_ATTACHED; 4961 } 4962 4963 int 4964 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4965 { 4966 uint32_t vec; 4967 struct rte_eth_dev *dev; 4968 struct rte_intr_handle *intr_handle; 4969 uint16_t qid; 4970 int rc; 4971 4972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4973 dev = &rte_eth_devices[port_id]; 4974 4975 if (!dev->intr_handle) { 4976 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4977 return -ENOTSUP; 4978 } 4979 4980 intr_handle = dev->intr_handle; 4981 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4982 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4983 return -EPERM; 4984 } 4985 4986 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4987 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4988 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4989 if (rc && rc != -EEXIST) { 4990 RTE_ETHDEV_LOG(ERR, 4991 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4992 port_id, qid, op, epfd, vec); 4993 } 4994 } 4995 4996 return 0; 4997 } 4998 4999 int 5000 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5001 { 5002 struct rte_intr_handle *intr_handle; 5003 struct rte_eth_dev *dev; 5004 unsigned int efd_idx; 5005 uint32_t vec; 5006 int fd; 5007 5008 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5009 dev = &rte_eth_devices[port_id]; 5010 5011 if (queue_id >= dev->data->nb_rx_queues) { 5012 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5013 return -1; 5014 } 5015 5016 if (!dev->intr_handle) { 5017 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5018 return -1; 5019 } 5020 5021 intr_handle = dev->intr_handle; 5022 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5023 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5024 return -1; 5025 } 5026 5027 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5028 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5029 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5030 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5031 5032 return fd; 5033 } 5034 5035 static inline int 5036 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 5037 const char *ring_name) 5038 { 5039 return snprintf(name, len, "eth_p%d_q%d_%s", 5040 port_id, queue_id, ring_name); 5041 } 5042 5043 const struct rte_memzone * 5044 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 5045 uint16_t queue_id, size_t size, unsigned align, 5046 int socket_id) 5047 { 5048 char z_name[RTE_MEMZONE_NAMESIZE]; 5049 const struct rte_memzone *mz; 5050 int rc; 5051 5052 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 5053 queue_id, ring_name); 5054 if (rc >= RTE_MEMZONE_NAMESIZE) { 5055 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 5056 rte_errno = ENAMETOOLONG; 5057 return NULL; 5058 } 5059 5060 mz = rte_memzone_lookup(z_name); 5061 if (mz) { 5062 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 5063 size > mz->len || 5064 ((uintptr_t)mz->addr & (align - 1)) != 0) { 5065 RTE_ETHDEV_LOG(ERR, 5066 "memzone %s does not justify the requested attributes\n", 5067 mz->name); 5068 return NULL; 5069 } 5070 5071 return mz; 5072 } 5073 5074 return rte_memzone_reserve_aligned(z_name, size, socket_id, 5075 RTE_MEMZONE_IOVA_CONTIG, align); 5076 } 5077 5078 int 5079 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 5080 uint16_t queue_id) 5081 { 5082 char z_name[RTE_MEMZONE_NAMESIZE]; 5083 const struct rte_memzone *mz; 5084 int rc = 0; 5085 5086 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 5087 queue_id, ring_name); 5088 if (rc >= RTE_MEMZONE_NAMESIZE) { 5089 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 5090 return -ENAMETOOLONG; 5091 } 5092 5093 mz = rte_memzone_lookup(z_name); 5094 if (mz) 5095 rc = rte_memzone_free(mz); 5096 else 5097 rc = -ENOENT; 5098 5099 return rc; 5100 } 5101 5102 int 5103 rte_eth_dev_create(struct rte_device *device, const char *name, 5104 size_t priv_data_size, 5105 ethdev_bus_specific_init ethdev_bus_specific_init, 5106 void *bus_init_params, 5107 ethdev_init_t ethdev_init, void *init_params) 5108 { 5109 struct rte_eth_dev *ethdev; 5110 int retval; 5111 5112 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 5113 5114 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 5115 ethdev = rte_eth_dev_allocate(name); 5116 if (!ethdev) 5117 return -ENODEV; 5118 5119 if (priv_data_size) { 5120 ethdev->data->dev_private = rte_zmalloc_socket( 5121 name, priv_data_size, RTE_CACHE_LINE_SIZE, 5122 device->numa_node); 5123 5124 if (!ethdev->data->dev_private) { 5125 RTE_ETHDEV_LOG(ERR, 5126 "failed to allocate private data\n"); 5127 retval = -ENOMEM; 5128 goto probe_failed; 5129 } 5130 } 5131 } else { 5132 ethdev = rte_eth_dev_attach_secondary(name); 5133 if (!ethdev) { 5134 RTE_ETHDEV_LOG(ERR, 5135 "secondary process attach failed, ethdev doesn't exist\n"); 5136 return -ENODEV; 5137 } 5138 } 5139 5140 ethdev->device = device; 5141 5142 if (ethdev_bus_specific_init) { 5143 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 5144 if (retval) { 5145 RTE_ETHDEV_LOG(ERR, 5146 "ethdev bus specific initialisation failed\n"); 5147 goto probe_failed; 5148 } 5149 } 5150 5151 retval = ethdev_init(ethdev, init_params); 5152 if (retval) { 5153 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 5154 goto probe_failed; 5155 } 5156 5157 rte_eth_dev_probing_finish(ethdev); 5158 5159 return retval; 5160 5161 
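/*
 * Error path: undo the port allocation/attachment performed above and
 * report the failure to the caller.
 */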
probe_failed: 5162 rte_eth_dev_release_port(ethdev); 5163 return retval; 5164 } 5165 5166 int 5167 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 5168 ethdev_uninit_t ethdev_uninit) 5169 { 5170 int ret; 5171 5172 ethdev = rte_eth_dev_allocated(ethdev->data->name); 5173 if (!ethdev) 5174 return -ENODEV; 5175 5176 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 5177 5178 ret = ethdev_uninit(ethdev); 5179 if (ret) 5180 return ret; 5181 5182 return rte_eth_dev_release_port(ethdev); 5183 } 5184 5185 int 5186 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5187 int epfd, int op, void *data) 5188 { 5189 uint32_t vec; 5190 struct rte_eth_dev *dev; 5191 struct rte_intr_handle *intr_handle; 5192 int rc; 5193 5194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5195 dev = &rte_eth_devices[port_id]; 5196 5197 if (queue_id >= dev->data->nb_rx_queues) { 5198 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5199 return -EINVAL; 5200 } 5201 5202 if (!dev->intr_handle) { 5203 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5204 return -ENOTSUP; 5205 } 5206 5207 intr_handle = dev->intr_handle; 5208 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5209 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5210 return -EPERM; 5211 } 5212 5213 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5214 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5215 if (rc && rc != -EEXIST) { 5216 RTE_ETHDEV_LOG(ERR, 5217 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5218 port_id, queue_id, op, epfd, vec); 5219 return rc; 5220 } 5221 5222 return 0; 5223 } 5224 5225 int 5226 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5227 uint16_t queue_id) 5228 { 5229 struct rte_eth_dev *dev; 5230 int ret; 5231 5232 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5233 dev = &rte_eth_devices[port_id]; 5234 5235 ret = eth_dev_validate_rx_queue(dev, queue_id); 5236 if (ret != 0) 5237 return ret; 5238 5239 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5240 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5241 } 5242 5243 int 5244 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5245 uint16_t queue_id) 5246 { 5247 struct rte_eth_dev *dev; 5248 int ret; 5249 5250 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5251 dev = &rte_eth_devices[port_id]; 5252 5253 ret = eth_dev_validate_rx_queue(dev, queue_id); 5254 if (ret != 0) 5255 return ret; 5256 5257 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5258 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5259 } 5260 5261 5262 const struct rte_eth_rxtx_callback * 5263 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5264 rte_rx_callback_fn fn, void *user_param) 5265 { 5266 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5267 rte_errno = ENOTSUP; 5268 return NULL; 5269 #endif 5270 struct rte_eth_dev *dev; 5271 5272 /* check input parameters */ 5273 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5274 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5275 rte_errno = EINVAL; 5276 return NULL; 5277 } 5278 dev = &rte_eth_devices[port_id]; 5279 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5280 rte_errno = EINVAL; 5281 return NULL; 5282 } 5283 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5284 5285 if (cb == NULL) { 5286 rte_errno = ENOMEM; 5287 return NULL; 5288 } 5289 5290 cb->fn.rx = fn; 5291 cb->param = user_param; 5292 5293 rte_spinlock_lock(ð_dev_rx_cb_lock); 5294 /* Add the callbacks in fifo 
order. */ 5295 struct rte_eth_rxtx_callback *tail = 5296 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5297 5298 if (!tail) { 5299 /* Stores to cb->fn and cb->param should complete before 5300 * cb is visible to data plane. 5301 */ 5302 __atomic_store_n( 5303 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5304 cb, __ATOMIC_RELEASE); 5305 5306 } else { 5307 while (tail->next) 5308 tail = tail->next; 5309 /* Stores to cb->fn and cb->param should complete before 5310 * cb is visible to data plane. 5311 */ 5312 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5313 } 5314 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5315 5316 return cb; 5317 } 5318 5319 const struct rte_eth_rxtx_callback * 5320 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5321 rte_rx_callback_fn fn, void *user_param) 5322 { 5323 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5324 rte_errno = ENOTSUP; 5325 return NULL; 5326 #endif 5327 /* check input parameters */ 5328 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5329 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5330 rte_errno = EINVAL; 5331 return NULL; 5332 } 5333 5334 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5335 5336 if (cb == NULL) { 5337 rte_errno = ENOMEM; 5338 return NULL; 5339 } 5340 5341 cb->fn.rx = fn; 5342 cb->param = user_param; 5343 5344 rte_spinlock_lock(ð_dev_rx_cb_lock); 5345 /* Add the callbacks at first position */ 5346 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5347 /* Stores to cb->fn, cb->param and cb->next should complete before 5348 * cb is visible to data plane threads. 5349 */ 5350 __atomic_store_n( 5351 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5352 cb, __ATOMIC_RELEASE); 5353 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5354 5355 return cb; 5356 } 5357 5358 const struct rte_eth_rxtx_callback * 5359 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5360 rte_tx_callback_fn fn, void *user_param) 5361 { 5362 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5363 rte_errno = ENOTSUP; 5364 return NULL; 5365 #endif 5366 struct rte_eth_dev *dev; 5367 5368 /* check input parameters */ 5369 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5370 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5371 rte_errno = EINVAL; 5372 return NULL; 5373 } 5374 5375 dev = &rte_eth_devices[port_id]; 5376 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5377 rte_errno = EINVAL; 5378 return NULL; 5379 } 5380 5381 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5382 5383 if (cb == NULL) { 5384 rte_errno = ENOMEM; 5385 return NULL; 5386 } 5387 5388 cb->fn.tx = fn; 5389 cb->param = user_param; 5390 5391 rte_spinlock_lock(ð_dev_tx_cb_lock); 5392 /* Add the callbacks in fifo order. */ 5393 struct rte_eth_rxtx_callback *tail = 5394 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5395 5396 if (!tail) { 5397 /* Stores to cb->fn and cb->param should complete before 5398 * cb is visible to data plane. 5399 */ 5400 __atomic_store_n( 5401 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5402 cb, __ATOMIC_RELEASE); 5403 5404 } else { 5405 while (tail->next) 5406 tail = tail->next; 5407 /* Stores to cb->fn and cb->param should complete before 5408 * cb is visible to data plane. 
5409 */ 5410 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5411 } 5412 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5413 5414 return cb; 5415 } 5416 5417 int 5418 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5419 const struct rte_eth_rxtx_callback *user_cb) 5420 { 5421 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5422 return -ENOTSUP; 5423 #endif 5424 /* Check input parameters. */ 5425 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5426 if (user_cb == NULL || 5427 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5428 return -EINVAL; 5429 5430 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5431 struct rte_eth_rxtx_callback *cb; 5432 struct rte_eth_rxtx_callback **prev_cb; 5433 int ret = -EINVAL; 5434 5435 rte_spinlock_lock(ð_dev_rx_cb_lock); 5436 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5437 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5438 cb = *prev_cb; 5439 if (cb == user_cb) { 5440 /* Remove the user cb from the callback list. */ 5441 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5442 ret = 0; 5443 break; 5444 } 5445 } 5446 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5447 5448 return ret; 5449 } 5450 5451 int 5452 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5453 const struct rte_eth_rxtx_callback *user_cb) 5454 { 5455 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5456 return -ENOTSUP; 5457 #endif 5458 /* Check input parameters. */ 5459 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5460 if (user_cb == NULL || 5461 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5462 return -EINVAL; 5463 5464 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5465 int ret = -EINVAL; 5466 struct rte_eth_rxtx_callback *cb; 5467 struct rte_eth_rxtx_callback **prev_cb; 5468 5469 rte_spinlock_lock(ð_dev_tx_cb_lock); 5470 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5471 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5472 cb = *prev_cb; 5473 if (cb == user_cb) { 5474 /* Remove the user cb from the callback list. 
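			 * The callback memory itself is not freed here; the
			 * caller must do that only once it is sure no
			 * data-plane thread can still be executing it.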
*/ 5475 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5476 ret = 0; 5477 break; 5478 } 5479 } 5480 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5481 5482 return ret; 5483 } 5484 5485 int 5486 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5487 struct rte_eth_rxq_info *qinfo) 5488 { 5489 struct rte_eth_dev *dev; 5490 5491 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5492 dev = &rte_eth_devices[port_id]; 5493 5494 if (queue_id >= dev->data->nb_rx_queues) { 5495 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5496 return -EINVAL; 5497 } 5498 5499 if (qinfo == NULL) { 5500 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5501 port_id, queue_id); 5502 return -EINVAL; 5503 } 5504 5505 if (dev->data->rx_queues == NULL || 5506 dev->data->rx_queues[queue_id] == NULL) { 5507 RTE_ETHDEV_LOG(ERR, 5508 "Rx queue %"PRIu16" of device with port_id=%" 5509 PRIu16" has not been setup\n", 5510 queue_id, port_id); 5511 return -EINVAL; 5512 } 5513 5514 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5515 RTE_ETHDEV_LOG(INFO, 5516 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5517 queue_id, port_id); 5518 return -EINVAL; 5519 } 5520 5521 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5522 5523 memset(qinfo, 0, sizeof(*qinfo)); 5524 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5525 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5526 5527 return 0; 5528 } 5529 5530 int 5531 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5532 struct rte_eth_txq_info *qinfo) 5533 { 5534 struct rte_eth_dev *dev; 5535 5536 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5537 dev = &rte_eth_devices[port_id]; 5538 5539 if (queue_id >= dev->data->nb_tx_queues) { 5540 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5541 return -EINVAL; 5542 } 5543 5544 if (qinfo == NULL) { 5545 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5546 port_id, queue_id); 5547 return -EINVAL; 5548 } 5549 5550 if (dev->data->tx_queues == NULL || 5551 dev->data->tx_queues[queue_id] == NULL) { 5552 RTE_ETHDEV_LOG(ERR, 5553 "Tx queue %"PRIu16" of device with port_id=%" 5554 PRIu16" has not been setup\n", 5555 queue_id, port_id); 5556 return -EINVAL; 5557 } 5558 5559 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5560 RTE_ETHDEV_LOG(INFO, 5561 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5562 queue_id, port_id); 5563 return -EINVAL; 5564 } 5565 5566 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5567 5568 memset(qinfo, 0, sizeof(*qinfo)); 5569 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5570 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5571 5572 return 0; 5573 } 5574 5575 int 5576 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5577 struct rte_eth_burst_mode *mode) 5578 { 5579 struct rte_eth_dev *dev; 5580 5581 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5582 dev = &rte_eth_devices[port_id]; 5583 5584 if (queue_id >= dev->data->nb_rx_queues) { 5585 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5586 return -EINVAL; 5587 } 5588 5589 if (mode == NULL) { 5590 RTE_ETHDEV_LOG(ERR, 5591 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5592 port_id, queue_id); 5593 return -EINVAL; 5594 } 5595 5596 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5597 memset(mode, 0, sizeof(*mode)); 5598 return eth_err(port_id, 5599 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5600 } 5601 5602 int 5603 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5604 struct rte_eth_burst_mode *mode) 5605 { 5606 struct rte_eth_dev *dev; 5607 5608 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5609 dev = &rte_eth_devices[port_id]; 5610 5611 if (queue_id >= dev->data->nb_tx_queues) { 5612 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5613 return -EINVAL; 5614 } 5615 5616 if (mode == NULL) { 5617 RTE_ETHDEV_LOG(ERR, 5618 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5619 port_id, queue_id); 5620 return -EINVAL; 5621 } 5622 5623 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5624 memset(mode, 0, sizeof(*mode)); 5625 return eth_err(port_id, 5626 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5627 } 5628 5629 int 5630 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5631 struct rte_power_monitor_cond *pmc) 5632 { 5633 struct rte_eth_dev *dev; 5634 5635 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5636 dev = &rte_eth_devices[port_id]; 5637 5638 if (queue_id >= dev->data->nb_rx_queues) { 5639 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5640 return -EINVAL; 5641 } 5642 5643 if (pmc == NULL) { 5644 RTE_ETHDEV_LOG(ERR, 5645 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5646 port_id, queue_id); 5647 return -EINVAL; 5648 } 5649 5650 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5651 return eth_err(port_id, 5652 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5653 } 5654 5655 int 5656 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5657 struct rte_ether_addr *mc_addr_set, 5658 uint32_t nb_mc_addr) 5659 { 5660 struct rte_eth_dev *dev; 5661 5662 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5663 dev = &rte_eth_devices[port_id]; 5664 5665 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5666 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5667 mc_addr_set, nb_mc_addr)); 5668 } 5669 5670 int 5671 rte_eth_timesync_enable(uint16_t port_id) 5672 { 5673 struct rte_eth_dev *dev; 5674 5675 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5676 dev = &rte_eth_devices[port_id]; 5677 5678 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5679 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5680 } 5681 5682 int 5683 rte_eth_timesync_disable(uint16_t port_id) 5684 { 5685 struct rte_eth_dev *dev; 5686 5687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5688 dev = &rte_eth_devices[port_id]; 5689 5690 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5691 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5692 } 5693 5694 int 5695 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5696 uint32_t flags) 5697 { 5698 struct rte_eth_dev *dev; 5699 5700 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5701 dev = &rte_eth_devices[port_id]; 5702 5703 if (timestamp == NULL) { 5704 RTE_ETHDEV_LOG(ERR, 5705 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5706 port_id); 5707 return -EINVAL; 5708 } 5709 5710 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5711 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5712 (dev, timestamp, flags)); 5713 } 5714 5715 int 5716 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5717 struct timespec *timestamp) 5718 { 5719 struct rte_eth_dev *dev; 5720 5721 
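	/*
	 * Validate the port and the output pointer before querying the driver
	 * for the Tx timestamp.
	 */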
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5722 dev = &rte_eth_devices[port_id]; 5723 5724 if (timestamp == NULL) { 5725 RTE_ETHDEV_LOG(ERR, 5726 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5727 port_id); 5728 return -EINVAL; 5729 } 5730 5731 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5732 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5733 (dev, timestamp)); 5734 } 5735 5736 int 5737 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5738 { 5739 struct rte_eth_dev *dev; 5740 5741 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5742 dev = &rte_eth_devices[port_id]; 5743 5744 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5745 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5746 } 5747 5748 int 5749 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5750 { 5751 struct rte_eth_dev *dev; 5752 5753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5754 dev = &rte_eth_devices[port_id]; 5755 5756 if (timestamp == NULL) { 5757 RTE_ETHDEV_LOG(ERR, 5758 "Cannot read ethdev port %u timesync time to NULL\n", 5759 port_id); 5760 return -EINVAL; 5761 } 5762 5763 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5764 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5765 timestamp)); 5766 } 5767 5768 int 5769 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5770 { 5771 struct rte_eth_dev *dev; 5772 5773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5774 dev = &rte_eth_devices[port_id]; 5775 5776 if (timestamp == NULL) { 5777 RTE_ETHDEV_LOG(ERR, 5778 "Cannot write ethdev port %u timesync from NULL time\n", 5779 port_id); 5780 return -EINVAL; 5781 } 5782 5783 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5784 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5785 timestamp)); 5786 } 5787 5788 int 5789 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5790 { 5791 struct rte_eth_dev *dev; 5792 5793 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5794 dev = &rte_eth_devices[port_id]; 5795 5796 if (clock == NULL) { 5797 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5798 port_id); 5799 return -EINVAL; 5800 } 5801 5802 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5803 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5804 } 5805 5806 int 5807 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5808 { 5809 struct rte_eth_dev *dev; 5810 5811 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5812 dev = &rte_eth_devices[port_id]; 5813 5814 if (info == NULL) { 5815 RTE_ETHDEV_LOG(ERR, 5816 "Cannot get ethdev port %u register info to NULL\n", 5817 port_id); 5818 return -EINVAL; 5819 } 5820 5821 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5822 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5823 } 5824 5825 int 5826 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5827 { 5828 struct rte_eth_dev *dev; 5829 5830 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5831 dev = &rte_eth_devices[port_id]; 5832 5833 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5834 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5835 } 5836 5837 int 5838 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5839 { 5840 struct rte_eth_dev *dev; 5841 5842 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
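	/*
	 * Resolve the device, reject a NULL destination and let the driver
	 * copy out the EEPROM contents described by 'info'.
	 */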
5843 dev = &rte_eth_devices[port_id]; 5844 5845 if (info == NULL) { 5846 RTE_ETHDEV_LOG(ERR, 5847 "Cannot get ethdev port %u EEPROM info to NULL\n", 5848 port_id); 5849 return -EINVAL; 5850 } 5851 5852 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5853 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5854 } 5855 5856 int 5857 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5858 { 5859 struct rte_eth_dev *dev; 5860 5861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5862 dev = &rte_eth_devices[port_id]; 5863 5864 if (info == NULL) { 5865 RTE_ETHDEV_LOG(ERR, 5866 "Cannot set ethdev port %u EEPROM from NULL info\n", 5867 port_id); 5868 return -EINVAL; 5869 } 5870 5871 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5872 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5873 } 5874 5875 int 5876 rte_eth_dev_get_module_info(uint16_t port_id, 5877 struct rte_eth_dev_module_info *modinfo) 5878 { 5879 struct rte_eth_dev *dev; 5880 5881 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5882 dev = &rte_eth_devices[port_id]; 5883 5884 if (modinfo == NULL) { 5885 RTE_ETHDEV_LOG(ERR, 5886 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5887 port_id); 5888 return -EINVAL; 5889 } 5890 5891 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5892 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5893 } 5894 5895 int 5896 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5897 struct rte_dev_eeprom_info *info) 5898 { 5899 struct rte_eth_dev *dev; 5900 5901 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5902 dev = &rte_eth_devices[port_id]; 5903 5904 if (info == NULL) { 5905 RTE_ETHDEV_LOG(ERR, 5906 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5907 port_id); 5908 return -EINVAL; 5909 } 5910 5911 if (info->data == NULL) { 5912 RTE_ETHDEV_LOG(ERR, 5913 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5914 port_id); 5915 return -EINVAL; 5916 } 5917 5918 if (info->length == 0) { 5919 RTE_ETHDEV_LOG(ERR, 5920 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5921 port_id); 5922 return -EINVAL; 5923 } 5924 5925 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5926 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5927 } 5928 5929 int 5930 rte_eth_dev_get_dcb_info(uint16_t port_id, 5931 struct rte_eth_dcb_info *dcb_info) 5932 { 5933 struct rte_eth_dev *dev; 5934 5935 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5936 dev = &rte_eth_devices[port_id]; 5937 5938 if (dcb_info == NULL) { 5939 RTE_ETHDEV_LOG(ERR, 5940 "Cannot get ethdev port %u DCB info to NULL\n", 5941 port_id); 5942 return -EINVAL; 5943 } 5944 5945 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5946 5947 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5948 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5949 } 5950 5951 static void 5952 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5953 const struct rte_eth_desc_lim *desc_lim) 5954 { 5955 if (desc_lim->nb_align != 0) 5956 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5957 5958 if (desc_lim->nb_max != 0) 5959 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5960 5961 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5962 } 5963 5964 int 5965 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5966 uint16_t *nb_rx_desc, 5967 uint16_t *nb_tx_desc) 5968 { 5969 struct rte_eth_dev_info dev_info; 5970 int ret; 5971 5972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
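	/*
	 * Fetch the descriptor limits advertised by the driver, then align and
	 * clamp each requested ring size in place. A minimal, illustrative
	 * caller sketch (the 1024 values are hypothetical):
	 *
	 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
	 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	 *	(on success, nb_rxd/nb_txd satisfy the PMD min/max/alignment)
	 */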
5973 5974 ret = rte_eth_dev_info_get(port_id, &dev_info); 5975 if (ret != 0) 5976 return ret; 5977 5978 if (nb_rx_desc != NULL) 5979 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5980 5981 if (nb_tx_desc != NULL) 5982 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5983 5984 return 0; 5985 } 5986 5987 int 5988 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5989 struct rte_eth_hairpin_cap *cap) 5990 { 5991 struct rte_eth_dev *dev; 5992 5993 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5994 dev = &rte_eth_devices[port_id]; 5995 5996 if (cap == NULL) { 5997 RTE_ETHDEV_LOG(ERR, 5998 "Cannot get ethdev port %u hairpin capability to NULL\n", 5999 port_id); 6000 return -EINVAL; 6001 } 6002 6003 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 6004 memset(cap, 0, sizeof(*cap)); 6005 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 6006 } 6007 6008 int 6009 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 6010 { 6011 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 6012 return 1; 6013 return 0; 6014 } 6015 6016 int 6017 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 6018 { 6019 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 6020 return 1; 6021 return 0; 6022 } 6023 6024 int 6025 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 6026 { 6027 struct rte_eth_dev *dev; 6028 6029 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6030 dev = &rte_eth_devices[port_id]; 6031 6032 if (pool == NULL) { 6033 RTE_ETHDEV_LOG(ERR, 6034 "Cannot test ethdev port %u mempool operation from NULL pool\n", 6035 port_id); 6036 return -EINVAL; 6037 } 6038 6039 if (*dev->dev_ops->pool_ops_supported == NULL) 6040 return 1; /* all pools are supported */ 6041 6042 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 6043 } 6044 6045 /** 6046 * A set of values to describe the possible states of a switch domain. 6047 */ 6048 enum rte_eth_switch_domain_state { 6049 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 6050 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 6051 }; 6052 6053 /** 6054 * Array of switch domains available for allocation. Array is sized to 6055 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 6056 * ethdev ports in a single process. 
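 * Entries are handed out on a first-free basis by
 * rte_eth_switch_domain_alloc() below.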
/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fall-thru */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}
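/*
 * Example (illustrative devargs string): the tokeniser above splits
 * "representor=[0-3],foo=bar" into the pairs ("representor", "[0-3]") and
 * ("foo", "bar"); a bracketed list is kept intact so that
 * rte_eth_devargs_parse_representor_ports() can interpret the range, and
 * keys other than "representor" are simply ignored by the parser above.
 */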
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
			 const char *params __rte_unused,
			 struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
			     const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
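/*
 * For reference, ADD_DICT_STAT(stats, ipackets) expands to
 * rte_tel_data_add_dict_u64(d, "ipackets", stats.ipackets), so each basic
 * counter is published under its field name in the telemetry dictionary
 * built by the stats handler below.
 */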
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
			  const char *params,
			  struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
			   const char *params,
			   struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
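/*
 * Illustrative telemetry exchange (the values are made up): a query for
 * "/ethdev/stats,0" returns the dictionary built by the handler above, e.g.
 * {"ipackets": 1024, "opackets": 512, ..., "q_ipackets": [1024, 0, ...]},
 * while "/ethdev/xstats,0" returns one entry per extended statistic name.
 */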
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
				const char *params,
				struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
			 const char *params,
			 struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];

	rxq_state = rte_tel_data_alloc();
	if (!rxq_state)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (!txq_state) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
			eth_dev->data->rx_mbuf_alloc_failed);
	snprintf(mac_addr, sizeof(mac_addr), "%02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->mac_addrs->addr_bytes[0],
			eth_dev->data->mac_addrs->addr_bytes[1],
			eth_dev->data->mac_addrs->addr_bytes[2],
			eth_dev->data->mac_addrs->addr_bytes[3],
			eth_dev->data->mac_addrs->addr_bytes[4],
			eth_dev->data->mac_addrs->addr_bytes[5]);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
	rte_tel_data_add_dict_int(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads);
	rte_tel_data_add_dict_int(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads);
	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

	return 0;
}
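/*
 * Illustrative output shape (field values are made up): a query for
 * "/ethdev/link_status,0" yields {"status": "UP", "speed": 10000,
 * "duplex": "full-duplex"}, and "/ethdev/info,0" additionally reports the
 * per-queue state arrays built above under "rxq_state" and "txq_state".
 */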
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
			cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
			peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
			direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
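/*
 * Illustrative usage note: the commands registered below are reachable over
 * the telemetry socket, e.g. with the usertools/dpdk-telemetry.py client a
 * request such as "/ethdev/stats,0" queries port 0 (the port number here is
 * only an example).
 */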
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}