/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
    char name[RTE_ETH_XSTATS_NAME_SIZE];
    unsigned offset;
};

/* Shared memory between primary and secondary processes.
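 *
 * The structure below is placed in a memzone named "rte_eth_dev_data"
 * (MZ_RTE_ETH_DEV_DATA above) so that secondary processes can map the very
 * same per-port data. A minimal sketch of the lookup side, mirroring
 * eth_dev_shared_data_prepare() further below (error handling omitted):
 *
 *     const struct rte_memzone *mz = rte_memzone_lookup("rte_eth_dev_data");
 *     if (mz != NULL)
 *         eth_dev_shared_data = mz->addr;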
*/ 69 static struct { 70 uint64_t next_owner_id; 71 rte_spinlock_t ownership_lock; 72 struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; 73 } *eth_dev_shared_data; 74 75 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = { 76 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 77 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 78 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 79 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 80 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 81 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 82 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 83 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 84 rx_nombuf)}, 85 }; 86 87 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings) 88 89 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = { 90 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 91 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 92 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 93 }; 94 95 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings) 96 97 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = { 98 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 99 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 100 }; 101 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings) 102 103 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 104 { RTE_ETH_RX_OFFLOAD_##_name, #_name } 105 106 static const struct { 107 uint64_t offload; 108 const char *name; 109 } eth_dev_rx_offload_names[] = { 110 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 111 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 112 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 113 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 114 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 115 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 116 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 117 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 118 RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT), 119 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 120 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 121 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 122 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 123 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 124 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 125 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 126 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 127 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 128 RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT), 129 }; 130 131 #undef RTE_RX_OFFLOAD_BIT2STR 132 #undef RTE_ETH_RX_OFFLOAD_BIT2STR 133 134 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 135 { RTE_ETH_TX_OFFLOAD_##_name, #_name } 136 137 static const struct { 138 uint64_t offload; 139 const char *name; 140 } eth_dev_tx_offload_names[] = { 141 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 142 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 143 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 144 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 145 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 146 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 147 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 148 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 149 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 150 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 151 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 152 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 153 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 154 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 155 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 156 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 157 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 158 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 159 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 160 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 161 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 162 
RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP), 163 }; 164 165 #undef RTE_TX_OFFLOAD_BIT2STR 166 167 static const struct { 168 uint64_t offload; 169 const char *name; 170 } rte_eth_dev_capa_names[] = { 171 {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"}, 172 {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"}, 173 {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"}, 174 }; 175 176 /** 177 * The user application callback description. 178 * 179 * It contains callback address to be registered by user application, 180 * the pointer to the parameters for callback, and the event type. 181 */ 182 struct rte_eth_dev_callback { 183 TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ 184 rte_eth_dev_cb_fn cb_fn; /**< Callback address */ 185 void *cb_arg; /**< Parameter for callback */ 186 void *ret_param; /**< Return parameter */ 187 enum rte_eth_event_type event; /**< Interrupt event type */ 188 uint32_t active; /**< Callback is executing */ 189 }; 190 191 enum { 192 STAT_QMAP_TX = 0, 193 STAT_QMAP_RX 194 }; 195 196 int 197 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 198 { 199 int ret; 200 struct rte_devargs devargs; 201 const char *bus_param_key; 202 char *bus_str = NULL; 203 char *cls_str = NULL; 204 int str_size; 205 206 if (iter == NULL) { 207 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n"); 208 return -EINVAL; 209 } 210 211 if (devargs_str == NULL) { 212 RTE_ETHDEV_LOG(ERR, 213 "Cannot initialize iterator from NULL device description string\n"); 214 return -EINVAL; 215 } 216 217 memset(iter, 0, sizeof(*iter)); 218 memset(&devargs, 0, sizeof(devargs)); 219 220 /* 221 * The devargs string may use various syntaxes: 222 * - 0000:08:00.0,representor=[1-3] 223 * - pci:0000:06:00.0,representor=[0,5] 224 * - class=eth,mac=00:11:22:33:44:55 225 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 226 */ 227 228 /* 229 * Handle pure class filter (i.e. without any bus-level argument), 230 * from future new syntax. 231 * rte_devargs_parse() is not yet supporting the new syntax, 232 * that's why this simple case is temporarily parsed here. 233 */ 234 #define iter_anybus_str "class=eth," 235 if (strncmp(devargs_str, iter_anybus_str, 236 strlen(iter_anybus_str)) == 0) { 237 iter->cls_str = devargs_str + strlen(iter_anybus_str); 238 goto end; 239 } 240 241 /* Split bus, device and parameters. */ 242 ret = rte_devargs_parse(&devargs, devargs_str); 243 if (ret != 0) 244 goto error; 245 246 /* 247 * Assume parameters of old syntax can match only at ethdev level. 248 * Extra parameters will be ignored, thanks to "+" prefix. 249 */ 250 str_size = strlen(devargs.args) + 2; 251 cls_str = malloc(str_size); 252 if (cls_str == NULL) { 253 ret = -ENOMEM; 254 goto error; 255 } 256 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 257 if (ret != str_size - 1) { 258 ret = -EINVAL; 259 goto error; 260 } 261 iter->cls_str = cls_str; 262 263 iter->bus = devargs.bus; 264 if (iter->bus->dev_iterate == NULL) { 265 ret = -ENOTSUP; 266 goto error; 267 } 268 269 /* Convert bus args to new syntax for use with new API dev_iterate. 
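 *
 * Illustrative values only: a PCI devargs string such as
 * "0000:08:00.0,representor=[1-3]" ends up as bus_str "addr=0000:08:00.0"
 * with cls_str "+representor=[1-3]", while a vdev string such as
 * "net_tap0,iface=tap0" yields bus_str "name=net_tap0" and cls_str
 * "+iface=tap0".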
*/ 270 if ((strcmp(iter->bus->name, "vdev") == 0) || 271 (strcmp(iter->bus->name, "fslmc") == 0) || 272 (strcmp(iter->bus->name, "dpaa_bus") == 0)) { 273 bus_param_key = "name"; 274 } else if (strcmp(iter->bus->name, "pci") == 0) { 275 bus_param_key = "addr"; 276 } else { 277 ret = -ENOTSUP; 278 goto error; 279 } 280 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 281 bus_str = malloc(str_size); 282 if (bus_str == NULL) { 283 ret = -ENOMEM; 284 goto error; 285 } 286 ret = snprintf(bus_str, str_size, "%s=%s", 287 bus_param_key, devargs.name); 288 if (ret != str_size - 1) { 289 ret = -EINVAL; 290 goto error; 291 } 292 iter->bus_str = bus_str; 293 294 end: 295 iter->cls = rte_class_find_by_name("eth"); 296 rte_devargs_reset(&devargs); 297 return 0; 298 299 error: 300 if (ret == -ENOTSUP) 301 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 302 iter->bus->name); 303 rte_devargs_reset(&devargs); 304 free(bus_str); 305 free(cls_str); 306 return ret; 307 } 308 309 uint16_t 310 rte_eth_iterator_next(struct rte_dev_iterator *iter) 311 { 312 if (iter == NULL) { 313 RTE_ETHDEV_LOG(ERR, 314 "Cannot get next device from NULL iterator\n"); 315 return RTE_MAX_ETHPORTS; 316 } 317 318 if (iter->cls == NULL) /* invalid ethdev iterator */ 319 return RTE_MAX_ETHPORTS; 320 321 do { /* loop to try all matching rte_device */ 322 /* If not pure ethdev filter and */ 323 if (iter->bus != NULL && 324 /* not in middle of rte_eth_dev iteration, */ 325 iter->class_device == NULL) { 326 /* get next rte_device to try. */ 327 iter->device = iter->bus->dev_iterate( 328 iter->device, iter->bus_str, iter); 329 if (iter->device == NULL) 330 break; /* no more rte_device candidate */ 331 } 332 /* A device is matching bus part, need to check ethdev part. */ 333 iter->class_device = iter->cls->dev_iterate( 334 iter->class_device, iter->cls_str, iter); 335 if (iter->class_device != NULL) 336 return eth_dev_to_id(iter->class_device); /* match */ 337 } while (iter->bus != NULL); /* need to try next rte_device */ 338 339 /* No more ethdev port to iterate. */ 340 rte_eth_iterator_cleanup(iter); 341 return RTE_MAX_ETHPORTS; 342 } 343 344 void 345 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 346 { 347 if (iter == NULL) { 348 RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n"); 349 return; 350 } 351 352 if (iter->bus_str == NULL) 353 return; /* nothing to free in pure class filter */ 354 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 355 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 356 memset(iter, 0, sizeof(*iter)); 357 } 358 359 uint16_t 360 rte_eth_find_next(uint16_t port_id) 361 { 362 while (port_id < RTE_MAX_ETHPORTS && 363 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) 364 port_id++; 365 366 if (port_id >= RTE_MAX_ETHPORTS) 367 return RTE_MAX_ETHPORTS; 368 369 return port_id; 370 } 371 372 /* 373 * Macro to iterate over all valid ports for internal usage. 374 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 
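 *
 * The public counterpart is the iterator implemented above. A minimal usage
 * sketch, assuming "devargs" names an existing device and handle_port() is a
 * hypothetical application helper; rte_eth_iterator_cleanup() is only
 * strictly needed when leaving the loop early, since rte_eth_iterator_next()
 * cleans up on exhaustion:
 *
 *     struct rte_dev_iterator it;
 *     uint16_t pid;
 *
 *     if (rte_eth_iterator_init(&it, devargs) == 0) {
 *         for (pid = rte_eth_iterator_next(&it);
 *              pid != RTE_MAX_ETHPORTS;
 *              pid = rte_eth_iterator_next(&it))
 *             handle_port(pid);
 *         rte_eth_iterator_cleanup(&it);
 *     }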
375 */ 376 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 377 for (port_id = rte_eth_find_next(0); \ 378 port_id < RTE_MAX_ETHPORTS; \ 379 port_id = rte_eth_find_next(port_id + 1)) 380 381 uint16_t 382 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 383 { 384 port_id = rte_eth_find_next(port_id); 385 while (port_id < RTE_MAX_ETHPORTS && 386 rte_eth_devices[port_id].device != parent) 387 port_id = rte_eth_find_next(port_id + 1); 388 389 return port_id; 390 } 391 392 uint16_t 393 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 394 { 395 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 396 return rte_eth_find_next_of(port_id, 397 rte_eth_devices[ref_port_id].device); 398 } 399 400 static void 401 eth_dev_shared_data_prepare(void) 402 { 403 const unsigned flags = 0; 404 const struct rte_memzone *mz; 405 406 rte_spinlock_lock(ð_dev_shared_data_lock); 407 408 if (eth_dev_shared_data == NULL) { 409 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 410 /* Allocate port data and ownership shared memory. */ 411 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, 412 sizeof(*eth_dev_shared_data), 413 rte_socket_id(), flags); 414 } else 415 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); 416 if (mz == NULL) 417 rte_panic("Cannot allocate ethdev shared data\n"); 418 419 eth_dev_shared_data = mz->addr; 420 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 421 eth_dev_shared_data->next_owner_id = 422 RTE_ETH_DEV_NO_OWNER + 1; 423 rte_spinlock_init(ð_dev_shared_data->ownership_lock); 424 memset(eth_dev_shared_data->data, 0, 425 sizeof(eth_dev_shared_data->data)); 426 } 427 } 428 429 rte_spinlock_unlock(ð_dev_shared_data_lock); 430 } 431 432 static bool 433 eth_dev_is_allocated(const struct rte_eth_dev *ethdev) 434 { 435 return ethdev->data->name[0] != '\0'; 436 } 437 438 static struct rte_eth_dev * 439 eth_dev_allocated(const char *name) 440 { 441 uint16_t i; 442 443 RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); 444 445 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 446 if (rte_eth_devices[i].data != NULL && 447 strcmp(rte_eth_devices[i].data->name, name) == 0) 448 return &rte_eth_devices[i]; 449 } 450 return NULL; 451 } 452 453 struct rte_eth_dev * 454 rte_eth_dev_allocated(const char *name) 455 { 456 struct rte_eth_dev *ethdev; 457 458 eth_dev_shared_data_prepare(); 459 460 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 461 462 ethdev = eth_dev_allocated(name); 463 464 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 465 466 return ethdev; 467 } 468 469 static uint16_t 470 eth_dev_find_free_port(void) 471 { 472 uint16_t i; 473 474 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 475 /* Using shared name field to find a free port. 
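 *
 * (Usage note for the lookup helpers above.) rte_eth_find_next_of() is
 * normally consumed through the RTE_ETH_FOREACH_DEV_OF() macro from
 * rte_ethdev.h; a minimal sketch listing every port backed by one
 * rte_device (error handling omitted):
 *
 *     uint16_t sib;
 *
 *     RTE_ETH_FOREACH_DEV_OF(sib, ethdev->device)
 *         printf("port %u shares the same device\n", sib);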
 */
        if (eth_dev_shared_data->data[i].name[0] == '\0') {
            RTE_ASSERT(rte_eth_devices[i].state ==
                   RTE_ETH_DEV_UNUSED);
            return i;
        }
    }
    return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
    struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

    eth_dev->data = &eth_dev_shared_data->data[port_id];

    return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
    uint16_t port_id;
    struct rte_eth_dev *eth_dev = NULL;
    size_t name_len;

    name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
    if (name_len == 0) {
        RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
        return NULL;
    }

    if (name_len >= RTE_ETH_NAME_MAX_LEN) {
        RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
        return NULL;
    }

    eth_dev_shared_data_prepare();

    /* Synchronize port creation between primary and secondary threads. */
    rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

    if (eth_dev_allocated(name) != NULL) {
        RTE_ETHDEV_LOG(ERR,
            "Ethernet device with name %s already allocated\n",
            name);
        goto unlock;
    }

    port_id = eth_dev_find_free_port();
    if (port_id == RTE_MAX_ETHPORTS) {
        RTE_ETHDEV_LOG(ERR,
            "Reached maximum number of Ethernet ports\n");
        goto unlock;
    }

    eth_dev = eth_dev_get(port_id);
    strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
    eth_dev->data->port_id = port_id;
    eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
    eth_dev->data->mtu = RTE_ETHER_MTU;
    pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
    rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

    return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port ID both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
    uint16_t i;
    struct rte_eth_dev *eth_dev = NULL;

    eth_dev_shared_data_prepare();

    /* Synchronize port attachment to primary port creation and release.
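 *
 * A condensed sketch of how a driver probe path is expected to choose
 * between rte_eth_dev_allocate() above and rte_eth_dev_attach_secondary()
 * just below ("net_foo0" is an illustrative name, error handling omitted):
 *
 *     if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *         eth_dev = rte_eth_dev_allocate("net_foo0");
 *     else
 *         eth_dev = rte_eth_dev_attach_secondary("net_foo0");
 *     if (eth_dev == NULL)
 *         return -ENOMEM;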
*/ 559 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 560 561 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 562 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) 563 break; 564 } 565 if (i == RTE_MAX_ETHPORTS) { 566 RTE_ETHDEV_LOG(ERR, 567 "Device %s is not driven by the primary process\n", 568 name); 569 } else { 570 eth_dev = eth_dev_get(i); 571 RTE_ASSERT(eth_dev->data->port_id == i); 572 } 573 574 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 575 return eth_dev; 576 } 577 578 int 579 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) 580 { 581 if (eth_dev == NULL) 582 return -EINVAL; 583 584 eth_dev_shared_data_prepare(); 585 586 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 587 rte_eth_dev_callback_process(eth_dev, 588 RTE_ETH_EVENT_DESTROY, NULL); 589 590 eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); 591 592 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 593 594 eth_dev->state = RTE_ETH_DEV_UNUSED; 595 eth_dev->device = NULL; 596 eth_dev->process_private = NULL; 597 eth_dev->intr_handle = NULL; 598 eth_dev->rx_pkt_burst = NULL; 599 eth_dev->tx_pkt_burst = NULL; 600 eth_dev->tx_pkt_prepare = NULL; 601 eth_dev->rx_queue_count = NULL; 602 eth_dev->rx_descriptor_status = NULL; 603 eth_dev->tx_descriptor_status = NULL; 604 eth_dev->dev_ops = NULL; 605 606 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 607 rte_free(eth_dev->data->rx_queues); 608 rte_free(eth_dev->data->tx_queues); 609 rte_free(eth_dev->data->mac_addrs); 610 rte_free(eth_dev->data->hash_mac_addrs); 611 rte_free(eth_dev->data->dev_private); 612 pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); 613 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); 614 } 615 616 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 617 618 return 0; 619 } 620 621 int 622 rte_eth_dev_is_valid_port(uint16_t port_id) 623 { 624 if (port_id >= RTE_MAX_ETHPORTS || 625 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 626 return 0; 627 else 628 return 1; 629 } 630 631 static int 632 eth_is_valid_owner_id(uint64_t owner_id) 633 { 634 if (owner_id == RTE_ETH_DEV_NO_OWNER || 635 eth_dev_shared_data->next_owner_id <= owner_id) 636 return 0; 637 return 1; 638 } 639 640 uint64_t 641 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 642 { 643 port_id = rte_eth_find_next(port_id); 644 while (port_id < RTE_MAX_ETHPORTS && 645 rte_eth_devices[port_id].data->owner.id != owner_id) 646 port_id = rte_eth_find_next(port_id + 1); 647 648 return port_id; 649 } 650 651 int 652 rte_eth_dev_owner_new(uint64_t *owner_id) 653 { 654 if (owner_id == NULL) { 655 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 656 return -EINVAL; 657 } 658 659 eth_dev_shared_data_prepare(); 660 661 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 662 663 *owner_id = eth_dev_shared_data->next_owner_id++; 664 665 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 666 return 0; 667 } 668 669 static int 670 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 671 const struct rte_eth_dev_owner *new_owner) 672 { 673 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 674 struct rte_eth_dev_owner *port_owner; 675 676 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 677 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 678 port_id); 679 return -ENODEV; 680 } 681 682 if (new_owner == NULL) { 683 RTE_ETHDEV_LOG(ERR, 684 "Cannot set ethdev port %u owner from NULL owner\n", 685 port_id); 686 return -EINVAL; 687 } 688 689 if 
(!eth_is_valid_owner_id(new_owner->id) && 690 !eth_is_valid_owner_id(old_owner_id)) { 691 RTE_ETHDEV_LOG(ERR, 692 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 693 old_owner_id, new_owner->id); 694 return -EINVAL; 695 } 696 697 port_owner = &rte_eth_devices[port_id].data->owner; 698 if (port_owner->id != old_owner_id) { 699 RTE_ETHDEV_LOG(ERR, 700 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 701 port_id, port_owner->name, port_owner->id); 702 return -EPERM; 703 } 704 705 /* can not truncate (same structure) */ 706 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 707 708 port_owner->id = new_owner->id; 709 710 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 711 port_id, new_owner->name, new_owner->id); 712 713 return 0; 714 } 715 716 int 717 rte_eth_dev_owner_set(const uint16_t port_id, 718 const struct rte_eth_dev_owner *owner) 719 { 720 int ret; 721 722 eth_dev_shared_data_prepare(); 723 724 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 725 726 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 727 728 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 729 return ret; 730 } 731 732 int 733 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 734 { 735 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 736 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 737 int ret; 738 739 eth_dev_shared_data_prepare(); 740 741 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 742 743 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 744 745 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 746 return ret; 747 } 748 749 int 750 rte_eth_dev_owner_delete(const uint64_t owner_id) 751 { 752 uint16_t port_id; 753 int ret = 0; 754 755 eth_dev_shared_data_prepare(); 756 757 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 758 759 if (eth_is_valid_owner_id(owner_id)) { 760 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { 761 struct rte_eth_dev_data *data = 762 rte_eth_devices[port_id].data; 763 if (data != NULL && data->owner.id == owner_id) 764 memset(&data->owner, 0, 765 sizeof(struct rte_eth_dev_owner)); 766 } 767 RTE_ETHDEV_LOG(NOTICE, 768 "All port owners owned by %016"PRIx64" identifier have removed\n", 769 owner_id); 770 } else { 771 RTE_ETHDEV_LOG(ERR, 772 "Invalid owner ID=%016"PRIx64"\n", 773 owner_id); 774 ret = -EINVAL; 775 } 776 777 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 778 779 return ret; 780 } 781 782 int 783 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 784 { 785 struct rte_eth_dev *ethdev; 786 787 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 788 ethdev = &rte_eth_devices[port_id]; 789 790 if (!eth_dev_is_allocated(ethdev)) { 791 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 792 port_id); 793 return -ENODEV; 794 } 795 796 if (owner == NULL) { 797 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 798 port_id); 799 return -EINVAL; 800 } 801 802 eth_dev_shared_data_prepare(); 803 804 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 805 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 806 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 807 808 return 0; 809 } 810 811 int 812 rte_eth_dev_socket_id(uint16_t port_id) 813 { 814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 815 return rte_eth_devices[port_id].data->numa_node; 816 } 817 818 void * 819 rte_eth_dev_get_sec_ctx(uint16_t port_id) 820 { 821 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 
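/*
 * Usage sketch for the ownership API implemented above: an application takes
 * an owner ID once and then brands the ports it manages. The owner name and
 * error handling below are illustrative only.
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *     uint64_t owner_id;
 *
 *     if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *         owner.id = owner_id;
 *         rte_eth_dev_owner_set(port_id, &owner);
 *         ... use the port exclusively ...
 *         rte_eth_dev_owner_unset(port_id, owner_id);
 *     }
 */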
822 return rte_eth_devices[port_id].security_ctx; 823 } 824 825 uint16_t 826 rte_eth_dev_count_avail(void) 827 { 828 uint16_t p; 829 uint16_t count; 830 831 count = 0; 832 833 RTE_ETH_FOREACH_DEV(p) 834 count++; 835 836 return count; 837 } 838 839 uint16_t 840 rte_eth_dev_count_total(void) 841 { 842 uint16_t port, count = 0; 843 844 RTE_ETH_FOREACH_VALID_DEV(port) 845 count++; 846 847 return count; 848 } 849 850 int 851 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 852 { 853 char *tmp; 854 855 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 856 857 if (name == NULL) { 858 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 859 port_id); 860 return -EINVAL; 861 } 862 863 /* shouldn't check 'rte_eth_devices[i].data', 864 * because it might be overwritten by VDEV PMD */ 865 tmp = eth_dev_shared_data->data[port_id].name; 866 strcpy(name, tmp); 867 return 0; 868 } 869 870 int 871 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 872 { 873 uint16_t pid; 874 875 if (name == NULL) { 876 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 877 return -EINVAL; 878 } 879 880 if (port_id == NULL) { 881 RTE_ETHDEV_LOG(ERR, 882 "Cannot get port ID to NULL for %s\n", name); 883 return -EINVAL; 884 } 885 886 RTE_ETH_FOREACH_VALID_DEV(pid) 887 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 888 *port_id = pid; 889 return 0; 890 } 891 892 return -ENODEV; 893 } 894 895 static int 896 eth_err(uint16_t port_id, int ret) 897 { 898 if (ret == 0) 899 return 0; 900 if (rte_eth_dev_is_removed(port_id)) 901 return -EIO; 902 return ret; 903 } 904 905 static void 906 eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) 907 { 908 void **rxq = dev->data->rx_queues; 909 910 if (rxq[qid] == NULL) 911 return; 912 913 if (dev->dev_ops->rx_queue_release != NULL) 914 (*dev->dev_ops->rx_queue_release)(dev, qid); 915 rxq[qid] = NULL; 916 } 917 918 static void 919 eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) 920 { 921 void **txq = dev->data->tx_queues; 922 923 if (txq[qid] == NULL) 924 return; 925 926 if (dev->dev_ops->tx_queue_release != NULL) 927 (*dev->dev_ops->tx_queue_release)(dev, qid); 928 txq[qid] = NULL; 929 } 930 931 static int 932 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 933 { 934 uint16_t old_nb_queues = dev->data->nb_rx_queues; 935 unsigned i; 936 937 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ 938 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", 939 sizeof(dev->data->rx_queues[0]) * 940 RTE_MAX_QUEUES_PER_PORT, 941 RTE_CACHE_LINE_SIZE); 942 if (dev->data->rx_queues == NULL) { 943 dev->data->nb_rx_queues = 0; 944 return -(ENOMEM); 945 } 946 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ 947 for (i = nb_queues; i < old_nb_queues; i++) 948 eth_dev_rxq_release(dev, i); 949 950 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { 951 for (i = nb_queues; i < old_nb_queues; i++) 952 eth_dev_rxq_release(dev, i); 953 954 rte_free(dev->data->rx_queues); 955 dev->data->rx_queues = NULL; 956 } 957 dev->data->nb_rx_queues = nb_queues; 958 return 0; 959 } 960 961 static int 962 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 963 { 964 uint16_t port_id; 965 966 if (rx_queue_id >= dev->data->nb_rx_queues) { 967 port_id = dev->data->port_id; 968 RTE_ETHDEV_LOG(ERR, 969 "Invalid Rx queue_id=%u of device with port_id=%u\n", 970 rx_queue_id, port_id); 971 return -EINVAL; 972 } 973 974 if 
(dev->data->rx_queues[rx_queue_id] == NULL) { 975 port_id = dev->data->port_id; 976 RTE_ETHDEV_LOG(ERR, 977 "Queue %u of device with port_id=%u has not been setup\n", 978 rx_queue_id, port_id); 979 return -EINVAL; 980 } 981 982 return 0; 983 } 984 985 static int 986 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 987 { 988 uint16_t port_id; 989 990 if (tx_queue_id >= dev->data->nb_tx_queues) { 991 port_id = dev->data->port_id; 992 RTE_ETHDEV_LOG(ERR, 993 "Invalid Tx queue_id=%u of device with port_id=%u\n", 994 tx_queue_id, port_id); 995 return -EINVAL; 996 } 997 998 if (dev->data->tx_queues[tx_queue_id] == NULL) { 999 port_id = dev->data->port_id; 1000 RTE_ETHDEV_LOG(ERR, 1001 "Queue %u of device with port_id=%u has not been setup\n", 1002 tx_queue_id, port_id); 1003 return -EINVAL; 1004 } 1005 1006 return 0; 1007 } 1008 1009 int 1010 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 1011 { 1012 struct rte_eth_dev *dev; 1013 int ret; 1014 1015 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1016 dev = &rte_eth_devices[port_id]; 1017 1018 if (!dev->data->dev_started) { 1019 RTE_ETHDEV_LOG(ERR, 1020 "Port %u must be started before start any queue\n", 1021 port_id); 1022 return -EINVAL; 1023 } 1024 1025 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1026 if (ret != 0) 1027 return ret; 1028 1029 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 1030 1031 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1032 RTE_ETHDEV_LOG(INFO, 1033 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1034 rx_queue_id, port_id); 1035 return -EINVAL; 1036 } 1037 1038 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1039 RTE_ETHDEV_LOG(INFO, 1040 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1041 rx_queue_id, port_id); 1042 return 0; 1043 } 1044 1045 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 1046 } 1047 1048 int 1049 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 1050 { 1051 struct rte_eth_dev *dev; 1052 int ret; 1053 1054 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1055 dev = &rte_eth_devices[port_id]; 1056 1057 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 1058 if (ret != 0) 1059 return ret; 1060 1061 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 1062 1063 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 1064 RTE_ETHDEV_LOG(INFO, 1065 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1066 rx_queue_id, port_id); 1067 return -EINVAL; 1068 } 1069 1070 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1071 RTE_ETHDEV_LOG(INFO, 1072 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1073 rx_queue_id, port_id); 1074 return 0; 1075 } 1076 1077 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 1078 } 1079 1080 int 1081 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 1082 { 1083 struct rte_eth_dev *dev; 1084 int ret; 1085 1086 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1087 dev = &rte_eth_devices[port_id]; 1088 1089 if (!dev->data->dev_started) { 1090 RTE_ETHDEV_LOG(ERR, 1091 "Port %u must be started before start any queue\n", 1092 port_id); 1093 return -EINVAL; 1094 } 1095 1096 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1097 if (ret != 0) 1098 return ret; 1099 1100 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 1101 1102 if 
(rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1103 RTE_ETHDEV_LOG(INFO, 1104 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1105 tx_queue_id, port_id); 1106 return -EINVAL; 1107 } 1108 1109 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 1110 RTE_ETHDEV_LOG(INFO, 1111 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 1112 tx_queue_id, port_id); 1113 return 0; 1114 } 1115 1116 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 1117 } 1118 1119 int 1120 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 1121 { 1122 struct rte_eth_dev *dev; 1123 int ret; 1124 1125 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1126 dev = &rte_eth_devices[port_id]; 1127 1128 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 1129 if (ret != 0) 1130 return ret; 1131 1132 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 1133 1134 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 1135 RTE_ETHDEV_LOG(INFO, 1136 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 1137 tx_queue_id, port_id); 1138 return -EINVAL; 1139 } 1140 1141 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 1142 RTE_ETHDEV_LOG(INFO, 1143 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 1144 tx_queue_id, port_id); 1145 return 0; 1146 } 1147 1148 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 1149 } 1150 1151 static int 1152 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) 1153 { 1154 uint16_t old_nb_queues = dev->data->nb_tx_queues; 1155 unsigned i; 1156 1157 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ 1158 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", 1159 sizeof(dev->data->tx_queues[0]) * 1160 RTE_MAX_QUEUES_PER_PORT, 1161 RTE_CACHE_LINE_SIZE); 1162 if (dev->data->tx_queues == NULL) { 1163 dev->data->nb_tx_queues = 0; 1164 return -(ENOMEM); 1165 } 1166 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ 1167 for (i = nb_queues; i < old_nb_queues; i++) 1168 eth_dev_txq_release(dev, i); 1169 1170 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { 1171 for (i = nb_queues; i < old_nb_queues; i++) 1172 eth_dev_txq_release(dev, i); 1173 1174 rte_free(dev->data->tx_queues); 1175 dev->data->tx_queues = NULL; 1176 } 1177 dev->data->nb_tx_queues = nb_queues; 1178 return 0; 1179 } 1180 1181 uint32_t 1182 rte_eth_speed_bitflag(uint32_t speed, int duplex) 1183 { 1184 switch (speed) { 1185 case RTE_ETH_SPEED_NUM_10M: 1186 return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 1187 case RTE_ETH_SPEED_NUM_100M: 1188 return duplex ? 
RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 1189 case RTE_ETH_SPEED_NUM_1G: 1190 return RTE_ETH_LINK_SPEED_1G; 1191 case RTE_ETH_SPEED_NUM_2_5G: 1192 return RTE_ETH_LINK_SPEED_2_5G; 1193 case RTE_ETH_SPEED_NUM_5G: 1194 return RTE_ETH_LINK_SPEED_5G; 1195 case RTE_ETH_SPEED_NUM_10G: 1196 return RTE_ETH_LINK_SPEED_10G; 1197 case RTE_ETH_SPEED_NUM_20G: 1198 return RTE_ETH_LINK_SPEED_20G; 1199 case RTE_ETH_SPEED_NUM_25G: 1200 return RTE_ETH_LINK_SPEED_25G; 1201 case RTE_ETH_SPEED_NUM_40G: 1202 return RTE_ETH_LINK_SPEED_40G; 1203 case RTE_ETH_SPEED_NUM_50G: 1204 return RTE_ETH_LINK_SPEED_50G; 1205 case RTE_ETH_SPEED_NUM_56G: 1206 return RTE_ETH_LINK_SPEED_56G; 1207 case RTE_ETH_SPEED_NUM_100G: 1208 return RTE_ETH_LINK_SPEED_100G; 1209 case RTE_ETH_SPEED_NUM_200G: 1210 return RTE_ETH_LINK_SPEED_200G; 1211 default: 1212 return 0; 1213 } 1214 } 1215 1216 const char * 1217 rte_eth_dev_rx_offload_name(uint64_t offload) 1218 { 1219 const char *name = "UNKNOWN"; 1220 unsigned int i; 1221 1222 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1223 if (offload == eth_dev_rx_offload_names[i].offload) { 1224 name = eth_dev_rx_offload_names[i].name; 1225 break; 1226 } 1227 } 1228 1229 return name; 1230 } 1231 1232 const char * 1233 rte_eth_dev_tx_offload_name(uint64_t offload) 1234 { 1235 const char *name = "UNKNOWN"; 1236 unsigned int i; 1237 1238 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1239 if (offload == eth_dev_tx_offload_names[i].offload) { 1240 name = eth_dev_tx_offload_names[i].name; 1241 break; 1242 } 1243 } 1244 1245 return name; 1246 } 1247 1248 const char * 1249 rte_eth_dev_capability_name(uint64_t capability) 1250 { 1251 const char *name = "UNKNOWN"; 1252 unsigned int i; 1253 1254 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 1255 if (capability == rte_eth_dev_capa_names[i].offload) { 1256 name = rte_eth_dev_capa_names[i].name; 1257 break; 1258 } 1259 } 1260 1261 return name; 1262 } 1263 1264 static inline int 1265 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1266 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1267 { 1268 int ret = 0; 1269 1270 if (dev_info_size == 0) { 1271 if (config_size != max_rx_pkt_len) { 1272 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1273 " %u != %u is not allowed\n", 1274 port_id, config_size, max_rx_pkt_len); 1275 ret = -EINVAL; 1276 } 1277 } else if (config_size > dev_info_size) { 1278 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1279 "> max allowed value %u\n", port_id, config_size, 1280 dev_info_size); 1281 ret = -EINVAL; 1282 } else if (config_size < RTE_ETHER_MIN_LEN) { 1283 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1284 "< min allowed value %u\n", port_id, config_size, 1285 (unsigned int)RTE_ETHER_MIN_LEN); 1286 ret = -EINVAL; 1287 } 1288 return ret; 1289 } 1290 1291 /* 1292 * Validate offloads that are requested through rte_eth_dev_configure against 1293 * the offloads successfully set by the Ethernet device. 1294 * 1295 * @param port_id 1296 * The port identifier of the Ethernet device. 1297 * @param req_offloads 1298 * The offloads that have been requested through `rte_eth_dev_configure`. 1299 * @param set_offloads 1300 * The offloads successfully set by the Ethernet device. 1301 * @param offload_type 1302 * The offload type i.e. Rx/Tx string. 1303 * @param offload_name 1304 * The function that prints the offload name. 1305 * @return 1306 * - (0) if validation successful. 1307 * - (-EINVAL) if requested offload has been silently disabled. 
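 *
 * Worked example (Rx direction): with
 *     req_offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH
 *     set_offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
 * the XOR below leaves only the RSS_HASH bit set; since that bit is part of
 * req_offloads, the failure is logged and -EINVAL is returned. A bit that is
 * set but was not requested is only reported at DEBUG level.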
1308 * 1309 */ 1310 static int 1311 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1312 uint64_t set_offloads, const char *offload_type, 1313 const char *(*offload_name)(uint64_t)) 1314 { 1315 uint64_t offloads_diff = req_offloads ^ set_offloads; 1316 uint64_t offload; 1317 int ret = 0; 1318 1319 while (offloads_diff != 0) { 1320 /* Check if any offload is requested but not enabled. */ 1321 offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); 1322 if (offload & req_offloads) { 1323 RTE_ETHDEV_LOG(ERR, 1324 "Port %u failed to enable %s offload %s\n", 1325 port_id, offload_type, offload_name(offload)); 1326 ret = -EINVAL; 1327 } 1328 1329 /* Check if offload couldn't be disabled. */ 1330 if (offload & set_offloads) { 1331 RTE_ETHDEV_LOG(DEBUG, 1332 "Port %u %s offload %s is not requested but enabled\n", 1333 port_id, offload_type, offload_name(offload)); 1334 } 1335 1336 offloads_diff &= ~offload; 1337 } 1338 1339 return ret; 1340 } 1341 1342 static uint32_t 1343 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1344 { 1345 uint32_t overhead_len; 1346 1347 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1348 overhead_len = max_rx_pktlen - max_mtu; 1349 else 1350 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1351 1352 return overhead_len; 1353 } 1354 1355 /* rte_eth_dev_info_get() should be called prior to this function */ 1356 static int 1357 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1358 uint16_t mtu) 1359 { 1360 uint32_t overhead_len; 1361 uint32_t frame_size; 1362 1363 if (mtu < dev_info->min_mtu) { 1364 RTE_ETHDEV_LOG(ERR, 1365 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1366 mtu, dev_info->min_mtu, port_id); 1367 return -EINVAL; 1368 } 1369 if (mtu > dev_info->max_mtu) { 1370 RTE_ETHDEV_LOG(ERR, 1371 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1372 mtu, dev_info->max_mtu, port_id); 1373 return -EINVAL; 1374 } 1375 1376 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1377 dev_info->max_mtu); 1378 frame_size = mtu + overhead_len; 1379 if (frame_size < RTE_ETHER_MIN_LEN) { 1380 RTE_ETHDEV_LOG(ERR, 1381 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1382 frame_size, RTE_ETHER_MIN_LEN, port_id); 1383 return -EINVAL; 1384 } 1385 1386 if (frame_size > dev_info->max_rx_pktlen) { 1387 RTE_ETHDEV_LOG(ERR, 1388 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1389 frame_size, dev_info->max_rx_pktlen, port_id); 1390 return -EINVAL; 1391 } 1392 1393 return 0; 1394 } 1395 1396 int 1397 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1398 const struct rte_eth_conf *dev_conf) 1399 { 1400 struct rte_eth_dev *dev; 1401 struct rte_eth_dev_info dev_info; 1402 struct rte_eth_conf orig_conf; 1403 int diag; 1404 int ret; 1405 uint16_t old_mtu; 1406 1407 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1408 dev = &rte_eth_devices[port_id]; 1409 1410 if (dev_conf == NULL) { 1411 RTE_ETHDEV_LOG(ERR, 1412 "Cannot configure ethdev port %u from NULL config\n", 1413 port_id); 1414 return -EINVAL; 1415 } 1416 1417 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1418 1419 if (dev->data->dev_started) { 1420 RTE_ETHDEV_LOG(ERR, 1421 "Port %u must be stopped to allow configuration\n", 1422 port_id); 1423 return -EBUSY; 1424 } 1425 1426 /* 1427 * Ensure that "dev_configured" is always 0 each time prepare to do 1428 * dev_configure() to avoid any non-anticipated behaviour. 1429 * And set to 1 when dev_configure() is executed successfully. 
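 *
 * (Worked example for the MTU validation helpers above; values are
 * illustrative.) With max_rx_pktlen = 9600 and max_mtu = 9582 the derived
 * overhead is 18 bytes (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), so an MTU of
 * 1500 gives a frame size of 1518, which must lie within
 * [RTE_ETHER_MIN_LEN, max_rx_pktlen].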
1430 */ 1431 dev->data->dev_configured = 0; 1432 1433 /* Store original config, as rollback required on failure */ 1434 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1435 1436 /* 1437 * Copy the dev_conf parameter into the dev structure. 1438 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1439 */ 1440 if (dev_conf != &dev->data->dev_conf) 1441 memcpy(&dev->data->dev_conf, dev_conf, 1442 sizeof(dev->data->dev_conf)); 1443 1444 /* Backup mtu for rollback */ 1445 old_mtu = dev->data->mtu; 1446 1447 ret = rte_eth_dev_info_get(port_id, &dev_info); 1448 if (ret != 0) 1449 goto rollback; 1450 1451 /* If number of queues specified by application for both Rx and Tx is 1452 * zero, use driver preferred values. This cannot be done individually 1453 * as it is valid for either Tx or Rx (but not both) to be zero. 1454 * If driver does not provide any preferred valued, fall back on 1455 * EAL defaults. 1456 */ 1457 if (nb_rx_q == 0 && nb_tx_q == 0) { 1458 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1459 if (nb_rx_q == 0) 1460 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1461 nb_tx_q = dev_info.default_txportconf.nb_queues; 1462 if (nb_tx_q == 0) 1463 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1464 } 1465 1466 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1467 RTE_ETHDEV_LOG(ERR, 1468 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1469 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1470 ret = -EINVAL; 1471 goto rollback; 1472 } 1473 1474 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1475 RTE_ETHDEV_LOG(ERR, 1476 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1477 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1478 ret = -EINVAL; 1479 goto rollback; 1480 } 1481 1482 /* 1483 * Check that the numbers of Rx and Tx queues are not greater 1484 * than the maximum number of Rx and Tx queues supported by the 1485 * configured device. 1486 */ 1487 if (nb_rx_q > dev_info.max_rx_queues) { 1488 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1489 port_id, nb_rx_q, dev_info.max_rx_queues); 1490 ret = -EINVAL; 1491 goto rollback; 1492 } 1493 1494 if (nb_tx_q > dev_info.max_tx_queues) { 1495 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1496 port_id, nb_tx_q, dev_info.max_tx_queues); 1497 ret = -EINVAL; 1498 goto rollback; 1499 } 1500 1501 /* Check that the device supports requested interrupts */ 1502 if ((dev_conf->intr_conf.lsc == 1) && 1503 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1504 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1505 dev->device->driver->name); 1506 ret = -EINVAL; 1507 goto rollback; 1508 } 1509 if ((dev_conf->intr_conf.rmv == 1) && 1510 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1511 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1512 dev->device->driver->name); 1513 ret = -EINVAL; 1514 goto rollback; 1515 } 1516 1517 if (dev_conf->rxmode.mtu == 0) 1518 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1519 1520 ret = eth_dev_validate_mtu(port_id, &dev_info, 1521 dev->data->dev_conf.rxmode.mtu); 1522 if (ret != 0) 1523 goto rollback; 1524 1525 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1526 1527 /* 1528 * If LRO is enabled, check that the maximum aggregated packet 1529 * size is supported by the configured device. 
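 *
 * Application-side sketch of the case handled here (a max_lro_pkt_size of 0
 * is filled in below from the configured MTU plus L2 overhead; error
 * handling omitted):
 *
 *     struct rte_eth_conf conf = {0};
 *
 *     conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 *     conf.rxmode.max_lro_pkt_size = 0;
 *     ret = rte_eth_dev_configure(port_id, 1, 1, &conf);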
1530 */ 1531 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1532 uint32_t max_rx_pktlen; 1533 uint32_t overhead_len; 1534 1535 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1536 dev_info.max_mtu); 1537 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1538 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1539 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1540 ret = eth_dev_check_lro_pkt_size(port_id, 1541 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1542 max_rx_pktlen, 1543 dev_info.max_lro_pkt_size); 1544 if (ret != 0) 1545 goto rollback; 1546 } 1547 1548 /* Any requested offloading must be within its device capabilities */ 1549 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1550 dev_conf->rxmode.offloads) { 1551 RTE_ETHDEV_LOG(ERR, 1552 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1553 "capabilities 0x%"PRIx64" in %s()\n", 1554 port_id, dev_conf->rxmode.offloads, 1555 dev_info.rx_offload_capa, 1556 __func__); 1557 ret = -EINVAL; 1558 goto rollback; 1559 } 1560 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1561 dev_conf->txmode.offloads) { 1562 RTE_ETHDEV_LOG(ERR, 1563 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1564 "capabilities 0x%"PRIx64" in %s()\n", 1565 port_id, dev_conf->txmode.offloads, 1566 dev_info.tx_offload_capa, 1567 __func__); 1568 ret = -EINVAL; 1569 goto rollback; 1570 } 1571 1572 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1573 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1574 1575 /* Check that device supports requested rss hash functions. */ 1576 if ((dev_info.flow_type_rss_offloads | 1577 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1578 dev_info.flow_type_rss_offloads) { 1579 RTE_ETHDEV_LOG(ERR, 1580 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1581 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1582 dev_info.flow_type_rss_offloads); 1583 ret = -EINVAL; 1584 goto rollback; 1585 } 1586 1587 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1588 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1589 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1590 RTE_ETHDEV_LOG(ERR, 1591 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1592 port_id, 1593 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1594 ret = -EINVAL; 1595 goto rollback; 1596 } 1597 1598 /* 1599 * Setup new number of Rx/Tx queues and reconfigure device. 1600 */ 1601 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1602 if (diag != 0) { 1603 RTE_ETHDEV_LOG(ERR, 1604 "Port%u eth_dev_rx_queue_config = %d\n", 1605 port_id, diag); 1606 ret = diag; 1607 goto rollback; 1608 } 1609 1610 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1611 if (diag != 0) { 1612 RTE_ETHDEV_LOG(ERR, 1613 "Port%u eth_dev_tx_queue_config = %d\n", 1614 port_id, diag); 1615 eth_dev_rx_queue_config(dev, 0); 1616 ret = diag; 1617 goto rollback; 1618 } 1619 1620 diag = (*dev->dev_ops->dev_configure)(dev); 1621 if (diag != 0) { 1622 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1623 port_id, diag); 1624 ret = eth_err(port_id, diag); 1625 goto reset_queues; 1626 } 1627 1628 /* Initialize Rx profiling if enabled at compilation time. 
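 *
 * (Usage note for the offload capability checks above.) Applications are
 * expected to request only what the device advertises; a sketch where
 * wanted_tx_offloads is an application-defined mask:
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     conf.txmode.offloads = wanted_tx_offloads & info.tx_offload_capa;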
*/ 1629 diag = __rte_eth_dev_profile_init(port_id, dev); 1630 if (diag != 0) { 1631 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1632 port_id, diag); 1633 ret = eth_err(port_id, diag); 1634 goto reset_queues; 1635 } 1636 1637 /* Validate Rx offloads. */ 1638 diag = eth_dev_validate_offloads(port_id, 1639 dev_conf->rxmode.offloads, 1640 dev->data->dev_conf.rxmode.offloads, "Rx", 1641 rte_eth_dev_rx_offload_name); 1642 if (diag != 0) { 1643 ret = diag; 1644 goto reset_queues; 1645 } 1646 1647 /* Validate Tx offloads. */ 1648 diag = eth_dev_validate_offloads(port_id, 1649 dev_conf->txmode.offloads, 1650 dev->data->dev_conf.txmode.offloads, "Tx", 1651 rte_eth_dev_tx_offload_name); 1652 if (diag != 0) { 1653 ret = diag; 1654 goto reset_queues; 1655 } 1656 1657 dev->data->dev_configured = 1; 1658 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1659 return 0; 1660 reset_queues: 1661 eth_dev_rx_queue_config(dev, 0); 1662 eth_dev_tx_queue_config(dev, 0); 1663 rollback: 1664 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1665 if (old_mtu != dev->data->mtu) 1666 dev->data->mtu = old_mtu; 1667 1668 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1669 return ret; 1670 } 1671 1672 void 1673 rte_eth_dev_internal_reset(struct rte_eth_dev *dev) 1674 { 1675 if (dev->data->dev_started) { 1676 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", 1677 dev->data->port_id); 1678 return; 1679 } 1680 1681 eth_dev_rx_queue_config(dev, 0); 1682 eth_dev_tx_queue_config(dev, 0); 1683 1684 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); 1685 } 1686 1687 static void 1688 eth_dev_mac_restore(struct rte_eth_dev *dev, 1689 struct rte_eth_dev_info *dev_info) 1690 { 1691 struct rte_ether_addr *addr; 1692 uint16_t i; 1693 uint32_t pool = 0; 1694 uint64_t pool_mask; 1695 1696 /* replay MAC address configuration including default MAC */ 1697 addr = &dev->data->mac_addrs[0]; 1698 if (*dev->dev_ops->mac_addr_set != NULL) 1699 (*dev->dev_ops->mac_addr_set)(dev, addr); 1700 else if (*dev->dev_ops->mac_addr_add != NULL) 1701 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1702 1703 if (*dev->dev_ops->mac_addr_add != NULL) { 1704 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1705 addr = &dev->data->mac_addrs[i]; 1706 1707 /* skip zero address */ 1708 if (rte_is_zero_ether_addr(addr)) 1709 continue; 1710 1711 pool = 0; 1712 pool_mask = dev->data->mac_pool_sel[i]; 1713 1714 do { 1715 if (pool_mask & UINT64_C(1)) 1716 (*dev->dev_ops->mac_addr_add)(dev, 1717 addr, i, pool); 1718 pool_mask >>= 1; 1719 pool++; 1720 } while (pool_mask); 1721 } 1722 } 1723 } 1724 1725 static int 1726 eth_dev_config_restore(struct rte_eth_dev *dev, 1727 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1728 { 1729 int ret; 1730 1731 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1732 eth_dev_mac_restore(dev, dev_info); 1733 1734 /* replay promiscuous configuration */ 1735 /* 1736 * use callbacks directly since we don't need port_id check and 1737 * would like to bypass the same value set 1738 */ 1739 if (rte_eth_promiscuous_get(port_id) == 1 && 1740 *dev->dev_ops->promiscuous_enable != NULL) { 1741 ret = eth_err(port_id, 1742 (*dev->dev_ops->promiscuous_enable)(dev)); 1743 if (ret != 0 && ret != -ENOTSUP) { 1744 RTE_ETHDEV_LOG(ERR, 1745 "Failed to enable promiscuous mode for device (port %u): %s\n", 1746 port_id, rte_strerror(-ret)); 1747 return ret; 1748 } 1749 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1750 
*dev->dev_ops->promiscuous_disable != NULL) { 1751 ret = eth_err(port_id, 1752 (*dev->dev_ops->promiscuous_disable)(dev)); 1753 if (ret != 0 && ret != -ENOTSUP) { 1754 RTE_ETHDEV_LOG(ERR, 1755 "Failed to disable promiscuous mode for device (port %u): %s\n", 1756 port_id, rte_strerror(-ret)); 1757 return ret; 1758 } 1759 } 1760 1761 /* replay all multicast configuration */ 1762 /* 1763 * use callbacks directly since we don't need port_id check and 1764 * would like to bypass the same value set 1765 */ 1766 if (rte_eth_allmulticast_get(port_id) == 1 && 1767 *dev->dev_ops->allmulticast_enable != NULL) { 1768 ret = eth_err(port_id, 1769 (*dev->dev_ops->allmulticast_enable)(dev)); 1770 if (ret != 0 && ret != -ENOTSUP) { 1771 RTE_ETHDEV_LOG(ERR, 1772 "Failed to enable allmulticast mode for device (port %u): %s\n", 1773 port_id, rte_strerror(-ret)); 1774 return ret; 1775 } 1776 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1777 *dev->dev_ops->allmulticast_disable != NULL) { 1778 ret = eth_err(port_id, 1779 (*dev->dev_ops->allmulticast_disable)(dev)); 1780 if (ret != 0 && ret != -ENOTSUP) { 1781 RTE_ETHDEV_LOG(ERR, 1782 "Failed to disable allmulticast mode for device (port %u): %s\n", 1783 port_id, rte_strerror(-ret)); 1784 return ret; 1785 } 1786 } 1787 1788 return 0; 1789 } 1790 1791 int 1792 rte_eth_dev_start(uint16_t port_id) 1793 { 1794 struct rte_eth_dev *dev; 1795 struct rte_eth_dev_info dev_info; 1796 int diag; 1797 int ret, ret_stop; 1798 1799 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1800 dev = &rte_eth_devices[port_id]; 1801 1802 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1803 1804 if (dev->data->dev_configured == 0) { 1805 RTE_ETHDEV_LOG(INFO, 1806 "Device with port_id=%"PRIu16" is not configured.\n", 1807 port_id); 1808 return -EINVAL; 1809 } 1810 1811 if (dev->data->dev_started != 0) { 1812 RTE_ETHDEV_LOG(INFO, 1813 "Device with port_id=%"PRIu16" already started\n", 1814 port_id); 1815 return 0; 1816 } 1817 1818 ret = rte_eth_dev_info_get(port_id, &dev_info); 1819 if (ret != 0) 1820 return ret; 1821 1822 /* Lets restore MAC now if device does not support live change */ 1823 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1824 eth_dev_mac_restore(dev, &dev_info); 1825 1826 diag = (*dev->dev_ops->dev_start)(dev); 1827 if (diag == 0) 1828 dev->data->dev_started = 1; 1829 else 1830 return eth_err(port_id, diag); 1831 1832 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1833 if (ret != 0) { 1834 RTE_ETHDEV_LOG(ERR, 1835 "Error during restoring configuration for device (port %u): %s\n", 1836 port_id, rte_strerror(-ret)); 1837 ret_stop = rte_eth_dev_stop(port_id); 1838 if (ret_stop != 0) { 1839 RTE_ETHDEV_LOG(ERR, 1840 "Failed to stop device (port %u): %s\n", 1841 port_id, rte_strerror(-ret_stop)); 1842 } 1843 1844 return ret; 1845 } 1846 1847 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1848 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1849 (*dev->dev_ops->link_update)(dev, 0); 1850 } 1851 1852 /* expose selection of PMD fast-path functions */ 1853 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1854 1855 rte_ethdev_trace_start(port_id); 1856 return 0; 1857 } 1858 1859 int 1860 rte_eth_dev_stop(uint16_t port_id) 1861 { 1862 struct rte_eth_dev *dev; 1863 int ret; 1864 1865 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1866 dev = &rte_eth_devices[port_id]; 1867 1868 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); 1869 1870 if (dev->data->dev_started == 0) { 1871 RTE_ETHDEV_LOG(INFO, 1872 "Device with 
port_id=%"PRIu16" already stopped\n", 1873 port_id); 1874 return 0; 1875 } 1876 1877 /* point fast-path functions to dummy ones */ 1878 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1879 1880 dev->data->dev_started = 0; 1881 ret = (*dev->dev_ops->dev_stop)(dev); 1882 rte_ethdev_trace_stop(port_id, ret); 1883 1884 return ret; 1885 } 1886 1887 int 1888 rte_eth_dev_set_link_up(uint16_t port_id) 1889 { 1890 struct rte_eth_dev *dev; 1891 1892 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1893 dev = &rte_eth_devices[port_id]; 1894 1895 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1896 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1897 } 1898 1899 int 1900 rte_eth_dev_set_link_down(uint16_t port_id) 1901 { 1902 struct rte_eth_dev *dev; 1903 1904 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1905 dev = &rte_eth_devices[port_id]; 1906 1907 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1908 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1909 } 1910 1911 int 1912 rte_eth_dev_close(uint16_t port_id) 1913 { 1914 struct rte_eth_dev *dev; 1915 int firsterr, binerr; 1916 int *lasterr = &firsterr; 1917 1918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1919 dev = &rte_eth_devices[port_id]; 1920 1921 if (dev->data->dev_started) { 1922 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1923 port_id); 1924 return -EINVAL; 1925 } 1926 1927 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1928 *lasterr = (*dev->dev_ops->dev_close)(dev); 1929 if (*lasterr != 0) 1930 lasterr = &binerr; 1931 1932 rte_ethdev_trace_close(port_id); 1933 *lasterr = rte_eth_dev_release_port(dev); 1934 1935 return firsterr; 1936 } 1937 1938 int 1939 rte_eth_dev_reset(uint16_t port_id) 1940 { 1941 struct rte_eth_dev *dev; 1942 int ret; 1943 1944 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1945 dev = &rte_eth_devices[port_id]; 1946 1947 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1948 1949 ret = rte_eth_dev_stop(port_id); 1950 if (ret != 0) { 1951 RTE_ETHDEV_LOG(ERR, 1952 "Failed to stop device (port %u) before reset: %s - ignore\n", 1953 port_id, rte_strerror(-ret)); 1954 } 1955 ret = dev->dev_ops->dev_reset(dev); 1956 1957 return eth_err(port_id, ret); 1958 } 1959 1960 int 1961 rte_eth_dev_is_removed(uint16_t port_id) 1962 { 1963 struct rte_eth_dev *dev; 1964 int ret; 1965 1966 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1967 dev = &rte_eth_devices[port_id]; 1968 1969 if (dev->state == RTE_ETH_DEV_REMOVED) 1970 return 1; 1971 1972 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1973 1974 ret = dev->dev_ops->is_removed(dev); 1975 if (ret != 0) 1976 /* Device is physically removed. */ 1977 dev->state = RTE_ETH_DEV_REMOVED; 1978 1979 return ret; 1980 } 1981 1982 static int 1983 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1984 uint16_t n_seg, uint32_t *mbp_buf_size, 1985 const struct rte_eth_dev_info *dev_info) 1986 { 1987 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1988 struct rte_mempool *mp_first; 1989 uint32_t offset_mask; 1990 uint16_t seg_idx; 1991 1992 if (n_seg > seg_capa->max_nseg) { 1993 RTE_ETHDEV_LOG(ERR, 1994 "Requested Rx segments %u exceed supported %u\n", 1995 n_seg, seg_capa->max_nseg); 1996 return -EINVAL; 1997 } 1998 /* 1999 * Check the sizes and offsets against buffer sizes 2000 * for each segment specified in extended configuration. 
2001 */ 2002 mp_first = rx_seg[0].mp; 2003 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 2004 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 2005 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 2006 uint32_t length = rx_seg[seg_idx].length; 2007 uint32_t offset = rx_seg[seg_idx].offset; 2008 2009 if (mpl == NULL) { 2010 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 2011 return -EINVAL; 2012 } 2013 if (seg_idx != 0 && mp_first != mpl && 2014 seg_capa->multi_pools == 0) { 2015 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 2016 return -ENOTSUP; 2017 } 2018 if (offset != 0) { 2019 if (seg_capa->offset_allowed == 0) { 2020 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 2021 return -ENOTSUP; 2022 } 2023 if (offset & offset_mask) { 2024 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2025 offset, 2026 seg_capa->offset_align_log2); 2027 return -EINVAL; 2028 } 2029 } 2030 if (mpl->private_data_size < 2031 sizeof(struct rte_pktmbuf_pool_private)) { 2032 RTE_ETHDEV_LOG(ERR, 2033 "%s private_data_size %u < %u\n", 2034 mpl->name, mpl->private_data_size, 2035 (unsigned int)sizeof 2036 (struct rte_pktmbuf_pool_private)); 2037 return -ENOSPC; 2038 } 2039 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2040 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2041 length = length != 0 ? length : *mbp_buf_size; 2042 if (*mbp_buf_size < length + offset) { 2043 RTE_ETHDEV_LOG(ERR, 2044 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2045 mpl->name, *mbp_buf_size, 2046 length + offset, length, offset); 2047 return -EINVAL; 2048 } 2049 } 2050 return 0; 2051 } 2052 2053 int 2054 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2055 uint16_t nb_rx_desc, unsigned int socket_id, 2056 const struct rte_eth_rxconf *rx_conf, 2057 struct rte_mempool *mp) 2058 { 2059 int ret; 2060 uint32_t mbp_buf_size; 2061 struct rte_eth_dev *dev; 2062 struct rte_eth_dev_info dev_info; 2063 struct rte_eth_rxconf local_conf; 2064 2065 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2066 dev = &rte_eth_devices[port_id]; 2067 2068 if (rx_queue_id >= dev->data->nb_rx_queues) { 2069 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2070 return -EINVAL; 2071 } 2072 2073 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2074 2075 ret = rte_eth_dev_info_get(port_id, &dev_info); 2076 if (ret != 0) 2077 return ret; 2078 2079 if (mp != NULL) { 2080 /* Single pool configuration check. */ 2081 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2082 RTE_ETHDEV_LOG(ERR, 2083 "Ambiguous segment configuration\n"); 2084 return -EINVAL; 2085 } 2086 /* 2087 * Check the size of the mbuf data buffer, this value 2088 * must be provided in the private data of the memory pool. 2089 * First check that the memory pool(s) has a valid private data. 
2090 */ 2091 if (mp->private_data_size < 2092 sizeof(struct rte_pktmbuf_pool_private)) { 2093 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2094 mp->name, mp->private_data_size, 2095 (unsigned int) 2096 sizeof(struct rte_pktmbuf_pool_private)); 2097 return -ENOSPC; 2098 } 2099 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2100 if (mbp_buf_size < dev_info.min_rx_bufsize + 2101 RTE_PKTMBUF_HEADROOM) { 2102 RTE_ETHDEV_LOG(ERR, 2103 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2104 mp->name, mbp_buf_size, 2105 RTE_PKTMBUF_HEADROOM + 2106 dev_info.min_rx_bufsize, 2107 RTE_PKTMBUF_HEADROOM, 2108 dev_info.min_rx_bufsize); 2109 return -EINVAL; 2110 } 2111 } else { 2112 const struct rte_eth_rxseg_split *rx_seg; 2113 uint16_t n_seg; 2114 2115 /* Extended multi-segment configuration check. */ 2116 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2117 RTE_ETHDEV_LOG(ERR, 2118 "Memory pool is null and no extended configuration provided\n"); 2119 return -EINVAL; 2120 } 2121 2122 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2123 n_seg = rx_conf->rx_nseg; 2124 2125 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2126 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2127 &mbp_buf_size, 2128 &dev_info); 2129 if (ret != 0) 2130 return ret; 2131 } else { 2132 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2133 return -EINVAL; 2134 } 2135 } 2136 2137 /* Use default specified by driver, if nb_rx_desc is zero */ 2138 if (nb_rx_desc == 0) { 2139 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2140 /* If driver default is also zero, fall back on EAL default */ 2141 if (nb_rx_desc == 0) 2142 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2143 } 2144 2145 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2146 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2147 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2148 2149 RTE_ETHDEV_LOG(ERR, 2150 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2151 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2152 dev_info.rx_desc_lim.nb_min, 2153 dev_info.rx_desc_lim.nb_align); 2154 return -EINVAL; 2155 } 2156 2157 if (dev->data->dev_started && 2158 !(dev_info.dev_capa & 2159 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2160 return -EBUSY; 2161 2162 if (dev->data->dev_started && 2163 (dev->data->rx_queue_state[rx_queue_id] != 2164 RTE_ETH_QUEUE_STATE_STOPPED)) 2165 return -EBUSY; 2166 2167 eth_dev_rxq_release(dev, rx_queue_id); 2168 2169 if (rx_conf == NULL) 2170 rx_conf = &dev_info.default_rxconf; 2171 2172 local_conf = *rx_conf; 2173 2174 /* 2175 * If an offloading has already been enabled in 2176 * rte_eth_dev_configure(), it has been enabled on all queues, 2177 * so there is no need to enable it in this queue again. 2178 * The local_conf.offloads input to underlying PMD only carries 2179 * those offloadings which are only enabled on this queue and 2180 * not enabled on all queues. 2181 */ 2182 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2183 2184 /* 2185 * New added offloadings for this queue are those not enabled in 2186 * rte_eth_dev_configure() and they must be per-queue type. 2187 * A pure per-port offloading can't be enabled on a queue while 2188 * disabled on another queue. A pure per-port offloading can't 2189 * be enabled for any queue as new added one if it hasn't been 2190 * enabled in rte_eth_dev_configure(). 
2191 */ 2192 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2193 local_conf.offloads) { 2194 RTE_ETHDEV_LOG(ERR, 2195 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2196 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2197 port_id, rx_queue_id, local_conf.offloads, 2198 dev_info.rx_queue_offload_capa, 2199 __func__); 2200 return -EINVAL; 2201 } 2202 2203 if (local_conf.share_group > 0 && 2204 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2205 RTE_ETHDEV_LOG(ERR, 2206 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2207 port_id, rx_queue_id, local_conf.share_group); 2208 return -EINVAL; 2209 } 2210 2211 /* 2212 * If LRO is enabled, check that the maximum aggregated packet 2213 * size is supported by the configured device. 2214 */ 2215 /* Get the real Ethernet overhead length */ 2216 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2217 uint32_t overhead_len; 2218 uint32_t max_rx_pktlen; 2219 int ret; 2220 2221 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2222 dev_info.max_mtu); 2223 max_rx_pktlen = dev->data->mtu + overhead_len; 2224 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2225 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2226 ret = eth_dev_check_lro_pkt_size(port_id, 2227 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2228 max_rx_pktlen, 2229 dev_info.max_lro_pkt_size); 2230 if (ret != 0) 2231 return ret; 2232 } 2233 2234 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2235 socket_id, &local_conf, mp); 2236 if (!ret) { 2237 if (!dev->data->min_rx_buf_size || 2238 dev->data->min_rx_buf_size > mbp_buf_size) 2239 dev->data->min_rx_buf_size = mbp_buf_size; 2240 } 2241 2242 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2243 rx_conf, ret); 2244 return eth_err(port_id, ret); 2245 } 2246 2247 int 2248 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2249 uint16_t nb_rx_desc, 2250 const struct rte_eth_hairpin_conf *conf) 2251 { 2252 int ret; 2253 struct rte_eth_dev *dev; 2254 struct rte_eth_hairpin_cap cap; 2255 int i; 2256 int count; 2257 2258 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2259 dev = &rte_eth_devices[port_id]; 2260 2261 if (rx_queue_id >= dev->data->nb_rx_queues) { 2262 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2263 return -EINVAL; 2264 } 2265 2266 if (conf == NULL) { 2267 RTE_ETHDEV_LOG(ERR, 2268 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2269 port_id); 2270 return -EINVAL; 2271 } 2272 2273 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2274 if (ret != 0) 2275 return ret; 2276 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2277 -ENOTSUP); 2278 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2279 if (nb_rx_desc == 0) 2280 nb_rx_desc = cap.max_nb_desc; 2281 if (nb_rx_desc > cap.max_nb_desc) { 2282 RTE_ETHDEV_LOG(ERR, 2283 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2284 nb_rx_desc, cap.max_nb_desc); 2285 return -EINVAL; 2286 } 2287 if (conf->peer_count > cap.max_rx_2_tx) { 2288 RTE_ETHDEV_LOG(ERR, 2289 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2290 conf->peer_count, cap.max_rx_2_tx); 2291 return -EINVAL; 2292 } 2293 if (conf->peer_count == 0) { 2294 RTE_ETHDEV_LOG(ERR, 2295 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2296 conf->peer_count); 2297 return -EINVAL; 2298 } 2299 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2300 cap.max_nb_queues != UINT16_MAX; i++) { 2301 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2302 count++; 2303 } 2304 if (count > cap.max_nb_queues) { 2305 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", 2306 cap.max_nb_queues); 2307 return -EINVAL; 2308 } 2309 if (dev->data->dev_started) 2310 return -EBUSY; 2311 eth_dev_rxq_release(dev, rx_queue_id); 2312 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2313 nb_rx_desc, conf); 2314 if (ret == 0) 2315 dev->data->rx_queue_state[rx_queue_id] = 2316 RTE_ETH_QUEUE_STATE_HAIRPIN; 2317 return eth_err(port_id, ret); 2318 } 2319 2320 int 2321 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2322 uint16_t nb_tx_desc, unsigned int socket_id, 2323 const struct rte_eth_txconf *tx_conf) 2324 { 2325 struct rte_eth_dev *dev; 2326 struct rte_eth_dev_info dev_info; 2327 struct rte_eth_txconf local_conf; 2328 int ret; 2329 2330 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2331 dev = &rte_eth_devices[port_id]; 2332 2333 if (tx_queue_id >= dev->data->nb_tx_queues) { 2334 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2335 return -EINVAL; 2336 } 2337 2338 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2339 2340 ret = rte_eth_dev_info_get(port_id, &dev_info); 2341 if (ret != 0) 2342 return ret; 2343 2344 /* Use default specified by driver, if nb_tx_desc is zero */ 2345 if (nb_tx_desc == 0) { 2346 nb_tx_desc = dev_info.default_txportconf.ring_size; 2347 /* If driver default is zero, fall back on EAL default */ 2348 if (nb_tx_desc == 0) 2349 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2350 } 2351 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2352 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2353 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2354 RTE_ETHDEV_LOG(ERR, 2355 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2356 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2357 dev_info.tx_desc_lim.nb_min, 2358 dev_info.tx_desc_lim.nb_align); 2359 return -EINVAL; 2360 } 2361 2362 if (dev->data->dev_started && 2363 !(dev_info.dev_capa & 2364 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2365 return -EBUSY; 2366 2367 if (dev->data->dev_started && 2368 (dev->data->tx_queue_state[tx_queue_id] != 2369 RTE_ETH_QUEUE_STATE_STOPPED)) 2370 return -EBUSY; 2371 2372 eth_dev_txq_release(dev, tx_queue_id); 2373 2374 if (tx_conf == NULL) 2375 tx_conf = &dev_info.default_txconf; 2376 2377 local_conf = *tx_conf; 2378 2379 /* 2380 * If an offloading has already been enabled in 2381 * rte_eth_dev_configure(), it has been enabled on all queues, 2382 * so there is no need to enable it in this queue again. 
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2445 if (nb_tx_desc == 0) 2446 nb_tx_desc = cap.max_nb_desc; 2447 if (nb_tx_desc > cap.max_nb_desc) { 2448 RTE_ETHDEV_LOG(ERR, 2449 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2450 nb_tx_desc, cap.max_nb_desc); 2451 return -EINVAL; 2452 } 2453 if (conf->peer_count > cap.max_tx_2_rx) { 2454 RTE_ETHDEV_LOG(ERR, 2455 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2456 conf->peer_count, cap.max_tx_2_rx); 2457 return -EINVAL; 2458 } 2459 if (conf->peer_count == 0) { 2460 RTE_ETHDEV_LOG(ERR, 2461 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2462 conf->peer_count); 2463 return -EINVAL; 2464 } 2465 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2466 cap.max_nb_queues != UINT16_MAX; i++) { 2467 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2468 count++; 2469 } 2470 if (count > cap.max_nb_queues) { 2471 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d", 2472 cap.max_nb_queues); 2473 return -EINVAL; 2474 } 2475 if (dev->data->dev_started) 2476 return -EBUSY; 2477 eth_dev_txq_release(dev, tx_queue_id); 2478 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2479 (dev, tx_queue_id, nb_tx_desc, conf); 2480 if (ret == 0) 2481 dev->data->tx_queue_state[tx_queue_id] = 2482 RTE_ETH_QUEUE_STATE_HAIRPIN; 2483 return eth_err(port_id, ret); 2484 } 2485 2486 int 2487 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2488 { 2489 struct rte_eth_dev *dev; 2490 int ret; 2491 2492 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2493 dev = &rte_eth_devices[tx_port]; 2494 2495 if (dev->data->dev_started == 0) { 2496 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2497 return -EBUSY; 2498 } 2499 2500 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2501 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2502 if (ret != 0) 2503 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2504 " to Rx %d (%d - all ports)\n", 2505 tx_port, rx_port, RTE_MAX_ETHPORTS); 2506 2507 return ret; 2508 } 2509 2510 int 2511 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2512 { 2513 struct rte_eth_dev *dev; 2514 int ret; 2515 2516 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2517 dev = &rte_eth_devices[tx_port]; 2518 2519 if (dev->data->dev_started == 0) { 2520 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2521 return -EBUSY; 2522 } 2523 2524 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2525 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2526 if (ret != 0) 2527 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2528 " from Rx %d (%d - all ports)\n", 2529 tx_port, rx_port, RTE_MAX_ETHPORTS); 2530 2531 return ret; 2532 } 2533 2534 int 2535 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2536 size_t len, uint32_t direction) 2537 { 2538 struct rte_eth_dev *dev; 2539 int ret; 2540 2541 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2542 dev = &rte_eth_devices[port_id]; 2543 2544 if (peer_ports == NULL) { 2545 RTE_ETHDEV_LOG(ERR, 2546 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2547 port_id); 2548 return -EINVAL; 2549 } 2550 2551 if (len == 0) { 2552 RTE_ETHDEV_LOG(ERR, 2553 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2554 port_id); 2555 return -EINVAL; 2556 } 2557 2558 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2559 -ENOTSUP); 2560 2561 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2562 len, direction); 2563 if (ret < 0) 2564 
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2565 port_id, direction ? "Rx" : "Tx"); 2566 2567 return ret; 2568 } 2569 2570 void 2571 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2572 void *userdata __rte_unused) 2573 { 2574 rte_pktmbuf_free_bulk(pkts, unsent); 2575 } 2576 2577 void 2578 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2579 void *userdata) 2580 { 2581 uint64_t *count = userdata; 2582 2583 rte_pktmbuf_free_bulk(pkts, unsent); 2584 *count += unsent; 2585 } 2586 2587 int 2588 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2589 buffer_tx_error_fn cbfn, void *userdata) 2590 { 2591 if (buffer == NULL) { 2592 RTE_ETHDEV_LOG(ERR, 2593 "Cannot set Tx buffer error callback to NULL buffer\n"); 2594 return -EINVAL; 2595 } 2596 2597 buffer->error_callback = cbfn; 2598 buffer->error_userdata = userdata; 2599 return 0; 2600 } 2601 2602 int 2603 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2604 { 2605 int ret = 0; 2606 2607 if (buffer == NULL) { 2608 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2609 return -EINVAL; 2610 } 2611 2612 buffer->size = size; 2613 if (buffer->error_callback == NULL) { 2614 ret = rte_eth_tx_buffer_set_err_callback( 2615 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2616 } 2617 2618 return ret; 2619 } 2620 2621 int 2622 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2623 { 2624 struct rte_eth_dev *dev; 2625 int ret; 2626 2627 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2628 dev = &rte_eth_devices[port_id]; 2629 2630 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2631 2632 /* Call driver to free pending mbufs. */ 2633 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2634 free_cnt); 2635 return eth_err(port_id, ret); 2636 } 2637 2638 int 2639 rte_eth_promiscuous_enable(uint16_t port_id) 2640 { 2641 struct rte_eth_dev *dev; 2642 int diag = 0; 2643 2644 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2645 dev = &rte_eth_devices[port_id]; 2646 2647 if (dev->data->promiscuous == 1) 2648 return 0; 2649 2650 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2651 2652 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2653 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2654 2655 return eth_err(port_id, diag); 2656 } 2657 2658 int 2659 rte_eth_promiscuous_disable(uint16_t port_id) 2660 { 2661 struct rte_eth_dev *dev; 2662 int diag = 0; 2663 2664 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2665 dev = &rte_eth_devices[port_id]; 2666 2667 if (dev->data->promiscuous == 0) 2668 return 0; 2669 2670 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2671 2672 dev->data->promiscuous = 0; 2673 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2674 if (diag != 0) 2675 dev->data->promiscuous = 1; 2676 2677 return eth_err(port_id, diag); 2678 } 2679 2680 int 2681 rte_eth_promiscuous_get(uint16_t port_id) 2682 { 2683 struct rte_eth_dev *dev; 2684 2685 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2686 dev = &rte_eth_devices[port_id]; 2687 2688 return dev->data->promiscuous; 2689 } 2690 2691 int 2692 rte_eth_allmulticast_enable(uint16_t port_id) 2693 { 2694 struct rte_eth_dev *dev; 2695 int diag; 2696 2697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2698 dev = &rte_eth_devices[port_id]; 2699 2700 if (dev->data->all_multicast == 1) 2701 return 0; 2702 2703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2704 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2705 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2706 2707 return eth_err(port_id, diag); 2708 } 2709 2710 int 2711 rte_eth_allmulticast_disable(uint16_t port_id) 2712 { 2713 struct rte_eth_dev *dev; 2714 int diag; 2715 2716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2717 dev = &rte_eth_devices[port_id]; 2718 2719 if (dev->data->all_multicast == 0) 2720 return 0; 2721 2722 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2723 dev->data->all_multicast = 0; 2724 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2725 if (diag != 0) 2726 dev->data->all_multicast = 1; 2727 2728 return eth_err(port_id, diag); 2729 } 2730 2731 int 2732 rte_eth_allmulticast_get(uint16_t port_id) 2733 { 2734 struct rte_eth_dev *dev; 2735 2736 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2737 dev = &rte_eth_devices[port_id]; 2738 2739 return dev->data->all_multicast; 2740 } 2741 2742 int 2743 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2744 { 2745 struct rte_eth_dev *dev; 2746 2747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2748 dev = &rte_eth_devices[port_id]; 2749 2750 if (eth_link == NULL) { 2751 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2752 port_id); 2753 return -EINVAL; 2754 } 2755 2756 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2757 rte_eth_linkstatus_get(dev, eth_link); 2758 else { 2759 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2760 (*dev->dev_ops->link_update)(dev, 1); 2761 *eth_link = dev->data->dev_link; 2762 } 2763 2764 return 0; 2765 } 2766 2767 int 2768 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2769 { 2770 struct rte_eth_dev *dev; 2771 2772 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2773 dev = &rte_eth_devices[port_id]; 2774 2775 if (eth_link == NULL) { 2776 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2777 port_id); 2778 return -EINVAL; 2779 } 2780 2781 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2782 rte_eth_linkstatus_get(dev, eth_link); 2783 else { 2784 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2785 (*dev->dev_ops->link_update)(dev, 0); 2786 *eth_link = dev->data->dev_link; 2787 } 2788 2789 return 0; 2790 } 2791 2792 const 
char * 2793 rte_eth_link_speed_to_str(uint32_t link_speed) 2794 { 2795 switch (link_speed) { 2796 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2797 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2798 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2799 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2800 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2801 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2802 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2803 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2804 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2805 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2806 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2807 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2808 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2809 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2810 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2811 default: return "Invalid"; 2812 } 2813 } 2814 2815 int 2816 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2817 { 2818 if (str == NULL) { 2819 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2820 return -EINVAL; 2821 } 2822 2823 if (len == 0) { 2824 RTE_ETHDEV_LOG(ERR, 2825 "Cannot convert link to string with zero size\n"); 2826 return -EINVAL; 2827 } 2828 2829 if (eth_link == NULL) { 2830 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2831 return -EINVAL; 2832 } 2833 2834 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2835 return snprintf(str, len, "Link down"); 2836 else 2837 return snprintf(str, len, "Link up at %s %s %s", 2838 rte_eth_link_speed_to_str(eth_link->link_speed), 2839 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2840 "FDX" : "HDX", 2841 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2842 "Autoneg" : "Fixed"); 2843 } 2844 2845 int 2846 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2847 { 2848 struct rte_eth_dev *dev; 2849 2850 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2851 dev = &rte_eth_devices[port_id]; 2852 2853 if (stats == NULL) { 2854 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2855 port_id); 2856 return -EINVAL; 2857 } 2858 2859 memset(stats, 0, sizeof(*stats)); 2860 2861 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2862 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2863 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2864 } 2865 2866 int 2867 rte_eth_stats_reset(uint16_t port_id) 2868 { 2869 struct rte_eth_dev *dev; 2870 int ret; 2871 2872 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2873 dev = &rte_eth_devices[port_id]; 2874 2875 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2876 ret = (*dev->dev_ops->stats_reset)(dev); 2877 if (ret != 0) 2878 return eth_err(port_id, ret); 2879 2880 dev->data->rx_mbuf_alloc_failed = 0; 2881 2882 return 0; 2883 } 2884 2885 static inline int 2886 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2887 { 2888 uint16_t nb_rxqs, nb_txqs; 2889 int count; 2890 2891 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2892 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2893 2894 count = RTE_NB_STATS; 2895 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2896 count += nb_rxqs * RTE_NB_RXQ_STATS; 2897 count += nb_txqs * RTE_NB_TXQ_STATS; 2898 } 2899 2900 return count; 2901 } 2902 2903 static int 2904 eth_dev_get_xstats_count(uint16_t port_id) 2905 { 2906 struct rte_eth_dev *dev; 2907 int count; 2908 2909 
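	/*
	 * Total count is the number of driver-specific xstats names reported
	 * by the PMD (when the xstats_get_names callback is implemented)
	 * plus the basic generic and per-queue stats counted by
	 * eth_dev_get_xstats_basic_count().
	 */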
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2910 dev = &rte_eth_devices[port_id]; 2911 if (dev->dev_ops->xstats_get_names != NULL) { 2912 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2913 if (count < 0) 2914 return eth_err(port_id, count); 2915 } else 2916 count = 0; 2917 2918 2919 count += eth_dev_get_xstats_basic_count(dev); 2920 2921 return count; 2922 } 2923 2924 int 2925 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2926 uint64_t *id) 2927 { 2928 int cnt_xstats, idx_xstat; 2929 2930 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2931 2932 if (xstat_name == NULL) { 2933 RTE_ETHDEV_LOG(ERR, 2934 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2935 port_id); 2936 return -ENOMEM; 2937 } 2938 2939 if (id == NULL) { 2940 RTE_ETHDEV_LOG(ERR, 2941 "Cannot get ethdev port %u xstats ID to NULL\n", 2942 port_id); 2943 return -ENOMEM; 2944 } 2945 2946 /* Get count */ 2947 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2948 if (cnt_xstats < 0) { 2949 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2950 return -ENODEV; 2951 } 2952 2953 /* Get id-name lookup table */ 2954 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2955 2956 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2957 port_id, xstats_names, cnt_xstats, NULL)) { 2958 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2959 return -1; 2960 } 2961 2962 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2963 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2964 *id = idx_xstat; 2965 return 0; 2966 }; 2967 } 2968 2969 return -EINVAL; 2970 } 2971 2972 /* retrieve basic stats names */ 2973 static int 2974 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2975 struct rte_eth_xstat_name *xstats_names) 2976 { 2977 int cnt_used_entries = 0; 2978 uint32_t idx, id_queue; 2979 uint16_t num_q; 2980 2981 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2982 strlcpy(xstats_names[cnt_used_entries].name, 2983 eth_dev_stats_strings[idx].name, 2984 sizeof(xstats_names[0].name)); 2985 cnt_used_entries++; 2986 } 2987 2988 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2989 return cnt_used_entries; 2990 2991 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2992 for (id_queue = 0; id_queue < num_q; id_queue++) { 2993 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2994 snprintf(xstats_names[cnt_used_entries].name, 2995 sizeof(xstats_names[0].name), 2996 "rx_q%u_%s", 2997 id_queue, eth_dev_rxq_stats_strings[idx].name); 2998 cnt_used_entries++; 2999 } 3000 3001 } 3002 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3003 for (id_queue = 0; id_queue < num_q; id_queue++) { 3004 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3005 snprintf(xstats_names[cnt_used_entries].name, 3006 sizeof(xstats_names[0].name), 3007 "tx_q%u_%s", 3008 id_queue, eth_dev_txq_stats_strings[idx].name); 3009 cnt_used_entries++; 3010 } 3011 } 3012 return cnt_used_entries; 3013 } 3014 3015 /* retrieve ethdev extended statistics names */ 3016 int 3017 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3018 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3019 uint64_t *ids) 3020 { 3021 struct rte_eth_xstat_name *xstats_names_copy; 3022 unsigned int no_basic_stat_requested = 1; 3023 unsigned int no_ext_stat_requested = 1; 3024 unsigned int expected_entries; 3025 unsigned int basic_count; 3026 struct rte_eth_dev *dev; 3027 unsigned int i; 3028 int ret; 3029 3030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3031 
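	/*
	 * Note: the xstat ids visible to the application are laid out as the
	 * basic (generic + per-queue) stats first, followed by the
	 * driver-specific xstats. Ids falling in the driver range are
	 * remapped below by subtracting basic_count before being passed to
	 * the PMD's xstats_get_names_by_id callback.
	 */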
dev = &rte_eth_devices[port_id]; 3032 3033 basic_count = eth_dev_get_xstats_basic_count(dev); 3034 ret = eth_dev_get_xstats_count(port_id); 3035 if (ret < 0) 3036 return ret; 3037 expected_entries = (unsigned int)ret; 3038 3039 /* Return max number of stats if no ids given */ 3040 if (!ids) { 3041 if (!xstats_names) 3042 return expected_entries; 3043 else if (xstats_names && size < expected_entries) 3044 return expected_entries; 3045 } 3046 3047 if (ids && !xstats_names) 3048 return -EINVAL; 3049 3050 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3051 uint64_t ids_copy[size]; 3052 3053 for (i = 0; i < size; i++) { 3054 if (ids[i] < basic_count) { 3055 no_basic_stat_requested = 0; 3056 break; 3057 } 3058 3059 /* 3060 * Convert ids to xstats ids that PMD knows. 3061 * ids known by user are basic + extended stats. 3062 */ 3063 ids_copy[i] = ids[i] - basic_count; 3064 } 3065 3066 if (no_basic_stat_requested) 3067 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3068 ids_copy, xstats_names, size); 3069 } 3070 3071 /* Retrieve all stats */ 3072 if (!ids) { 3073 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3074 expected_entries); 3075 if (num_stats < 0 || num_stats > (int)expected_entries) 3076 return num_stats; 3077 else 3078 return expected_entries; 3079 } 3080 3081 xstats_names_copy = calloc(expected_entries, 3082 sizeof(struct rte_eth_xstat_name)); 3083 3084 if (!xstats_names_copy) { 3085 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3086 return -ENOMEM; 3087 } 3088 3089 if (ids) { 3090 for (i = 0; i < size; i++) { 3091 if (ids[i] >= basic_count) { 3092 no_ext_stat_requested = 0; 3093 break; 3094 } 3095 } 3096 } 3097 3098 /* Fill xstats_names_copy structure */ 3099 if (ids && no_ext_stat_requested) { 3100 eth_basic_stats_get_names(dev, xstats_names_copy); 3101 } else { 3102 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3103 expected_entries); 3104 if (ret < 0) { 3105 free(xstats_names_copy); 3106 return ret; 3107 } 3108 } 3109 3110 /* Filter stats */ 3111 for (i = 0; i < size; i++) { 3112 if (ids[i] >= expected_entries) { 3113 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3114 free(xstats_names_copy); 3115 return -1; 3116 } 3117 xstats_names[i] = xstats_names_copy[ids[i]]; 3118 } 3119 3120 free(xstats_names_copy); 3121 return size; 3122 } 3123 3124 int 3125 rte_eth_xstats_get_names(uint16_t port_id, 3126 struct rte_eth_xstat_name *xstats_names, 3127 unsigned int size) 3128 { 3129 struct rte_eth_dev *dev; 3130 int cnt_used_entries; 3131 int cnt_expected_entries; 3132 int cnt_driver_entries; 3133 3134 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3135 if (xstats_names == NULL || cnt_expected_entries < 0 || 3136 (int)size < cnt_expected_entries) 3137 return cnt_expected_entries; 3138 3139 /* port_id checked in eth_dev_get_xstats_count() */ 3140 dev = &rte_eth_devices[port_id]; 3141 3142 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3143 3144 if (dev->dev_ops->xstats_get_names != NULL) { 3145 /* If there are any driver-specific xstats, append them 3146 * to end of list. 
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}


static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
3263 */ 3264 ids_copy[i] = ids[i] - basic_count; 3265 } 3266 3267 if (no_basic_stat_requested) 3268 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3269 values, size); 3270 } 3271 3272 if (ids) { 3273 for (i = 0; i < size; i++) { 3274 if (ids[i] >= basic_count) { 3275 no_ext_stat_requested = 0; 3276 break; 3277 } 3278 } 3279 } 3280 3281 /* Fill the xstats structure */ 3282 if (ids && no_ext_stat_requested) 3283 ret = eth_basic_stats_get(port_id, xstats); 3284 else 3285 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3286 3287 if (ret < 0) 3288 return ret; 3289 num_xstats_filled = (unsigned int)ret; 3290 3291 /* Return all stats */ 3292 if (!ids) { 3293 for (i = 0; i < num_xstats_filled; i++) 3294 values[i] = xstats[i].value; 3295 return expected_entries; 3296 } 3297 3298 /* Filter stats */ 3299 for (i = 0; i < size; i++) { 3300 if (ids[i] >= expected_entries) { 3301 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3302 return -1; 3303 } 3304 values[i] = xstats[ids[i]].value; 3305 } 3306 return size; 3307 } 3308 3309 int 3310 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3311 unsigned int n) 3312 { 3313 struct rte_eth_dev *dev; 3314 unsigned int count = 0, i; 3315 signed int xcount = 0; 3316 uint16_t nb_rxqs, nb_txqs; 3317 int ret; 3318 3319 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3320 dev = &rte_eth_devices[port_id]; 3321 3322 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3323 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3324 3325 /* Return generic statistics */ 3326 count = RTE_NB_STATS; 3327 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3328 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3329 3330 /* implemented by the driver */ 3331 if (dev->dev_ops->xstats_get != NULL) { 3332 /* Retrieve the xstats from the driver at the end of the 3333 * xstats struct. 3334 */ 3335 xcount = (*dev->dev_ops->xstats_get)(dev, 3336 xstats ? xstats + count : NULL, 3337 (n > count) ? 
n - count : 0); 3338 3339 if (xcount < 0) 3340 return eth_err(port_id, xcount); 3341 } 3342 3343 if (n < count + xcount || xstats == NULL) 3344 return count + xcount; 3345 3346 /* now fill the xstats structure */ 3347 ret = eth_basic_stats_get(port_id, xstats); 3348 if (ret < 0) 3349 return ret; 3350 count = ret; 3351 3352 for (i = 0; i < count; i++) 3353 xstats[i].id = i; 3354 /* add an offset to driver-specific stats */ 3355 for ( ; i < count + xcount; i++) 3356 xstats[i].id += count; 3357 3358 return count + xcount; 3359 } 3360 3361 /* reset ethdev extended statistics */ 3362 int 3363 rte_eth_xstats_reset(uint16_t port_id) 3364 { 3365 struct rte_eth_dev *dev; 3366 3367 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3368 dev = &rte_eth_devices[port_id]; 3369 3370 /* implemented by the driver */ 3371 if (dev->dev_ops->xstats_reset != NULL) 3372 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3373 3374 /* fallback to default */ 3375 return rte_eth_stats_reset(port_id); 3376 } 3377 3378 static int 3379 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3380 uint8_t stat_idx, uint8_t is_rx) 3381 { 3382 struct rte_eth_dev *dev; 3383 3384 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3385 dev = &rte_eth_devices[port_id]; 3386 3387 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3388 return -EINVAL; 3389 3390 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3391 return -EINVAL; 3392 3393 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3394 return -EINVAL; 3395 3396 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3397 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3398 } 3399 3400 int 3401 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3402 uint8_t stat_idx) 3403 { 3404 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3405 tx_queue_id, 3406 stat_idx, STAT_QMAP_TX)); 3407 } 3408 3409 int 3410 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3411 uint8_t stat_idx) 3412 { 3413 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3414 rx_queue_id, 3415 stat_idx, STAT_QMAP_RX)); 3416 } 3417 3418 int 3419 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3420 { 3421 struct rte_eth_dev *dev; 3422 3423 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3424 dev = &rte_eth_devices[port_id]; 3425 3426 if (fw_version == NULL && fw_size > 0) { 3427 RTE_ETHDEV_LOG(ERR, 3428 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3429 port_id); 3430 return -EINVAL; 3431 } 3432 3433 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3434 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3435 fw_version, fw_size)); 3436 } 3437 3438 int 3439 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3440 { 3441 struct rte_eth_dev *dev; 3442 const struct rte_eth_desc_lim lim = { 3443 .nb_max = UINT16_MAX, 3444 .nb_min = 0, 3445 .nb_align = 1, 3446 .nb_seg_max = UINT16_MAX, 3447 .nb_mtu_seg_max = UINT16_MAX, 3448 }; 3449 int diag; 3450 3451 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3452 dev = &rte_eth_devices[port_id]; 3453 3454 if (dev_info == NULL) { 3455 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3456 port_id); 3457 return -EINVAL; 3458 } 3459 3460 /* 3461 * Init dev_info before port_id check since caller does not have 3462 * return status and does not know if get is successful or not. 
3463 */ 3464 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3465 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3466 3467 dev_info->rx_desc_lim = lim; 3468 dev_info->tx_desc_lim = lim; 3469 dev_info->device = dev->device; 3470 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3471 RTE_ETHER_CRC_LEN; 3472 dev_info->max_mtu = UINT16_MAX; 3473 3474 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3475 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3476 if (diag != 0) { 3477 /* Cleanup already filled in device information */ 3478 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3479 return eth_err(port_id, diag); 3480 } 3481 3482 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3483 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3484 RTE_MAX_QUEUES_PER_PORT); 3485 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3486 RTE_MAX_QUEUES_PER_PORT); 3487 3488 dev_info->driver_name = dev->device->driver->name; 3489 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3490 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3491 3492 dev_info->dev_flags = &dev->data->dev_flags; 3493 3494 return 0; 3495 } 3496 3497 int 3498 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3499 { 3500 struct rte_eth_dev *dev; 3501 3502 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3503 dev = &rte_eth_devices[port_id]; 3504 3505 if (dev_conf == NULL) { 3506 RTE_ETHDEV_LOG(ERR, 3507 "Cannot get ethdev port %u configuration to NULL\n", 3508 port_id); 3509 return -EINVAL; 3510 } 3511 3512 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3513 3514 return 0; 3515 } 3516 3517 int 3518 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3519 uint32_t *ptypes, int num) 3520 { 3521 int i, j; 3522 struct rte_eth_dev *dev; 3523 const uint32_t *all_ptypes; 3524 3525 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3526 dev = &rte_eth_devices[port_id]; 3527 3528 if (ptypes == NULL && num > 0) { 3529 RTE_ETHDEV_LOG(ERR, 3530 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3531 port_id); 3532 return -EINVAL; 3533 } 3534 3535 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3536 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3537 3538 if (!all_ptypes) 3539 return 0; 3540 3541 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3542 if (all_ptypes[i] & ptype_mask) { 3543 if (j < num) 3544 ptypes[j] = all_ptypes[i]; 3545 j++; 3546 } 3547 3548 return j; 3549 } 3550 3551 int 3552 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3553 uint32_t *set_ptypes, unsigned int num) 3554 { 3555 const uint32_t valid_ptype_masks[] = { 3556 RTE_PTYPE_L2_MASK, 3557 RTE_PTYPE_L3_MASK, 3558 RTE_PTYPE_L4_MASK, 3559 RTE_PTYPE_TUNNEL_MASK, 3560 RTE_PTYPE_INNER_L2_MASK, 3561 RTE_PTYPE_INNER_L3_MASK, 3562 RTE_PTYPE_INNER_L4_MASK, 3563 }; 3564 const uint32_t *all_ptypes; 3565 struct rte_eth_dev *dev; 3566 uint32_t unused_mask; 3567 unsigned int i, j; 3568 int ret; 3569 3570 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3571 dev = &rte_eth_devices[port_id]; 3572 3573 if (num > 0 && set_ptypes == NULL) { 3574 RTE_ETHDEV_LOG(ERR, 3575 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3576 port_id); 3577 return -EINVAL; 3578 } 3579 3580 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3581 *dev->dev_ops->dev_ptypes_set == NULL) { 3582 ret = 0; 3583 goto 
ptype_unknown; 3584 } 3585 3586 if (ptype_mask == 0) { 3587 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3588 ptype_mask); 3589 goto ptype_unknown; 3590 } 3591 3592 unused_mask = ptype_mask; 3593 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3594 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3595 if (mask && mask != valid_ptype_masks[i]) { 3596 ret = -EINVAL; 3597 goto ptype_unknown; 3598 } 3599 unused_mask &= ~valid_ptype_masks[i]; 3600 } 3601 3602 if (unused_mask) { 3603 ret = -EINVAL; 3604 goto ptype_unknown; 3605 } 3606 3607 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3608 if (all_ptypes == NULL) { 3609 ret = 0; 3610 goto ptype_unknown; 3611 } 3612 3613 /* 3614 * Accommodate as many set_ptypes as possible. If the supplied 3615 * set_ptypes array is insufficient fill it partially. 3616 */ 3617 for (i = 0, j = 0; set_ptypes != NULL && 3618 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3619 if (ptype_mask & all_ptypes[i]) { 3620 if (j < num - 1) { 3621 set_ptypes[j] = all_ptypes[i]; 3622 j++; 3623 continue; 3624 } 3625 break; 3626 } 3627 } 3628 3629 if (set_ptypes != NULL && j < num) 3630 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3631 3632 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3633 3634 ptype_unknown: 3635 if (num > 0) 3636 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3637 3638 return ret; 3639 } 3640 3641 int 3642 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3643 unsigned int num) 3644 { 3645 int32_t ret; 3646 struct rte_eth_dev *dev; 3647 struct rte_eth_dev_info dev_info; 3648 3649 if (ma == NULL) { 3650 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3651 return -EINVAL; 3652 } 3653 3654 /* will check for us that port_id is a valid one */ 3655 ret = rte_eth_dev_info_get(port_id, &dev_info); 3656 if (ret != 0) 3657 return ret; 3658 3659 dev = &rte_eth_devices[port_id]; 3660 num = RTE_MIN(dev_info.max_mac_addrs, num); 3661 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3662 3663 return num; 3664 } 3665 3666 int 3667 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3668 { 3669 struct rte_eth_dev *dev; 3670 3671 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3672 dev = &rte_eth_devices[port_id]; 3673 3674 if (mac_addr == NULL) { 3675 RTE_ETHDEV_LOG(ERR, 3676 "Cannot get ethdev port %u MAC address to NULL\n", 3677 port_id); 3678 return -EINVAL; 3679 } 3680 3681 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3682 3683 return 0; 3684 } 3685 3686 int 3687 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3688 { 3689 struct rte_eth_dev *dev; 3690 3691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3692 dev = &rte_eth_devices[port_id]; 3693 3694 if (mtu == NULL) { 3695 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3696 port_id); 3697 return -EINVAL; 3698 } 3699 3700 *mtu = dev->data->mtu; 3701 return 0; 3702 } 3703 3704 int 3705 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3706 { 3707 int ret; 3708 struct rte_eth_dev_info dev_info; 3709 struct rte_eth_dev *dev; 3710 3711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3712 dev = &rte_eth_devices[port_id]; 3713 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3714 3715 /* 3716 * Check if the device supports dev_infos_get, if it does not 3717 * skip min_mtu/max_mtu validation here as this requires values 3718 * that are populated within the call to rte_eth_dev_info_get() 3719 * which relies on dev->dev_ops->dev_infos_get. 
3720 */ 3721 if (*dev->dev_ops->dev_infos_get != NULL) { 3722 ret = rte_eth_dev_info_get(port_id, &dev_info); 3723 if (ret != 0) 3724 return ret; 3725 3726 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3727 if (ret != 0) 3728 return ret; 3729 } 3730 3731 if (dev->data->dev_configured == 0) { 3732 RTE_ETHDEV_LOG(ERR, 3733 "Port %u must be configured before MTU set\n", 3734 port_id); 3735 return -EINVAL; 3736 } 3737 3738 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3739 if (ret == 0) 3740 dev->data->mtu = mtu; 3741 3742 return eth_err(port_id, ret); 3743 } 3744 3745 int 3746 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3747 { 3748 struct rte_eth_dev *dev; 3749 int ret; 3750 3751 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3752 dev = &rte_eth_devices[port_id]; 3753 3754 if (!(dev->data->dev_conf.rxmode.offloads & 3755 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3756 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3757 port_id); 3758 return -ENOSYS; 3759 } 3760 3761 if (vlan_id > 4095) { 3762 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3763 port_id, vlan_id); 3764 return -EINVAL; 3765 } 3766 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3767 3768 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3769 if (ret == 0) { 3770 struct rte_vlan_filter_conf *vfc; 3771 int vidx; 3772 int vbit; 3773 3774 vfc = &dev->data->vlan_filter_conf; 3775 vidx = vlan_id / 64; 3776 vbit = vlan_id % 64; 3777 3778 if (on) 3779 vfc->ids[vidx] |= RTE_BIT64(vbit); 3780 else 3781 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3782 } 3783 3784 return eth_err(port_id, ret); 3785 } 3786 3787 int 3788 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3789 int on) 3790 { 3791 struct rte_eth_dev *dev; 3792 3793 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3794 dev = &rte_eth_devices[port_id]; 3795 3796 if (rx_queue_id >= dev->data->nb_rx_queues) { 3797 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3798 return -EINVAL; 3799 } 3800 3801 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3802 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3803 3804 return 0; 3805 } 3806 3807 int 3808 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3809 enum rte_vlan_type vlan_type, 3810 uint16_t tpid) 3811 { 3812 struct rte_eth_dev *dev; 3813 3814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3815 dev = &rte_eth_devices[port_id]; 3816 3817 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3818 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3819 tpid)); 3820 } 3821 3822 int 3823 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3824 { 3825 struct rte_eth_dev_info dev_info; 3826 struct rte_eth_dev *dev; 3827 int ret = 0; 3828 int mask = 0; 3829 int cur, org = 0; 3830 uint64_t orig_offloads; 3831 uint64_t dev_offloads; 3832 uint64_t new_offloads; 3833 3834 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3835 dev = &rte_eth_devices[port_id]; 3836 3837 /* save original values in case of failure */ 3838 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3839 dev_offloads = orig_offloads; 3840 3841 /* check which option changed by application */ 3842 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3843 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3844 if (cur != org) { 3845 if (cur) 3846 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3847 else 3848 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3849 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3850 } 3851 3852 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3853 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3854 if (cur != org) { 3855 if (cur) 3856 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3857 else 3858 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3859 mask |= RTE_ETH_VLAN_FILTER_MASK; 3860 } 3861 3862 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3863 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3864 if (cur != org) { 3865 if (cur) 3866 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3867 else 3868 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3869 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3870 } 3871 3872 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3873 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3874 if (cur != org) { 3875 if (cur) 3876 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3877 else 3878 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3879 mask |= RTE_ETH_QINQ_STRIP_MASK; 3880 } 3881 3882 /*no change*/ 3883 if (mask == 0) 3884 return ret; 3885 3886 ret = rte_eth_dev_info_get(port_id, &dev_info); 3887 if (ret != 0) 3888 return ret; 3889 3890 /* Rx VLAN offloading must be within its device capabilities */ 3891 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3892 new_offloads = dev_offloads & ~orig_offloads; 3893 RTE_ETHDEV_LOG(ERR, 3894 "Ethdev port_id=%u requested new added VLAN offloads " 3895 "0x%" PRIx64 " must be within Rx offloads capabilities " 3896 "0x%" PRIx64 " in %s()\n", 3897 port_id, new_offloads, dev_info.rx_offload_capa, 3898 __func__); 3899 return -EINVAL; 3900 } 3901 3902 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3903 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3904 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3905 if (ret) { 3906 /* hit an error restore original values */ 3907 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3908 } 3909 3910 return eth_err(port_id, ret); 3911 } 3912 3913 int 3914 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3915 { 3916 struct rte_eth_dev *dev; 3917 uint64_t *dev_offloads; 3918 int ret = 0; 3919 3920 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3921 dev = &rte_eth_devices[port_id]; 3922 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3923 3924 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3925 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3926 3927 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3928 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3929 3930 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3931 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3932 3933 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3934 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3935 3936 return ret; 3937 } 3938 3939 int 3940 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3941 { 3942 struct rte_eth_dev *dev; 3943 3944 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3945 dev = &rte_eth_devices[port_id]; 3946 3947 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3948 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3949 } 3950 3951 int 3952 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3953 { 3954 struct rte_eth_dev *dev; 3955 3956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3957 dev = &rte_eth_devices[port_id]; 3958 3959 if (fc_conf == NULL) { 3960 RTE_ETHDEV_LOG(ERR, 3961 "Cannot get ethdev port %u flow control config to NULL\n", 3962 port_id); 3963 return -EINVAL; 3964 } 3965 3966 
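	/*
	 * The driver reports its current settings into the zeroed fc_conf
	 * below. A typical application read-modify-write (a minimal sketch,
	 * error handling omitted) would be:
	 *
	 *	struct rte_eth_fc_conf fc;
	 *
	 *	rte_eth_dev_flow_ctrl_get(port_id, &fc);
	 *	fc.mode = RTE_ETH_FC_FULL;
	 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
	 */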
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3967 memset(fc_conf, 0, sizeof(*fc_conf)); 3968 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3969 } 3970 3971 int 3972 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3973 { 3974 struct rte_eth_dev *dev; 3975 3976 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3977 dev = &rte_eth_devices[port_id]; 3978 3979 if (fc_conf == NULL) { 3980 RTE_ETHDEV_LOG(ERR, 3981 "Cannot set ethdev port %u flow control from NULL config\n", 3982 port_id); 3983 return -EINVAL; 3984 } 3985 3986 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3987 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3988 return -EINVAL; 3989 } 3990 3991 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3992 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3993 } 3994 3995 int 3996 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3997 struct rte_eth_pfc_conf *pfc_conf) 3998 { 3999 struct rte_eth_dev *dev; 4000 4001 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4002 dev = &rte_eth_devices[port_id]; 4003 4004 if (pfc_conf == NULL) { 4005 RTE_ETHDEV_LOG(ERR, 4006 "Cannot set ethdev port %u priority flow control from NULL config\n", 4007 port_id); 4008 return -EINVAL; 4009 } 4010 4011 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4012 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4013 return -EINVAL; 4014 } 4015 4016 /* High water, low water validation are device specific */ 4017 if (*dev->dev_ops->priority_flow_ctrl_set) 4018 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4019 (dev, pfc_conf)); 4020 return -ENOTSUP; 4021 } 4022 4023 static int 4024 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4025 uint16_t reta_size) 4026 { 4027 uint16_t i, num; 4028 4029 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4030 for (i = 0; i < num; i++) { 4031 if (reta_conf[i].mask) 4032 return 0; 4033 } 4034 4035 return -EINVAL; 4036 } 4037 4038 static int 4039 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4040 uint16_t reta_size, 4041 uint16_t max_rxq) 4042 { 4043 uint16_t i, idx, shift; 4044 4045 if (max_rxq == 0) { 4046 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4047 return -EINVAL; 4048 } 4049 4050 for (i = 0; i < reta_size; i++) { 4051 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4052 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4053 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4054 (reta_conf[idx].reta[shift] >= max_rxq)) { 4055 RTE_ETHDEV_LOG(ERR, 4056 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4057 idx, shift, 4058 reta_conf[idx].reta[shift], max_rxq); 4059 return -EINVAL; 4060 } 4061 } 4062 4063 return 0; 4064 } 4065 4066 int 4067 rte_eth_dev_rss_reta_update(uint16_t port_id, 4068 struct rte_eth_rss_reta_entry64 *reta_conf, 4069 uint16_t reta_size) 4070 { 4071 struct rte_eth_dev *dev; 4072 int ret; 4073 4074 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4075 dev = &rte_eth_devices[port_id]; 4076 4077 if (reta_conf == NULL) { 4078 RTE_ETHDEV_LOG(ERR, 4079 "Cannot update ethdev port %u RSS RETA to NULL\n", 4080 port_id); 4081 return -EINVAL; 4082 } 4083 4084 if (reta_size == 0) { 4085 RTE_ETHDEV_LOG(ERR, 4086 "Cannot update ethdev port %u RSS RETA with zero size\n", 4087 port_id); 4088 return -EINVAL; 4089 } 4090 4091 /* Check mask bits */ 4092 ret = eth_check_reta_mask(reta_conf, reta_size); 4093 if (ret < 0) 4094 
return ret; 4095 4096 /* Check entry value */ 4097 ret = eth_check_reta_entry(reta_conf, reta_size, 4098 dev->data->nb_rx_queues); 4099 if (ret < 0) 4100 return ret; 4101 4102 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4103 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4104 reta_size)); 4105 } 4106 4107 int 4108 rte_eth_dev_rss_reta_query(uint16_t port_id, 4109 struct rte_eth_rss_reta_entry64 *reta_conf, 4110 uint16_t reta_size) 4111 { 4112 struct rte_eth_dev *dev; 4113 int ret; 4114 4115 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4116 dev = &rte_eth_devices[port_id]; 4117 4118 if (reta_conf == NULL) { 4119 RTE_ETHDEV_LOG(ERR, 4120 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4121 port_id); 4122 return -EINVAL; 4123 } 4124 4125 /* Check mask bits */ 4126 ret = eth_check_reta_mask(reta_conf, reta_size); 4127 if (ret < 0) 4128 return ret; 4129 4130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4131 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4132 reta_size)); 4133 } 4134 4135 int 4136 rte_eth_dev_rss_hash_update(uint16_t port_id, 4137 struct rte_eth_rss_conf *rss_conf) 4138 { 4139 struct rte_eth_dev *dev; 4140 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4141 int ret; 4142 4143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4144 dev = &rte_eth_devices[port_id]; 4145 4146 if (rss_conf == NULL) { 4147 RTE_ETHDEV_LOG(ERR, 4148 "Cannot update ethdev port %u RSS hash from NULL config\n", 4149 port_id); 4150 return -EINVAL; 4151 } 4152 4153 ret = rte_eth_dev_info_get(port_id, &dev_info); 4154 if (ret != 0) 4155 return ret; 4156 4157 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4158 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4159 dev_info.flow_type_rss_offloads) { 4160 RTE_ETHDEV_LOG(ERR, 4161 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4162 port_id, rss_conf->rss_hf, 4163 dev_info.flow_type_rss_offloads); 4164 return -EINVAL; 4165 } 4166 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4167 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4168 rss_conf)); 4169 } 4170 4171 int 4172 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4173 struct rte_eth_rss_conf *rss_conf) 4174 { 4175 struct rte_eth_dev *dev; 4176 4177 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4178 dev = &rte_eth_devices[port_id]; 4179 4180 if (rss_conf == NULL) { 4181 RTE_ETHDEV_LOG(ERR, 4182 "Cannot get ethdev port %u RSS hash config to NULL\n", 4183 port_id); 4184 return -EINVAL; 4185 } 4186 4187 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4188 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4189 rss_conf)); 4190 } 4191 4192 int 4193 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4194 struct rte_eth_udp_tunnel *udp_tunnel) 4195 { 4196 struct rte_eth_dev *dev; 4197 4198 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4199 dev = &rte_eth_devices[port_id]; 4200 4201 if (udp_tunnel == NULL) { 4202 RTE_ETHDEV_LOG(ERR, 4203 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4204 port_id); 4205 return -EINVAL; 4206 } 4207 4208 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4209 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4210 return -EINVAL; 4211 } 4212 4213 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4214 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4215 udp_tunnel)); 4216 } 4217 4218 
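/*
 * Usage sketch (illustrative only, not built as part of the library):
 * an application typically pairs the add/delete calls around the lifetime
 * of a tunnel endpoint. The 4789 value below is simply the IANA-assigned
 * VXLAN UDP port used as an example; port_id is assumed to refer to a
 * valid, configured port whose PMD implements the
 * udp_tunnel_port_add/udp_tunnel_port_del ops.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) != 0)
 *		return;	// -EINVAL, -ENOTSUP or a negative driver error
 *	...
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */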
int 4219 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4220 struct rte_eth_udp_tunnel *udp_tunnel) 4221 { 4222 struct rte_eth_dev *dev; 4223 4224 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4225 dev = &rte_eth_devices[port_id]; 4226 4227 if (udp_tunnel == NULL) { 4228 RTE_ETHDEV_LOG(ERR, 4229 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4230 port_id); 4231 return -EINVAL; 4232 } 4233 4234 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4235 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4236 return -EINVAL; 4237 } 4238 4239 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4240 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4241 udp_tunnel)); 4242 } 4243 4244 int 4245 rte_eth_led_on(uint16_t port_id) 4246 { 4247 struct rte_eth_dev *dev; 4248 4249 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4250 dev = &rte_eth_devices[port_id]; 4251 4252 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4253 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4254 } 4255 4256 int 4257 rte_eth_led_off(uint16_t port_id) 4258 { 4259 struct rte_eth_dev *dev; 4260 4261 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4262 dev = &rte_eth_devices[port_id]; 4263 4264 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4265 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4266 } 4267 4268 int 4269 rte_eth_fec_get_capability(uint16_t port_id, 4270 struct rte_eth_fec_capa *speed_fec_capa, 4271 unsigned int num) 4272 { 4273 struct rte_eth_dev *dev; 4274 int ret; 4275 4276 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4277 dev = &rte_eth_devices[port_id]; 4278 4279 if (speed_fec_capa == NULL && num > 0) { 4280 RTE_ETHDEV_LOG(ERR, 4281 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4282 port_id); 4283 return -EINVAL; 4284 } 4285 4286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4287 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4288 4289 return ret; 4290 } 4291 4292 int 4293 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4294 { 4295 struct rte_eth_dev *dev; 4296 4297 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4298 dev = &rte_eth_devices[port_id]; 4299 4300 if (fec_capa == NULL) { 4301 RTE_ETHDEV_LOG(ERR, 4302 "Cannot get ethdev port %u current FEC mode to NULL\n", 4303 port_id); 4304 return -EINVAL; 4305 } 4306 4307 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4308 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4309 } 4310 4311 int 4312 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4313 { 4314 struct rte_eth_dev *dev; 4315 4316 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4317 dev = &rte_eth_devices[port_id]; 4318 4319 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4320 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4321 } 4322 4323 /* 4324 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4325 * an empty spot. 
4326 */ 4327 static int 4328 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4329 { 4330 struct rte_eth_dev_info dev_info; 4331 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4332 unsigned i; 4333 int ret; 4334 4335 ret = rte_eth_dev_info_get(port_id, &dev_info); 4336 if (ret != 0) 4337 return -1; 4338 4339 for (i = 0; i < dev_info.max_mac_addrs; i++) 4340 if (memcmp(addr, &dev->data->mac_addrs[i], 4341 RTE_ETHER_ADDR_LEN) == 0) 4342 return i; 4343 4344 return -1; 4345 } 4346 4347 static const struct rte_ether_addr null_mac_addr; 4348 4349 int 4350 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4351 uint32_t pool) 4352 { 4353 struct rte_eth_dev *dev; 4354 int index; 4355 uint64_t pool_mask; 4356 int ret; 4357 4358 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4359 dev = &rte_eth_devices[port_id]; 4360 4361 if (addr == NULL) { 4362 RTE_ETHDEV_LOG(ERR, 4363 "Cannot add ethdev port %u MAC address from NULL address\n", 4364 port_id); 4365 return -EINVAL; 4366 } 4367 4368 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4369 4370 if (rte_is_zero_ether_addr(addr)) { 4371 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4372 port_id); 4373 return -EINVAL; 4374 } 4375 if (pool >= RTE_ETH_64_POOLS) { 4376 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4377 return -EINVAL; 4378 } 4379 4380 index = eth_dev_get_mac_addr_index(port_id, addr); 4381 if (index < 0) { 4382 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4383 if (index < 0) { 4384 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4385 port_id); 4386 return -ENOSPC; 4387 } 4388 } else { 4389 pool_mask = dev->data->mac_pool_sel[index]; 4390 4391 /* Check if both MAC address and pool is already there, and do nothing */ 4392 if (pool_mask & RTE_BIT64(pool)) 4393 return 0; 4394 } 4395 4396 /* Update NIC */ 4397 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4398 4399 if (ret == 0) { 4400 /* Update address in NIC data structure */ 4401 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4402 4403 /* Update pool bitmap in NIC data structure */ 4404 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4405 } 4406 4407 return eth_err(port_id, ret); 4408 } 4409 4410 int 4411 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4412 { 4413 struct rte_eth_dev *dev; 4414 int index; 4415 4416 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4417 dev = &rte_eth_devices[port_id]; 4418 4419 if (addr == NULL) { 4420 RTE_ETHDEV_LOG(ERR, 4421 "Cannot remove ethdev port %u MAC address from NULL address\n", 4422 port_id); 4423 return -EINVAL; 4424 } 4425 4426 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4427 4428 index = eth_dev_get_mac_addr_index(port_id, addr); 4429 if (index == 0) { 4430 RTE_ETHDEV_LOG(ERR, 4431 "Port %u: Cannot remove default MAC address\n", 4432 port_id); 4433 return -EADDRINUSE; 4434 } else if (index < 0) 4435 return 0; /* Do nothing if address wasn't found */ 4436 4437 /* Update NIC */ 4438 (*dev->dev_ops->mac_addr_remove)(dev, index); 4439 4440 /* Update address in NIC data structure */ 4441 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4442 4443 /* reset pool bitmap */ 4444 dev->data->mac_pool_sel[index] = 0; 4445 4446 return 0; 4447 } 4448 4449 int 4450 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4451 { 4452 struct rte_eth_dev *dev; 4453 int ret; 4454 4455 
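/*
 * Unlike rte_eth_dev_mac_addr_add(), this call replaces slot 0 of
 * dev->data->mac_addrs: the address must be a valid assigned (non-zero,
 * unicast) address, the driver op is invoked first, and only on success
 * is the shadow copy in slot 0 refreshed.
 */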
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4456 dev = &rte_eth_devices[port_id]; 4457 4458 if (addr == NULL) { 4459 RTE_ETHDEV_LOG(ERR, 4460 "Cannot set ethdev port %u default MAC address from NULL address\n", 4461 port_id); 4462 return -EINVAL; 4463 } 4464 4465 if (!rte_is_valid_assigned_ether_addr(addr)) 4466 return -EINVAL; 4467 4468 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4469 4470 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4471 if (ret < 0) 4472 return ret; 4473 4474 /* Update default address in NIC data structure */ 4475 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4476 4477 return 0; 4478 } 4479 4480 4481 /* 4482 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4483 * an empty spot. 4484 */ 4485 static int 4486 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4487 const struct rte_ether_addr *addr) 4488 { 4489 struct rte_eth_dev_info dev_info; 4490 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4491 unsigned i; 4492 int ret; 4493 4494 ret = rte_eth_dev_info_get(port_id, &dev_info); 4495 if (ret != 0) 4496 return -1; 4497 4498 if (!dev->data->hash_mac_addrs) 4499 return -1; 4500 4501 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4502 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4503 RTE_ETHER_ADDR_LEN) == 0) 4504 return i; 4505 4506 return -1; 4507 } 4508 4509 int 4510 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4511 uint8_t on) 4512 { 4513 int index; 4514 int ret; 4515 struct rte_eth_dev *dev; 4516 4517 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4518 dev = &rte_eth_devices[port_id]; 4519 4520 if (addr == NULL) { 4521 RTE_ETHDEV_LOG(ERR, 4522 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4523 port_id); 4524 return -EINVAL; 4525 } 4526 4527 if (rte_is_zero_ether_addr(addr)) { 4528 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4529 port_id); 4530 return -EINVAL; 4531 } 4532 4533 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4534 /* Check if it's already there, and do nothing */ 4535 if ((index >= 0) && on) 4536 return 0; 4537 4538 if (index < 0) { 4539 if (!on) { 4540 RTE_ETHDEV_LOG(ERR, 4541 "Port %u: the MAC address was not set in UTA\n", 4542 port_id); 4543 return -EINVAL; 4544 } 4545 4546 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4547 if (index < 0) { 4548 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4549 port_id); 4550 return -ENOSPC; 4551 } 4552 } 4553 4554 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4555 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4556 if (ret == 0) { 4557 /* Update address in NIC data structure */ 4558 if (on) 4559 rte_ether_addr_copy(addr, 4560 &dev->data->hash_mac_addrs[index]); 4561 else 4562 rte_ether_addr_copy(&null_mac_addr, 4563 &dev->data->hash_mac_addrs[index]); 4564 } 4565 4566 return eth_err(port_id, ret); 4567 } 4568 4569 int 4570 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4571 { 4572 struct rte_eth_dev *dev; 4573 4574 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4575 dev = &rte_eth_devices[port_id]; 4576 4577 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4578 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4579 on)); 4580 } 4581 4582 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4583 uint16_t tx_rate) 4584 { 4585 struct rte_eth_dev *dev; 4586 struct rte_eth_dev_info dev_info; 4587 struct rte_eth_link 
link; 4588 int ret; 4589 4590 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4591 dev = &rte_eth_devices[port_id]; 4592 4593 ret = rte_eth_dev_info_get(port_id, &dev_info); 4594 if (ret != 0) 4595 return ret; 4596 4597 link = dev->data->dev_link; 4598 4599 if (queue_idx > dev_info.max_tx_queues) { 4600 RTE_ETHDEV_LOG(ERR, 4601 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4602 port_id, queue_idx); 4603 return -EINVAL; 4604 } 4605 4606 if (tx_rate > link.link_speed) { 4607 RTE_ETHDEV_LOG(ERR, 4608 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4609 tx_rate, link.link_speed); 4610 return -EINVAL; 4611 } 4612 4613 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4614 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4615 queue_idx, tx_rate)); 4616 } 4617 4618 RTE_INIT(eth_dev_init_fp_ops) 4619 { 4620 uint32_t i; 4621 4622 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4623 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4624 } 4625 4626 RTE_INIT(eth_dev_init_cb_lists) 4627 { 4628 uint16_t i; 4629 4630 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4631 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4632 } 4633 4634 int 4635 rte_eth_dev_callback_register(uint16_t port_id, 4636 enum rte_eth_event_type event, 4637 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4638 { 4639 struct rte_eth_dev *dev; 4640 struct rte_eth_dev_callback *user_cb; 4641 uint16_t next_port; 4642 uint16_t last_port; 4643 4644 if (cb_fn == NULL) { 4645 RTE_ETHDEV_LOG(ERR, 4646 "Cannot register ethdev port %u callback from NULL\n", 4647 port_id); 4648 return -EINVAL; 4649 } 4650 4651 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4652 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4653 return -EINVAL; 4654 } 4655 4656 if (port_id == RTE_ETH_ALL) { 4657 next_port = 0; 4658 last_port = RTE_MAX_ETHPORTS - 1; 4659 } else { 4660 next_port = last_port = port_id; 4661 } 4662 4663 rte_spinlock_lock(ð_dev_cb_lock); 4664 4665 do { 4666 dev = &rte_eth_devices[next_port]; 4667 4668 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4669 if (user_cb->cb_fn == cb_fn && 4670 user_cb->cb_arg == cb_arg && 4671 user_cb->event == event) { 4672 break; 4673 } 4674 } 4675 4676 /* create a new callback. 
*/ 4677 if (user_cb == NULL) { 4678 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4679 sizeof(struct rte_eth_dev_callback), 0); 4680 if (user_cb != NULL) { 4681 user_cb->cb_fn = cb_fn; 4682 user_cb->cb_arg = cb_arg; 4683 user_cb->event = event; 4684 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4685 user_cb, next); 4686 } else { 4687 rte_spinlock_unlock(ð_dev_cb_lock); 4688 rte_eth_dev_callback_unregister(port_id, event, 4689 cb_fn, cb_arg); 4690 return -ENOMEM; 4691 } 4692 4693 } 4694 } while (++next_port <= last_port); 4695 4696 rte_spinlock_unlock(ð_dev_cb_lock); 4697 return 0; 4698 } 4699 4700 int 4701 rte_eth_dev_callback_unregister(uint16_t port_id, 4702 enum rte_eth_event_type event, 4703 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4704 { 4705 int ret; 4706 struct rte_eth_dev *dev; 4707 struct rte_eth_dev_callback *cb, *next; 4708 uint16_t next_port; 4709 uint16_t last_port; 4710 4711 if (cb_fn == NULL) { 4712 RTE_ETHDEV_LOG(ERR, 4713 "Cannot unregister ethdev port %u callback from NULL\n", 4714 port_id); 4715 return -EINVAL; 4716 } 4717 4718 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4719 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4720 return -EINVAL; 4721 } 4722 4723 if (port_id == RTE_ETH_ALL) { 4724 next_port = 0; 4725 last_port = RTE_MAX_ETHPORTS - 1; 4726 } else { 4727 next_port = last_port = port_id; 4728 } 4729 4730 rte_spinlock_lock(ð_dev_cb_lock); 4731 4732 do { 4733 dev = &rte_eth_devices[next_port]; 4734 ret = 0; 4735 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4736 cb = next) { 4737 4738 next = TAILQ_NEXT(cb, next); 4739 4740 if (cb->cb_fn != cb_fn || cb->event != event || 4741 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4742 continue; 4743 4744 /* 4745 * if this callback is not executing right now, 4746 * then remove it. 4747 */ 4748 if (cb->active == 0) { 4749 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4750 rte_free(cb); 4751 } else { 4752 ret = -EAGAIN; 4753 } 4754 } 4755 } while (++next_port <= last_port); 4756 4757 rte_spinlock_unlock(ð_dev_cb_lock); 4758 return ret; 4759 } 4760 4761 int 4762 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4763 enum rte_eth_event_type event, void *ret_param) 4764 { 4765 struct rte_eth_dev_callback *cb_lst; 4766 struct rte_eth_dev_callback dev_cb; 4767 int rc = 0; 4768 4769 rte_spinlock_lock(ð_dev_cb_lock); 4770 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4771 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4772 continue; 4773 dev_cb = *cb_lst; 4774 cb_lst->active = 1; 4775 if (ret_param != NULL) 4776 dev_cb.ret_param = ret_param; 4777 4778 rte_spinlock_unlock(ð_dev_cb_lock); 4779 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4780 dev_cb.cb_arg, dev_cb.ret_param); 4781 rte_spinlock_lock(ð_dev_cb_lock); 4782 cb_lst->active = 0; 4783 } 4784 rte_spinlock_unlock(ð_dev_cb_lock); 4785 return rc; 4786 } 4787 4788 void 4789 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4790 { 4791 if (dev == NULL) 4792 return; 4793 4794 /* 4795 * for secondary process, at that point we expect device 4796 * to be already 'usable', so shared data and all function pointers 4797 * for fast-path devops have to be setup properly inside rte_eth_dev. 
4798 */ 4799 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4800 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4801 4802 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4803 4804 dev->state = RTE_ETH_DEV_ATTACHED; 4805 } 4806 4807 int 4808 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4809 { 4810 uint32_t vec; 4811 struct rte_eth_dev *dev; 4812 struct rte_intr_handle *intr_handle; 4813 uint16_t qid; 4814 int rc; 4815 4816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4817 dev = &rte_eth_devices[port_id]; 4818 4819 if (!dev->intr_handle) { 4820 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4821 return -ENOTSUP; 4822 } 4823 4824 intr_handle = dev->intr_handle; 4825 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4826 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4827 return -EPERM; 4828 } 4829 4830 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4831 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4832 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4833 if (rc && rc != -EEXIST) { 4834 RTE_ETHDEV_LOG(ERR, 4835 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4836 port_id, qid, op, epfd, vec); 4837 } 4838 } 4839 4840 return 0; 4841 } 4842 4843 int 4844 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4845 { 4846 struct rte_intr_handle *intr_handle; 4847 struct rte_eth_dev *dev; 4848 unsigned int efd_idx; 4849 uint32_t vec; 4850 int fd; 4851 4852 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4853 dev = &rte_eth_devices[port_id]; 4854 4855 if (queue_id >= dev->data->nb_rx_queues) { 4856 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4857 return -1; 4858 } 4859 4860 if (!dev->intr_handle) { 4861 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4862 return -1; 4863 } 4864 4865 intr_handle = dev->intr_handle; 4866 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4867 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4868 return -1; 4869 } 4870 4871 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4872 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4873 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4874 fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4875
4876 return fd;
4877 }
4878
4879 static inline int
4880 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4881 const char *ring_name)
4882 {
4883 return snprintf(name, len, "eth_p%d_q%d_%s",
4884 port_id, queue_id, ring_name);
4885 }
4886
4887 const struct rte_memzone *
4888 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4889 uint16_t queue_id, size_t size, unsigned align,
4890 int socket_id)
4891 {
4892 char z_name[RTE_MEMZONE_NAMESIZE];
4893 const struct rte_memzone *mz;
4894 int rc;
4895
4896 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4897 queue_id, ring_name);
4898 if (rc >= RTE_MEMZONE_NAMESIZE) {
4899 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4900 rte_errno = ENAMETOOLONG;
4901 return NULL;
4902 }
4903
4904 mz = rte_memzone_lookup(z_name);
4905 if (mz) {
4906 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4907 size > mz->len ||
4908 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4909 RTE_ETHDEV_LOG(ERR,
4910 "memzone %s does not satisfy the requested attributes\n",
4911 mz->name);
4912 return NULL;
4913 }
4914
4915 return mz;
4916 }
4917
4918 return rte_memzone_reserve_aligned(z_name, size, socket_id,
4919 RTE_MEMZONE_IOVA_CONTIG, align);
4920 }
4921
4922 int
4923 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4924 uint16_t queue_id)
4925 {
4926 char z_name[RTE_MEMZONE_NAMESIZE];
4927 const struct rte_memzone *mz;
4928 int rc = 0;
4929
4930 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4931 queue_id, ring_name);
4932 if (rc >= RTE_MEMZONE_NAMESIZE) {
4933 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4934 return -ENAMETOOLONG;
4935 }
4936
4937 mz = rte_memzone_lookup(z_name);
4938 if (mz)
4939 rc = rte_memzone_free(mz);
4940 else
4941 rc = -ENOENT;
4942
4943 return rc;
4944 }
4945
4946 int
4947 rte_eth_dev_create(struct rte_device *device, const char *name,
4948 size_t priv_data_size,
4949 ethdev_bus_specific_init ethdev_bus_specific_init,
4950 void *bus_init_params,
4951 ethdev_init_t ethdev_init, void *init_params)
4952 {
4953 struct rte_eth_dev *ethdev;
4954 int retval;
4955
4956 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4957
4958 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4959 ethdev = rte_eth_dev_allocate(name);
4960 if (!ethdev)
4961 return -ENODEV;
4962
4963 if (priv_data_size) {
4964 ethdev->data->dev_private = rte_zmalloc_socket(
4965 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4966 device->numa_node);
4967
4968 if (!ethdev->data->dev_private) {
4969 RTE_ETHDEV_LOG(ERR,
4970 "failed to allocate private data\n");
4971 retval = -ENOMEM;
4972 goto probe_failed;
4973 }
4974 }
4975 } else {
4976 ethdev = rte_eth_dev_attach_secondary(name);
4977 if (!ethdev) {
4978 RTE_ETHDEV_LOG(ERR,
4979 "secondary process attach failed, ethdev doesn't exist\n");
4980 return -ENODEV;
4981 }
4982 }
4983
4984 ethdev->device = device;
4985
4986 if (ethdev_bus_specific_init) {
4987 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4988 if (retval) {
4989 RTE_ETHDEV_LOG(ERR,
4990 "ethdev bus specific initialisation failed\n");
4991 goto probe_failed;
4992 }
4993 }
4994
4995 retval = ethdev_init(ethdev, init_params);
4996 if (retval) {
4997 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4998 goto probe_failed;
4999 }
5000
5001 rte_eth_dev_probing_finish(ethdev);
5002
5003 return retval;
5004
5005
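/*
 * Error unwind: releasing the port below returns the ethdev slot (and, in
 * a primary process, any private data allocated above) so that the device
 * name can be reused by a later probe attempt.
 */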
probe_failed: 5006 rte_eth_dev_release_port(ethdev); 5007 return retval; 5008 } 5009 5010 int 5011 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 5012 ethdev_uninit_t ethdev_uninit) 5013 { 5014 int ret; 5015 5016 ethdev = rte_eth_dev_allocated(ethdev->data->name); 5017 if (!ethdev) 5018 return -ENODEV; 5019 5020 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 5021 5022 ret = ethdev_uninit(ethdev); 5023 if (ret) 5024 return ret; 5025 5026 return rte_eth_dev_release_port(ethdev); 5027 } 5028 5029 int 5030 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5031 int epfd, int op, void *data) 5032 { 5033 uint32_t vec; 5034 struct rte_eth_dev *dev; 5035 struct rte_intr_handle *intr_handle; 5036 int rc; 5037 5038 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5039 dev = &rte_eth_devices[port_id]; 5040 5041 if (queue_id >= dev->data->nb_rx_queues) { 5042 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5043 return -EINVAL; 5044 } 5045 5046 if (!dev->intr_handle) { 5047 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5048 return -ENOTSUP; 5049 } 5050 5051 intr_handle = dev->intr_handle; 5052 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5053 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5054 return -EPERM; 5055 } 5056 5057 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5058 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5059 if (rc && rc != -EEXIST) { 5060 RTE_ETHDEV_LOG(ERR, 5061 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5062 port_id, queue_id, op, epfd, vec); 5063 return rc; 5064 } 5065 5066 return 0; 5067 } 5068 5069 int 5070 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5071 uint16_t queue_id) 5072 { 5073 struct rte_eth_dev *dev; 5074 int ret; 5075 5076 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5077 dev = &rte_eth_devices[port_id]; 5078 5079 ret = eth_dev_validate_rx_queue(dev, queue_id); 5080 if (ret != 0) 5081 return ret; 5082 5083 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5084 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5085 } 5086 5087 int 5088 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5089 uint16_t queue_id) 5090 { 5091 struct rte_eth_dev *dev; 5092 int ret; 5093 5094 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5095 dev = &rte_eth_devices[port_id]; 5096 5097 ret = eth_dev_validate_rx_queue(dev, queue_id); 5098 if (ret != 0) 5099 return ret; 5100 5101 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5102 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5103 } 5104 5105 5106 const struct rte_eth_rxtx_callback * 5107 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5108 rte_rx_callback_fn fn, void *user_param) 5109 { 5110 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5111 rte_errno = ENOTSUP; 5112 return NULL; 5113 #endif 5114 struct rte_eth_dev *dev; 5115 5116 /* check input parameters */ 5117 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5118 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5119 rte_errno = EINVAL; 5120 return NULL; 5121 } 5122 dev = &rte_eth_devices[port_id]; 5123 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5124 rte_errno = EINVAL; 5125 return NULL; 5126 } 5127 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5128 5129 if (cb == NULL) { 5130 rte_errno = ENOMEM; 5131 return NULL; 5132 } 5133 5134 cb->fn.rx = fn; 5135 cb->param = user_param; 5136 5137 rte_spinlock_lock(ð_dev_rx_cb_lock); 5138 /* Add the callbacks in fifo 
order. */ 5139 struct rte_eth_rxtx_callback *tail = 5140 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5141 5142 if (!tail) { 5143 /* Stores to cb->fn and cb->param should complete before 5144 * cb is visible to data plane. 5145 */ 5146 __atomic_store_n( 5147 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5148 cb, __ATOMIC_RELEASE); 5149 5150 } else { 5151 while (tail->next) 5152 tail = tail->next; 5153 /* Stores to cb->fn and cb->param should complete before 5154 * cb is visible to data plane. 5155 */ 5156 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5157 } 5158 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5159 5160 return cb; 5161 } 5162 5163 const struct rte_eth_rxtx_callback * 5164 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5165 rte_rx_callback_fn fn, void *user_param) 5166 { 5167 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5168 rte_errno = ENOTSUP; 5169 return NULL; 5170 #endif 5171 /* check input parameters */ 5172 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5173 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5174 rte_errno = EINVAL; 5175 return NULL; 5176 } 5177 5178 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5179 5180 if (cb == NULL) { 5181 rte_errno = ENOMEM; 5182 return NULL; 5183 } 5184 5185 cb->fn.rx = fn; 5186 cb->param = user_param; 5187 5188 rte_spinlock_lock(ð_dev_rx_cb_lock); 5189 /* Add the callbacks at first position */ 5190 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5191 /* Stores to cb->fn, cb->param and cb->next should complete before 5192 * cb is visible to data plane threads. 5193 */ 5194 __atomic_store_n( 5195 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5196 cb, __ATOMIC_RELEASE); 5197 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5198 5199 return cb; 5200 } 5201 5202 const struct rte_eth_rxtx_callback * 5203 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5204 rte_tx_callback_fn fn, void *user_param) 5205 { 5206 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5207 rte_errno = ENOTSUP; 5208 return NULL; 5209 #endif 5210 struct rte_eth_dev *dev; 5211 5212 /* check input parameters */ 5213 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5214 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5215 rte_errno = EINVAL; 5216 return NULL; 5217 } 5218 5219 dev = &rte_eth_devices[port_id]; 5220 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5221 rte_errno = EINVAL; 5222 return NULL; 5223 } 5224 5225 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5226 5227 if (cb == NULL) { 5228 rte_errno = ENOMEM; 5229 return NULL; 5230 } 5231 5232 cb->fn.tx = fn; 5233 cb->param = user_param; 5234 5235 rte_spinlock_lock(ð_dev_tx_cb_lock); 5236 /* Add the callbacks in fifo order. */ 5237 struct rte_eth_rxtx_callback *tail = 5238 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5239 5240 if (!tail) { 5241 /* Stores to cb->fn and cb->param should complete before 5242 * cb is visible to data plane. 5243 */ 5244 __atomic_store_n( 5245 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5246 cb, __ATOMIC_RELEASE); 5247 5248 } else { 5249 while (tail->next) 5250 tail = tail->next; 5251 /* Stores to cb->fn and cb->param should complete before 5252 * cb is visible to data plane. 
5253 */ 5254 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5255 } 5256 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5257 5258 return cb; 5259 } 5260 5261 int 5262 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5263 const struct rte_eth_rxtx_callback *user_cb) 5264 { 5265 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5266 return -ENOTSUP; 5267 #endif 5268 /* Check input parameters. */ 5269 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5270 if (user_cb == NULL || 5271 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5272 return -EINVAL; 5273 5274 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5275 struct rte_eth_rxtx_callback *cb; 5276 struct rte_eth_rxtx_callback **prev_cb; 5277 int ret = -EINVAL; 5278 5279 rte_spinlock_lock(ð_dev_rx_cb_lock); 5280 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5281 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5282 cb = *prev_cb; 5283 if (cb == user_cb) { 5284 /* Remove the user cb from the callback list. */ 5285 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5286 ret = 0; 5287 break; 5288 } 5289 } 5290 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5291 5292 return ret; 5293 } 5294 5295 int 5296 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5297 const struct rte_eth_rxtx_callback *user_cb) 5298 { 5299 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5300 return -ENOTSUP; 5301 #endif 5302 /* Check input parameters. */ 5303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5304 if (user_cb == NULL || 5305 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5306 return -EINVAL; 5307 5308 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5309 int ret = -EINVAL; 5310 struct rte_eth_rxtx_callback *cb; 5311 struct rte_eth_rxtx_callback **prev_cb; 5312 5313 rte_spinlock_lock(ð_dev_tx_cb_lock); 5314 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5315 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5316 cb = *prev_cb; 5317 if (cb == user_cb) { 5318 /* Remove the user cb from the callback list. 
*/ 5319 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5320 ret = 0; 5321 break; 5322 } 5323 } 5324 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5325 5326 return ret; 5327 } 5328 5329 int 5330 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5331 struct rte_eth_rxq_info *qinfo) 5332 { 5333 struct rte_eth_dev *dev; 5334 5335 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5336 dev = &rte_eth_devices[port_id]; 5337 5338 if (queue_id >= dev->data->nb_rx_queues) { 5339 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5340 return -EINVAL; 5341 } 5342 5343 if (qinfo == NULL) { 5344 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5345 port_id, queue_id); 5346 return -EINVAL; 5347 } 5348 5349 if (dev->data->rx_queues == NULL || 5350 dev->data->rx_queues[queue_id] == NULL) { 5351 RTE_ETHDEV_LOG(ERR, 5352 "Rx queue %"PRIu16" of device with port_id=%" 5353 PRIu16" has not been setup\n", 5354 queue_id, port_id); 5355 return -EINVAL; 5356 } 5357 5358 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5359 RTE_ETHDEV_LOG(INFO, 5360 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5361 queue_id, port_id); 5362 return -EINVAL; 5363 } 5364 5365 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5366 5367 memset(qinfo, 0, sizeof(*qinfo)); 5368 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5369 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5370 5371 return 0; 5372 } 5373 5374 int 5375 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5376 struct rte_eth_txq_info *qinfo) 5377 { 5378 struct rte_eth_dev *dev; 5379 5380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5381 dev = &rte_eth_devices[port_id]; 5382 5383 if (queue_id >= dev->data->nb_tx_queues) { 5384 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5385 return -EINVAL; 5386 } 5387 5388 if (qinfo == NULL) { 5389 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5390 port_id, queue_id); 5391 return -EINVAL; 5392 } 5393 5394 if (dev->data->tx_queues == NULL || 5395 dev->data->tx_queues[queue_id] == NULL) { 5396 RTE_ETHDEV_LOG(ERR, 5397 "Tx queue %"PRIu16" of device with port_id=%" 5398 PRIu16" has not been setup\n", 5399 queue_id, port_id); 5400 return -EINVAL; 5401 } 5402 5403 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5404 RTE_ETHDEV_LOG(INFO, 5405 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5406 queue_id, port_id); 5407 return -EINVAL; 5408 } 5409 5410 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5411 5412 memset(qinfo, 0, sizeof(*qinfo)); 5413 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5414 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5415 5416 return 0; 5417 } 5418 5419 int 5420 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5421 struct rte_eth_burst_mode *mode) 5422 { 5423 struct rte_eth_dev *dev; 5424 5425 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5426 dev = &rte_eth_devices[port_id]; 5427 5428 if (queue_id >= dev->data->nb_rx_queues) { 5429 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5430 return -EINVAL; 5431 } 5432 5433 if (mode == NULL) { 5434 RTE_ETHDEV_LOG(ERR, 5435 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5436 port_id, queue_id); 5437 return -EINVAL; 5438 } 5439 5440 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5441 memset(mode, 0, sizeof(*mode)); 5442 return eth_err(port_id, 5443 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5444 } 5445 5446 int 5447 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5448 struct rte_eth_burst_mode *mode) 5449 { 5450 struct rte_eth_dev *dev; 5451 5452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5453 dev = &rte_eth_devices[port_id]; 5454 5455 if (queue_id >= dev->data->nb_tx_queues) { 5456 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5457 return -EINVAL; 5458 } 5459 5460 if (mode == NULL) { 5461 RTE_ETHDEV_LOG(ERR, 5462 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5463 port_id, queue_id); 5464 return -EINVAL; 5465 } 5466 5467 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5468 memset(mode, 0, sizeof(*mode)); 5469 return eth_err(port_id, 5470 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5471 } 5472 5473 int 5474 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5475 struct rte_power_monitor_cond *pmc) 5476 { 5477 struct rte_eth_dev *dev; 5478 5479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5480 dev = &rte_eth_devices[port_id]; 5481 5482 if (queue_id >= dev->data->nb_rx_queues) { 5483 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5484 return -EINVAL; 5485 } 5486 5487 if (pmc == NULL) { 5488 RTE_ETHDEV_LOG(ERR, 5489 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5490 port_id, queue_id); 5491 return -EINVAL; 5492 } 5493 5494 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5495 return eth_err(port_id, 5496 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5497 } 5498 5499 int 5500 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5501 struct rte_ether_addr *mc_addr_set, 5502 uint32_t nb_mc_addr) 5503 { 5504 struct rte_eth_dev *dev; 5505 5506 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5507 dev = &rte_eth_devices[port_id]; 5508 5509 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5510 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5511 mc_addr_set, nb_mc_addr)); 5512 } 5513 5514 int 5515 rte_eth_timesync_enable(uint16_t port_id) 5516 { 5517 struct rte_eth_dev *dev; 5518 5519 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5520 dev = &rte_eth_devices[port_id]; 5521 5522 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5523 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5524 } 5525 5526 int 5527 rte_eth_timesync_disable(uint16_t port_id) 5528 { 5529 struct rte_eth_dev *dev; 5530 5531 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5532 dev = &rte_eth_devices[port_id]; 5533 5534 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5535 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5536 } 5537 5538 int 5539 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5540 uint32_t flags) 5541 { 5542 struct rte_eth_dev *dev; 5543 5544 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5545 dev = &rte_eth_devices[port_id]; 5546 5547 if (timestamp == NULL) { 5548 RTE_ETHDEV_LOG(ERR, 5549 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5550 port_id); 5551 return -EINVAL; 5552 } 5553 5554 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5555 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5556 (dev, timestamp, flags)); 5557 } 5558 5559 int 5560 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5561 struct timespec *timestamp) 5562 { 5563 struct rte_eth_dev *dev; 5564 5565 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5566 dev = &rte_eth_devices[port_id]; 5567 5568 if (timestamp == NULL) { 5569 RTE_ETHDEV_LOG(ERR, 5570 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5571 port_id); 5572 return -EINVAL; 5573 } 5574 5575 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5576 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5577 (dev, timestamp)); 5578 } 5579 5580 int 5581 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5582 { 5583 struct rte_eth_dev *dev; 5584 5585 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5586 dev = &rte_eth_devices[port_id]; 5587 5588 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5589 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5590 } 5591 5592 int 5593 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5594 { 5595 struct rte_eth_dev *dev; 5596 5597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5598 dev = &rte_eth_devices[port_id]; 5599 5600 if (timestamp == NULL) { 5601 RTE_ETHDEV_LOG(ERR, 5602 "Cannot read ethdev port %u timesync time to NULL\n", 5603 port_id); 5604 return -EINVAL; 5605 } 5606 5607 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5608 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5609 timestamp)); 5610 } 5611 5612 int 5613 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5614 { 5615 struct rte_eth_dev *dev; 5616 5617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5618 dev = &rte_eth_devices[port_id]; 5619 5620 if (timestamp == NULL) { 5621 RTE_ETHDEV_LOG(ERR, 5622 "Cannot write ethdev port %u timesync from NULL time\n", 5623 port_id); 5624 return -EINVAL; 5625 } 5626 5627 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5628 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5629 timestamp)); 5630 } 5631 5632 int 5633 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5634 { 5635 struct rte_eth_dev *dev; 5636 5637 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5638 dev = &rte_eth_devices[port_id]; 5639 5640 if (clock == NULL) { 5641 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5642 port_id); 5643 return -EINVAL; 5644 } 5645 5646 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5647 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5648 } 5649 5650 int 5651 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5652 { 5653 struct rte_eth_dev *dev; 5654 5655 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5656 dev = &rte_eth_devices[port_id]; 5657 5658 if (info == NULL) { 5659 RTE_ETHDEV_LOG(ERR, 5660 "Cannot get ethdev port %u register info to NULL\n", 5661 port_id); 5662 return -EINVAL; 5663 } 5664 5665 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5666 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5667 } 5668 5669 int 5670 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5671 { 5672 struct rte_eth_dev *dev; 5673 5674 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5675 dev = &rte_eth_devices[port_id]; 5676 5677 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5678 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5679 } 5680 5681 int 5682 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5683 { 5684 struct rte_eth_dev *dev; 5685 5686 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
5687 dev = &rte_eth_devices[port_id]; 5688 5689 if (info == NULL) { 5690 RTE_ETHDEV_LOG(ERR, 5691 "Cannot get ethdev port %u EEPROM info to NULL\n", 5692 port_id); 5693 return -EINVAL; 5694 } 5695 5696 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5697 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5698 } 5699 5700 int 5701 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5702 { 5703 struct rte_eth_dev *dev; 5704 5705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5706 dev = &rte_eth_devices[port_id]; 5707 5708 if (info == NULL) { 5709 RTE_ETHDEV_LOG(ERR, 5710 "Cannot set ethdev port %u EEPROM from NULL info\n", 5711 port_id); 5712 return -EINVAL; 5713 } 5714 5715 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5716 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5717 } 5718 5719 int 5720 rte_eth_dev_get_module_info(uint16_t port_id, 5721 struct rte_eth_dev_module_info *modinfo) 5722 { 5723 struct rte_eth_dev *dev; 5724 5725 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5726 dev = &rte_eth_devices[port_id]; 5727 5728 if (modinfo == NULL) { 5729 RTE_ETHDEV_LOG(ERR, 5730 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5731 port_id); 5732 return -EINVAL; 5733 } 5734 5735 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5736 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5737 } 5738 5739 int 5740 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5741 struct rte_dev_eeprom_info *info) 5742 { 5743 struct rte_eth_dev *dev; 5744 5745 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5746 dev = &rte_eth_devices[port_id]; 5747 5748 if (info == NULL) { 5749 RTE_ETHDEV_LOG(ERR, 5750 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5751 port_id); 5752 return -EINVAL; 5753 } 5754 5755 if (info->data == NULL) { 5756 RTE_ETHDEV_LOG(ERR, 5757 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5758 port_id); 5759 return -EINVAL; 5760 } 5761 5762 if (info->length == 0) { 5763 RTE_ETHDEV_LOG(ERR, 5764 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5765 port_id); 5766 return -EINVAL; 5767 } 5768 5769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5770 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5771 } 5772 5773 int 5774 rte_eth_dev_get_dcb_info(uint16_t port_id, 5775 struct rte_eth_dcb_info *dcb_info) 5776 { 5777 struct rte_eth_dev *dev; 5778 5779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5780 dev = &rte_eth_devices[port_id]; 5781 5782 if (dcb_info == NULL) { 5783 RTE_ETHDEV_LOG(ERR, 5784 "Cannot get ethdev port %u DCB info to NULL\n", 5785 port_id); 5786 return -EINVAL; 5787 } 5788 5789 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5790 5791 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5792 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5793 } 5794 5795 static void 5796 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5797 const struct rte_eth_desc_lim *desc_lim) 5798 { 5799 if (desc_lim->nb_align != 0) 5800 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5801 5802 if (desc_lim->nb_max != 0) 5803 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5804 5805 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5806 } 5807 5808 int 5809 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5810 uint16_t *nb_rx_desc, 5811 uint16_t *nb_tx_desc) 5812 { 5813 struct rte_eth_dev_info dev_info; 5814 int ret; 5815 5816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
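/*
 * eth_dev_adjust_nb_desc() above rounds a requested ring size up to the
 * driver's nb_align and clamps it into [nb_min, nb_max]. A worked example
 * with illustrative limits (the real values come from rte_eth_dev_info_get()):
 * nb_align = 32, nb_min = 64 and nb_max = 4096 turn a request for 1000
 * descriptors into 1024.
 */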
5817 5818 ret = rte_eth_dev_info_get(port_id, &dev_info); 5819 if (ret != 0) 5820 return ret; 5821 5822 if (nb_rx_desc != NULL) 5823 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5824 5825 if (nb_tx_desc != NULL) 5826 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5827 5828 return 0; 5829 } 5830 5831 int 5832 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5833 struct rte_eth_hairpin_cap *cap) 5834 { 5835 struct rte_eth_dev *dev; 5836 5837 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5838 dev = &rte_eth_devices[port_id]; 5839 5840 if (cap == NULL) { 5841 RTE_ETHDEV_LOG(ERR, 5842 "Cannot get ethdev port %u hairpin capability to NULL\n", 5843 port_id); 5844 return -EINVAL; 5845 } 5846 5847 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5848 memset(cap, 0, sizeof(*cap)); 5849 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5850 } 5851 5852 int 5853 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5854 { 5855 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5856 return 1; 5857 return 0; 5858 } 5859 5860 int 5861 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5862 { 5863 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5864 return 1; 5865 return 0; 5866 } 5867 5868 int 5869 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5870 { 5871 struct rte_eth_dev *dev; 5872 5873 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5874 dev = &rte_eth_devices[port_id]; 5875 5876 if (pool == NULL) { 5877 RTE_ETHDEV_LOG(ERR, 5878 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5879 port_id); 5880 return -EINVAL; 5881 } 5882 5883 if (*dev->dev_ops->pool_ops_supported == NULL) 5884 return 1; /* all pools are supported */ 5885 5886 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5887 } 5888 5889 /** 5890 * A set of values to describe the possible states of a switch domain. 5891 */ 5892 enum rte_eth_switch_domain_state { 5893 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5894 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5895 }; 5896 5897 /** 5898 * Array of switch domains available for allocation. Array is sized to 5899 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5900 * ethdev ports in a single process. 
5901 */ 5902 static struct rte_eth_dev_switch { 5903 enum rte_eth_switch_domain_state state; 5904 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5905 5906 int 5907 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5908 { 5909 uint16_t i; 5910 5911 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5912 5913 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5914 if (eth_dev_switch_domains[i].state == 5915 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5916 eth_dev_switch_domains[i].state = 5917 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5918 *domain_id = i; 5919 return 0; 5920 } 5921 } 5922 5923 return -ENOSPC; 5924 } 5925 5926 int 5927 rte_eth_switch_domain_free(uint16_t domain_id) 5928 { 5929 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5930 domain_id >= RTE_MAX_ETHPORTS) 5931 return -EINVAL; 5932 5933 if (eth_dev_switch_domains[domain_id].state != 5934 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5935 return -EINVAL; 5936 5937 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5938 5939 return 0; 5940 } 5941 5942 static int 5943 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5944 { 5945 int state; 5946 struct rte_kvargs_pair *pair; 5947 char *letter; 5948 5949 arglist->str = strdup(str_in); 5950 if (arglist->str == NULL) 5951 return -ENOMEM; 5952 5953 letter = arglist->str; 5954 state = 0; 5955 arglist->count = 0; 5956 pair = &arglist->pairs[0]; 5957 while (1) { 5958 switch (state) { 5959 case 0: /* Initial */ 5960 if (*letter == '=') 5961 return -EINVAL; 5962 else if (*letter == '\0') 5963 return 0; 5964 5965 state = 1; 5966 pair->key = letter; 5967 /* fall-thru */ 5968 5969 case 1: /* Parsing key */ 5970 if (*letter == '=') { 5971 *letter = '\0'; 5972 pair->value = letter + 1; 5973 state = 2; 5974 } else if (*letter == ',' || *letter == '\0') 5975 return -EINVAL; 5976 break; 5977 5978 5979 case 2: /* Parsing value */ 5980 if (*letter == '[') 5981 state = 3; 5982 else if (*letter == ',') { 5983 *letter = '\0'; 5984 arglist->count++; 5985 pair = &arglist->pairs[arglist->count]; 5986 state = 0; 5987 } else if (*letter == '\0') { 5988 letter--; 5989 arglist->count++; 5990 pair = &arglist->pairs[arglist->count]; 5991 state = 0; 5992 } 5993 break; 5994 5995 case 3: /* Parsing list */ 5996 if (*letter == ']') 5997 state = 2; 5998 else if (*letter == '\0') 5999 return -EINVAL; 6000 break; 6001 } 6002 letter++; 6003 } 6004 } 6005 6006 int 6007 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 6008 { 6009 struct rte_kvargs args; 6010 struct rte_kvargs_pair *pair; 6011 unsigned int i; 6012 int result = 0; 6013 6014 memset(eth_da, 0, sizeof(*eth_da)); 6015 6016 result = eth_dev_devargs_tokenise(&args, dargs); 6017 if (result < 0) 6018 goto parse_cleanup; 6019 6020 for (i = 0; i < args.count; i++) { 6021 pair = &args.pairs[i]; 6022 if (strcmp("representor", pair->key) == 0) { 6023 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 6024 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 6025 dargs); 6026 result = -1; 6027 goto parse_cleanup; 6028 } 6029 result = rte_eth_devargs_parse_representor_ports( 6030 pair->value, eth_da); 6031 if (result < 0) 6032 goto parse_cleanup; 6033 } 6034 } 6035 6036 parse_cleanup: 6037 if (args.str) 6038 free(args.str); 6039 6040 return result; 6041 } 6042 6043 int 6044 rte_eth_representor_id_get(uint16_t port_id, 6045 enum rte_eth_representor_type type, 6046 int controller, int pf, int representor_port, 6047 uint16_t *repr_id) 6048 { 6049 int ret, n, count; 6050 uint32_t i; 6051 struct rte_eth_representor_info *info = NULL; 6052 size_t 

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				(pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				(representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				(representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
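
/*
 * Worked example for the range lookup above, with hypothetical numbers:
 * suppose the PMD reports a single RTE_ETH_REPRESENTOR_VF range with
 * controller 0, pf 0, vf 0, id_base 100 and id_end 103. For a request
 * (type = VF, controller = -1, pf = -1, representor_port = 2), the
 * controller and pf default to the values from the info structure,
 * count = 103 - 100 + 1 = 4, the port falls inside [vf, vf + count),
 * and the reported representor ID is
 * id_base + (representor_port - vf) = 100 + (2 - 0) = 102.
 */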

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	/* Names and values have been copied into the telemetry data object. */
	free(eth_xstats);
	return 0;
}
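
/*
 * The handler above follows the usual two-call xstats pattern: first
 * query the number of statistics with a NULL buffer, then fetch the
 * names and values into buffers of that size. A minimal
 * application-side sketch of the same pattern (illustrative; error
 * handling trimmed):
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
 *	free(vals);
 *	free(names);
 */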

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];
	if (!eth_dev)
		return -EINVAL;

	rxq_state = rte_tel_data_alloc();
	if (rxq_state == NULL)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (txq_state == NULL) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
			eth_dev->data->rx_mbuf_alloc_failed);
	/* The buffer must hold the full "xx:xx:xx:xx:xx:xx" string. */
	snprintf(mac_addr, sizeof(mac_addr), "%02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->mac_addrs->addr_bytes[0],
			eth_dev->data->mac_addrs->addr_bytes[1],
			eth_dev->data->mac_addrs->addr_bytes[2],
			eth_dev->data->mac_addrs->addr_bytes[3],
			eth_dev->data->mac_addrs->addr_bytes[4],
			eth_dev->data->mac_addrs->addr_bytes[5]);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
	rte_tel_data_add_dict_int(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads);
	rte_tel_data_add_dict_int(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads);
	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

	return 0;
}
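
/*
 * The handlers above are served over the telemetry socket once they are
 * registered in ethdev_init_telemetry() below. With the standard
 * usertools/dpdk-telemetry.py client a session could look roughly like
 * this (illustrative values; the exact JSON depends on the running
 * application):
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *	    "duplex": "full-duplex"}}
 */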

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
			cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
			peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
			direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
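
/*
 * Application-side sketch for the negotiation above (illustrative; the
 * RTE_ETH_RX_METADATA_* flags are the ones declared next to this API in
 * rte_ethdev.h). The call has to happen before rte_eth_dev_configure(),
 * which is what the dev_configured check above enforces:
 *
 *	uint64_t feats = RTE_ETH_RX_METADATA_USER_FLAG |
 *			 RTE_ETH_RX_METADATA_USER_MARK;
 *	int ret = rte_eth_rx_metadata_negotiate(port_id, &feats);
 *
 * On success, feats is reduced to the subset of features the PMD agreed
 * to deliver; -ENOTSUP means the PMD does not implement the
 * rx_metadata_negotiate callback at all.
 */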

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
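
/*
 * Applications can expose their own commands through the same
 * rte_telemetry_register_cmd() mechanism used above. A minimal sketch
 * with a hypothetical command name and handler:
 *
 *	static int
 *	my_app_handle_version(const char *cmd __rte_unused,
 *			const char *params __rte_unused,
 *			struct rte_tel_data *d)
 *	{
 *		rte_tel_data_start_dict(d);
 *		rte_tel_data_add_dict_string(d, "version", "1.0");
 *		return 0;
 *	}
 *
 *	rte_telemetry_register_cmd("/myapp/version", my_app_handle_version,
 *			"Returns the application version. Takes no parameters");
 */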