/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
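
/*
 * Illustrative sketch (not part of this file): the name/offset table above
 * lets a basic stats field be read generically by offset. Assuming a filled
 * struct rte_eth_stats named "stats", a counter could be fetched as:
 *
 *	const struct rte_eth_xstats_name_off *ent = &eth_dev_stats_strings[0];
 *	uint64_t value = *(const uint64_t *)
 *			(((const char *)&stats) + ent->offset);
 *
 * which is the same kind of pointer arithmetic the stats helpers later in
 * this file rely on.
 */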

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}
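
/*
 * Illustrative sketch (not part of this file): a typical application-side
 * loop over the ports matched by a devargs string, using the iterator API
 * above. The devargs string is only an example value.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iter);
 *	}
 *
 * RTE_ETH_FOREACH_MATCHING_DEV() in rte_ethdev.h wraps the same pattern.
 */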

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}
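
/*
 * Illustrative sketch (not part of this file): rte_eth_find_next_of() and
 * rte_eth_find_next_sibling() are the building blocks of the
 * RTE_ETH_FOREACH_DEV_OF() / RTE_ETH_FOREACH_DEV_SIBLING() macros in
 * rte_ethdev.h. Walking every port that shares the same underlying
 * rte_device as a known port "ref_port" could look like:
 *
 *	uint16_t sib;
 *
 *	for (sib = rte_eth_find_next_sibling(0, ref_port);
 *	     sib < RTE_MAX_ETHPORTS;
 *	     sib = rte_eth_find_next_sibling(sib + 1, ref_port))
 *		handle_port(sib);	// hypothetical helper
 */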

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port ID both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
					rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
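
/*
 * Illustrative sketch (not part of this file): the usual ownership flow for
 * an application component that wants exclusive control of a port.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint16_t port_id;		// hypothetical, already known port
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		// ... use the port exclusively ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 *
 * Ports owned this way are skipped by RTE_ETH_FOREACH_DEV() and can be
 * enumerated with rte_eth_find_next_owned_by() instead.
 */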

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
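
/*
 * Illustrative sketch (not part of this file): the two lookups above are
 * symmetric, so a port can be resolved from its name and back again.
 * "net_tap0" is only an example device name.
 *
 *	uint16_t port_id;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("net_tap0", &port_id) == 0 &&
 *	    rte_eth_dev_get_name_by_port(port_id, name) == 0)
 *		printf("port %u is %s\n", port_id, name);
 */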

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}
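
/*
 * Illustrative sketch (not part of this file): the *_offload_name() helpers
 * above take a single-bit mask, so decoding a full capability word means
 * walking it bit by bit, e.g. for the Rx offloads reported by a device:
 *
 *	uint64_t offloads = dev_info.rx_offload_capa;	// assumed filled in
 *
 *	while (offloads != 0) {
 *		uint64_t bit = RTE_BIT64(__builtin_ctzll(offloads));
 *
 *		printf("  %s\n", rte_eth_dev_rx_offload_name(bit));
 *		offloads &= ~bit;
 *	}
 */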

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
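
/*
 * Worked example (illustrative): with the default Ethernet overhead of
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, an MTU of
 * RTE_ETHER_MTU (1500) corresponds to a frame size of 1518 bytes, which is
 * what eth_dev_validate_mtu() checks against dev_info->max_rx_pktlen.
 * Drivers reporting both max_rx_pktlen and max_mtu get their real overhead
 * (max_rx_pktlen - max_mtu) used instead.
 */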

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unexpected behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
			dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
			dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * would like to bypass the same-value check
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * would like to bypass the same-value check
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
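
/*
 * Illustrative sketch (not part of this file): the minimal bring-up order
 * expected by the checks in rte_eth_dev_configure() and rte_eth_dev_start(),
 * assuming one Rx and one Tx queue and an already created mempool "mb_pool".
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0 ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool) != 0 ||
 *	    rte_eth_tx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL) != 0 ||
 *	    rte_eth_dev_start(port_id) != 0)
 *		handle_error();	// hypothetical
 *
 * Passing 0 descriptors and NULL queue configs selects the driver/EAL
 * defaults, as handled in the queue setup paths below.
 */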
port_id=%"PRIu16" already stopped\n", 1875 port_id); 1876 return 0; 1877 } 1878 1879 /* point fast-path functions to dummy ones */ 1880 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1881 1882 dev->data->dev_started = 0; 1883 ret = (*dev->dev_ops->dev_stop)(dev); 1884 rte_ethdev_trace_stop(port_id, ret); 1885 1886 return ret; 1887 } 1888 1889 int 1890 rte_eth_dev_set_link_up(uint16_t port_id) 1891 { 1892 struct rte_eth_dev *dev; 1893 1894 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1895 dev = &rte_eth_devices[port_id]; 1896 1897 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1898 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1899 } 1900 1901 int 1902 rte_eth_dev_set_link_down(uint16_t port_id) 1903 { 1904 struct rte_eth_dev *dev; 1905 1906 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1907 dev = &rte_eth_devices[port_id]; 1908 1909 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1910 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1911 } 1912 1913 int 1914 rte_eth_dev_close(uint16_t port_id) 1915 { 1916 struct rte_eth_dev *dev; 1917 int firsterr, binerr; 1918 int *lasterr = &firsterr; 1919 1920 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1921 dev = &rte_eth_devices[port_id]; 1922 1923 if (dev->data->dev_started) { 1924 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1925 port_id); 1926 return -EINVAL; 1927 } 1928 1929 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1930 *lasterr = (*dev->dev_ops->dev_close)(dev); 1931 if (*lasterr != 0) 1932 lasterr = &binerr; 1933 1934 rte_ethdev_trace_close(port_id); 1935 *lasterr = rte_eth_dev_release_port(dev); 1936 1937 return firsterr; 1938 } 1939 1940 int 1941 rte_eth_dev_reset(uint16_t port_id) 1942 { 1943 struct rte_eth_dev *dev; 1944 int ret; 1945 1946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1947 dev = &rte_eth_devices[port_id]; 1948 1949 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1950 1951 ret = rte_eth_dev_stop(port_id); 1952 if (ret != 0) { 1953 RTE_ETHDEV_LOG(ERR, 1954 "Failed to stop device (port %u) before reset: %s - ignore\n", 1955 port_id, rte_strerror(-ret)); 1956 } 1957 ret = dev->dev_ops->dev_reset(dev); 1958 1959 return eth_err(port_id, ret); 1960 } 1961 1962 int 1963 rte_eth_dev_is_removed(uint16_t port_id) 1964 { 1965 struct rte_eth_dev *dev; 1966 int ret; 1967 1968 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1969 dev = &rte_eth_devices[port_id]; 1970 1971 if (dev->state == RTE_ETH_DEV_REMOVED) 1972 return 1; 1973 1974 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1975 1976 ret = dev->dev_ops->is_removed(dev); 1977 if (ret != 0) 1978 /* Device is physically removed. */ 1979 dev->state = RTE_ETH_DEV_REMOVED; 1980 1981 return ret; 1982 } 1983 1984 static int 1985 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1986 uint16_t n_seg, uint32_t *mbp_buf_size, 1987 const struct rte_eth_dev_info *dev_info) 1988 { 1989 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1990 struct rte_mempool *mp_first; 1991 uint32_t offset_mask; 1992 uint16_t seg_idx; 1993 1994 if (n_seg > seg_capa->max_nseg) { 1995 RTE_ETHDEV_LOG(ERR, 1996 "Requested Rx segments %u exceed supported %u\n", 1997 n_seg, seg_capa->max_nseg); 1998 return -EINVAL; 1999 } 2000 /* 2001 * Check the sizes and offsets against buffer sizes 2002 * for each segment specified in extended configuration. 
2003 */ 2004 mp_first = rx_seg[0].mp; 2005 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 2006 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 2007 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 2008 uint32_t length = rx_seg[seg_idx].length; 2009 uint32_t offset = rx_seg[seg_idx].offset; 2010 2011 if (mpl == NULL) { 2012 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 2013 return -EINVAL; 2014 } 2015 if (seg_idx != 0 && mp_first != mpl && 2016 seg_capa->multi_pools == 0) { 2017 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 2018 return -ENOTSUP; 2019 } 2020 if (offset != 0) { 2021 if (seg_capa->offset_allowed == 0) { 2022 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 2023 return -ENOTSUP; 2024 } 2025 if (offset & offset_mask) { 2026 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2027 offset, 2028 seg_capa->offset_align_log2); 2029 return -EINVAL; 2030 } 2031 } 2032 if (mpl->private_data_size < 2033 sizeof(struct rte_pktmbuf_pool_private)) { 2034 RTE_ETHDEV_LOG(ERR, 2035 "%s private_data_size %u < %u\n", 2036 mpl->name, mpl->private_data_size, 2037 (unsigned int)sizeof 2038 (struct rte_pktmbuf_pool_private)); 2039 return -ENOSPC; 2040 } 2041 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2042 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2043 length = length != 0 ? length : *mbp_buf_size; 2044 if (*mbp_buf_size < length + offset) { 2045 RTE_ETHDEV_LOG(ERR, 2046 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2047 mpl->name, *mbp_buf_size, 2048 length + offset, length, offset); 2049 return -EINVAL; 2050 } 2051 } 2052 return 0; 2053 } 2054 2055 int 2056 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2057 uint16_t nb_rx_desc, unsigned int socket_id, 2058 const struct rte_eth_rxconf *rx_conf, 2059 struct rte_mempool *mp) 2060 { 2061 int ret; 2062 uint32_t mbp_buf_size; 2063 struct rte_eth_dev *dev; 2064 struct rte_eth_dev_info dev_info; 2065 struct rte_eth_rxconf local_conf; 2066 2067 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2068 dev = &rte_eth_devices[port_id]; 2069 2070 if (rx_queue_id >= dev->data->nb_rx_queues) { 2071 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2072 return -EINVAL; 2073 } 2074 2075 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2076 2077 ret = rte_eth_dev_info_get(port_id, &dev_info); 2078 if (ret != 0) 2079 return ret; 2080 2081 if (mp != NULL) { 2082 /* Single pool configuration check. */ 2083 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2084 RTE_ETHDEV_LOG(ERR, 2085 "Ambiguous segment configuration\n"); 2086 return -EINVAL; 2087 } 2088 /* 2089 * Check the size of the mbuf data buffer, this value 2090 * must be provided in the private data of the memory pool. 2091 * First check that the memory pool(s) has a valid private data. 
2092 */ 2093 if (mp->private_data_size < 2094 sizeof(struct rte_pktmbuf_pool_private)) { 2095 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2096 mp->name, mp->private_data_size, 2097 (unsigned int) 2098 sizeof(struct rte_pktmbuf_pool_private)); 2099 return -ENOSPC; 2100 } 2101 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2102 if (mbp_buf_size < dev_info.min_rx_bufsize + 2103 RTE_PKTMBUF_HEADROOM) { 2104 RTE_ETHDEV_LOG(ERR, 2105 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2106 mp->name, mbp_buf_size, 2107 RTE_PKTMBUF_HEADROOM + 2108 dev_info.min_rx_bufsize, 2109 RTE_PKTMBUF_HEADROOM, 2110 dev_info.min_rx_bufsize); 2111 return -EINVAL; 2112 } 2113 } else { 2114 const struct rte_eth_rxseg_split *rx_seg; 2115 uint16_t n_seg; 2116 2117 /* Extended multi-segment configuration check. */ 2118 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2119 RTE_ETHDEV_LOG(ERR, 2120 "Memory pool is null and no extended configuration provided\n"); 2121 return -EINVAL; 2122 } 2123 2124 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2125 n_seg = rx_conf->rx_nseg; 2126 2127 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2128 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2129 &mbp_buf_size, 2130 &dev_info); 2131 if (ret != 0) 2132 return ret; 2133 } else { 2134 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2135 return -EINVAL; 2136 } 2137 } 2138 2139 /* Use default specified by driver, if nb_rx_desc is zero */ 2140 if (nb_rx_desc == 0) { 2141 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2142 /* If driver default is also zero, fall back on EAL default */ 2143 if (nb_rx_desc == 0) 2144 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2145 } 2146 2147 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2148 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2149 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2150 2151 RTE_ETHDEV_LOG(ERR, 2152 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2153 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2154 dev_info.rx_desc_lim.nb_min, 2155 dev_info.rx_desc_lim.nb_align); 2156 return -EINVAL; 2157 } 2158 2159 if (dev->data->dev_started && 2160 !(dev_info.dev_capa & 2161 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2162 return -EBUSY; 2163 2164 if (dev->data->dev_started && 2165 (dev->data->rx_queue_state[rx_queue_id] != 2166 RTE_ETH_QUEUE_STATE_STOPPED)) 2167 return -EBUSY; 2168 2169 eth_dev_rxq_release(dev, rx_queue_id); 2170 2171 if (rx_conf == NULL) 2172 rx_conf = &dev_info.default_rxconf; 2173 2174 local_conf = *rx_conf; 2175 2176 /* 2177 * If an offloading has already been enabled in 2178 * rte_eth_dev_configure(), it has been enabled on all queues, 2179 * so there is no need to enable it in this queue again. 2180 * The local_conf.offloads input to underlying PMD only carries 2181 * those offloadings which are only enabled on this queue and 2182 * not enabled on all queues. 2183 */ 2184 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2185 2186 /* 2187 * New added offloadings for this queue are those not enabled in 2188 * rte_eth_dev_configure() and they must be per-queue type. 2189 * A pure per-port offloading can't be enabled on a queue while 2190 * disabled on another queue. A pure per-port offloading can't 2191 * be enabled for any queue as new added one if it hasn't been 2192 * enabled in rte_eth_dev_configure(). 
2193 */ 2194 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2195 local_conf.offloads) { 2196 RTE_ETHDEV_LOG(ERR, 2197 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2198 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2199 port_id, rx_queue_id, local_conf.offloads, 2200 dev_info.rx_queue_offload_capa, 2201 __func__); 2202 return -EINVAL; 2203 } 2204 2205 if (local_conf.share_group > 0 && 2206 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2207 RTE_ETHDEV_LOG(ERR, 2208 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2209 port_id, rx_queue_id, local_conf.share_group); 2210 return -EINVAL; 2211 } 2212 2213 /* 2214 * If LRO is enabled, check that the maximum aggregated packet 2215 * size is supported by the configured device. 2216 */ 2217 /* Get the real Ethernet overhead length */ 2218 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2219 uint32_t overhead_len; 2220 uint32_t max_rx_pktlen; 2221 int ret; 2222 2223 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2224 dev_info.max_mtu); 2225 max_rx_pktlen = dev->data->mtu + overhead_len; 2226 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2227 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2228 ret = eth_dev_check_lro_pkt_size(port_id, 2229 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2230 max_rx_pktlen, 2231 dev_info.max_lro_pkt_size); 2232 if (ret != 0) 2233 return ret; 2234 } 2235 2236 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2237 socket_id, &local_conf, mp); 2238 if (!ret) { 2239 if (!dev->data->min_rx_buf_size || 2240 dev->data->min_rx_buf_size > mbp_buf_size) 2241 dev->data->min_rx_buf_size = mbp_buf_size; 2242 } 2243 2244 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2245 rx_conf, ret); 2246 return eth_err(port_id, ret); 2247 } 2248 2249 int 2250 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2251 uint16_t nb_rx_desc, 2252 const struct rte_eth_hairpin_conf *conf) 2253 { 2254 int ret; 2255 struct rte_eth_dev *dev; 2256 struct rte_eth_hairpin_cap cap; 2257 int i; 2258 int count; 2259 2260 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2261 dev = &rte_eth_devices[port_id]; 2262 2263 if (rx_queue_id >= dev->data->nb_rx_queues) { 2264 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2265 return -EINVAL; 2266 } 2267 2268 if (conf == NULL) { 2269 RTE_ETHDEV_LOG(ERR, 2270 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2271 port_id); 2272 return -EINVAL; 2273 } 2274 2275 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2276 if (ret != 0) 2277 return ret; 2278 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2279 -ENOTSUP); 2280 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2281 if (nb_rx_desc == 0) 2282 nb_rx_desc = cap.max_nb_desc; 2283 if (nb_rx_desc > cap.max_nb_desc) { 2284 RTE_ETHDEV_LOG(ERR, 2285 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n", 2286 nb_rx_desc, cap.max_nb_desc); 2287 return -EINVAL; 2288 } 2289 if (conf->peer_count > cap.max_rx_2_tx) { 2290 RTE_ETHDEV_LOG(ERR, 2291 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n", 2292 conf->peer_count, cap.max_rx_2_tx); 2293 return -EINVAL; 2294 } 2295 if (conf->peer_count == 0) { 2296 RTE_ETHDEV_LOG(ERR, 2297 "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n", 2298 conf->peer_count); 2299 return -EINVAL; 2300 } 2301 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2302 cap.max_nb_queues != UINT16_MAX; i++) { 2303 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2304 count++; 2305 } 2306 if (count > cap.max_nb_queues) { 2307 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n", 2308 cap.max_nb_queues); 2309 return -EINVAL; 2310 } 2311 if (dev->data->dev_started) 2312 return -EBUSY; 2313 eth_dev_rxq_release(dev, rx_queue_id); 2314 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2315 nb_rx_desc, conf); 2316 if (ret == 0) 2317 dev->data->rx_queue_state[rx_queue_id] = 2318 RTE_ETH_QUEUE_STATE_HAIRPIN; 2319 return eth_err(port_id, ret); 2320 } 2321 2322 int 2323 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2324 uint16_t nb_tx_desc, unsigned int socket_id, 2325 const struct rte_eth_txconf *tx_conf) 2326 { 2327 struct rte_eth_dev *dev; 2328 struct rte_eth_dev_info dev_info; 2329 struct rte_eth_txconf local_conf; 2330 int ret; 2331 2332 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2333 dev = &rte_eth_devices[port_id]; 2334 2335 if (tx_queue_id >= dev->data->nb_tx_queues) { 2336 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2337 return -EINVAL; 2338 } 2339 2340 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2341 2342 ret = rte_eth_dev_info_get(port_id, &dev_info); 2343 if (ret != 0) 2344 return ret; 2345 2346 /* Use default specified by driver, if nb_tx_desc is zero */ 2347 if (nb_tx_desc == 0) { 2348 nb_tx_desc = dev_info.default_txportconf.ring_size; 2349 /* If driver default is zero, fall back on EAL default */ 2350 if (nb_tx_desc == 0) 2351 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2352 } 2353 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2354 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2355 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2356 RTE_ETHDEV_LOG(ERR, 2357 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2358 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2359 dev_info.tx_desc_lim.nb_min, 2360 dev_info.tx_desc_lim.nb_align); 2361 return -EINVAL; 2362 } 2363 2364 if (dev->data->dev_started && 2365 !(dev_info.dev_capa & 2366 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2367 return -EBUSY; 2368 2369 if (dev->data->dev_started && 2370 (dev->data->tx_queue_state[tx_queue_id] != 2371 RTE_ETH_QUEUE_STATE_STOPPED)) 2372 return -EBUSY; 2373 2374 eth_dev_txq_release(dev, tx_queue_id); 2375 2376 if (tx_conf == NULL) 2377 tx_conf = &dev_info.default_txconf; 2378 2379 local_conf = *tx_conf; 2380 2381 /* 2382 * If an offloading has already been enabled in 2383 * rte_eth_dev_configure(), it has been enabled on all queues, 2384 * so there is no need to enable it in this queue again.
2385 * The local_conf.offloads input to underlying PMD only carries 2386 * those offloadings which are only enabled on this queue and 2387 * not enabled on all queues. 2388 */ 2389 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2390 2391 /* 2392 * New added offloadings for this queue are those not enabled in 2393 * rte_eth_dev_configure() and they must be per-queue type. 2394 * A pure per-port offloading can't be enabled on a queue while 2395 * disabled on another queue. A pure per-port offloading can't 2396 * be enabled for any queue as new added one if it hasn't been 2397 * enabled in rte_eth_dev_configure(). 2398 */ 2399 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2400 local_conf.offloads) { 2401 RTE_ETHDEV_LOG(ERR, 2402 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2403 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2404 port_id, tx_queue_id, local_conf.offloads, 2405 dev_info.tx_queue_offload_capa, 2406 __func__); 2407 return -EINVAL; 2408 } 2409 2410 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2411 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2412 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2413 } 2414 2415 int 2416 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2417 uint16_t nb_tx_desc, 2418 const struct rte_eth_hairpin_conf *conf) 2419 { 2420 struct rte_eth_dev *dev; 2421 struct rte_eth_hairpin_cap cap; 2422 int i; 2423 int count; 2424 int ret; 2425 2426 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2427 dev = &rte_eth_devices[port_id]; 2428 2429 if (tx_queue_id >= dev->data->nb_tx_queues) { 2430 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2431 return -EINVAL; 2432 } 2433 2434 if (conf == NULL) { 2435 RTE_ETHDEV_LOG(ERR, 2436 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2437 port_id); 2438 return -EINVAL; 2439 } 2440 2441 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2442 if (ret != 0) 2443 return ret; 2444 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2445 -ENOTSUP); 2446 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2447 if (nb_tx_desc == 0) 2448 nb_tx_desc = cap.max_nb_desc; 2449 if (nb_tx_desc > cap.max_nb_desc) { 2450 RTE_ETHDEV_LOG(ERR, 2451 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n", 2452 nb_tx_desc, cap.max_nb_desc); 2453 return -EINVAL; 2454 } 2455 if (conf->peer_count > cap.max_tx_2_rx) { 2456 RTE_ETHDEV_LOG(ERR, 2457 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n", 2458 conf->peer_count, cap.max_tx_2_rx); 2459 return -EINVAL; 2460 } 2461 if (conf->peer_count == 0) { 2462 RTE_ETHDEV_LOG(ERR, 2463 "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n", 2464 conf->peer_count); 2465 return -EINVAL; 2466 } 2467 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2468 cap.max_nb_queues != UINT16_MAX; i++) { 2469 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2470 count++; 2471 } 2472 if (count > cap.max_nb_queues) { 2473 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n", 2474 cap.max_nb_queues); 2475 return -EINVAL; 2476 } 2477 if (dev->data->dev_started) 2478 return -EBUSY; 2479 eth_dev_txq_release(dev, tx_queue_id); 2480 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2481 (dev, tx_queue_id, nb_tx_desc, conf); 2482 if (ret == 0) 2483 dev->data->tx_queue_state[tx_queue_id] = 2484 RTE_ETH_QUEUE_STATE_HAIRPIN; 2485 return eth_err(port_id, ret); 2486 } 2487 2488 int 2489 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2490 { 2491 struct rte_eth_dev *dev; 2492 int ret; 2493 2494 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2495 dev = &rte_eth_devices[tx_port]; 2496 2497 if (dev->data->dev_started == 0) { 2498 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2499 return -EBUSY; 2500 } 2501 2502 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2503 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2504 if (ret != 0) 2505 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2506 " to Rx %d (%d - all ports)\n", 2507 tx_port, rx_port, RTE_MAX_ETHPORTS); 2508 2509 return ret; 2510 } 2511 2512 int 2513 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2514 { 2515 struct rte_eth_dev *dev; 2516 int ret; 2517 2518 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2519 dev = &rte_eth_devices[tx_port]; 2520 2521 if (dev->data->dev_started == 0) { 2522 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2523 return -EBUSY; 2524 } 2525 2526 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2527 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2528 if (ret != 0) 2529 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2530 " from Rx %d (%d - all ports)\n", 2531 tx_port, rx_port, RTE_MAX_ETHPORTS); 2532 2533 return ret; 2534 } 2535 2536 int 2537 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2538 size_t len, uint32_t direction) 2539 { 2540 struct rte_eth_dev *dev; 2541 int ret; 2542 2543 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2544 dev = &rte_eth_devices[port_id]; 2545 2546 if (peer_ports == NULL) { 2547 RTE_ETHDEV_LOG(ERR, 2548 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2549 port_id); 2550 return -EINVAL; 2551 } 2552 2553 if (len == 0) { 2554 RTE_ETHDEV_LOG(ERR, 2555 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2556 port_id); 2557 return -EINVAL; 2558 } 2559 2560 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2561 -ENOTSUP); 2562 2563 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2564 len, direction); 2565 if (ret < 0) 2566
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2567 port_id, direction ? "Rx" : "Tx"); 2568 2569 return ret; 2570 } 2571 2572 void 2573 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2574 void *userdata __rte_unused) 2575 { 2576 rte_pktmbuf_free_bulk(pkts, unsent); 2577 } 2578 2579 void 2580 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2581 void *userdata) 2582 { 2583 uint64_t *count = userdata; 2584 2585 rte_pktmbuf_free_bulk(pkts, unsent); 2586 *count += unsent; 2587 } 2588 2589 int 2590 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2591 buffer_tx_error_fn cbfn, void *userdata) 2592 { 2593 if (buffer == NULL) { 2594 RTE_ETHDEV_LOG(ERR, 2595 "Cannot set Tx buffer error callback to NULL buffer\n"); 2596 return -EINVAL; 2597 } 2598 2599 buffer->error_callback = cbfn; 2600 buffer->error_userdata = userdata; 2601 return 0; 2602 } 2603 2604 int 2605 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2606 { 2607 int ret = 0; 2608 2609 if (buffer == NULL) { 2610 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2611 return -EINVAL; 2612 } 2613 2614 buffer->size = size; 2615 if (buffer->error_callback == NULL) { 2616 ret = rte_eth_tx_buffer_set_err_callback( 2617 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2618 } 2619 2620 return ret; 2621 } 2622 2623 int 2624 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2625 { 2626 struct rte_eth_dev *dev; 2627 int ret; 2628 2629 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2630 dev = &rte_eth_devices[port_id]; 2631 2632 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2633 2634 /* Call driver to free pending mbufs. */ 2635 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2636 free_cnt); 2637 return eth_err(port_id, ret); 2638 } 2639 2640 int 2641 rte_eth_promiscuous_enable(uint16_t port_id) 2642 { 2643 struct rte_eth_dev *dev; 2644 int diag = 0; 2645 2646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2647 dev = &rte_eth_devices[port_id]; 2648 2649 if (dev->data->promiscuous == 1) 2650 return 0; 2651 2652 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2653 2654 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2655 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2656 2657 return eth_err(port_id, diag); 2658 } 2659 2660 int 2661 rte_eth_promiscuous_disable(uint16_t port_id) 2662 { 2663 struct rte_eth_dev *dev; 2664 int diag = 0; 2665 2666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2667 dev = &rte_eth_devices[port_id]; 2668 2669 if (dev->data->promiscuous == 0) 2670 return 0; 2671 2672 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2673 2674 dev->data->promiscuous = 0; 2675 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2676 if (diag != 0) 2677 dev->data->promiscuous = 1; 2678 2679 return eth_err(port_id, diag); 2680 } 2681 2682 int 2683 rte_eth_promiscuous_get(uint16_t port_id) 2684 { 2685 struct rte_eth_dev *dev; 2686 2687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2688 dev = &rte_eth_devices[port_id]; 2689 2690 return dev->data->promiscuous; 2691 } 2692 2693 int 2694 rte_eth_allmulticast_enable(uint16_t port_id) 2695 { 2696 struct rte_eth_dev *dev; 2697 int diag; 2698 2699 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2700 dev = &rte_eth_devices[port_id]; 2701 2702 if (dev->data->all_multicast == 1) 2703 return 0; 2704 2705 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2706 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2707 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2708 2709 return eth_err(port_id, diag); 2710 } 2711 2712 int 2713 rte_eth_allmulticast_disable(uint16_t port_id) 2714 { 2715 struct rte_eth_dev *dev; 2716 int diag; 2717 2718 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2719 dev = &rte_eth_devices[port_id]; 2720 2721 if (dev->data->all_multicast == 0) 2722 return 0; 2723 2724 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2725 dev->data->all_multicast = 0; 2726 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2727 if (diag != 0) 2728 dev->data->all_multicast = 1; 2729 2730 return eth_err(port_id, diag); 2731 } 2732 2733 int 2734 rte_eth_allmulticast_get(uint16_t port_id) 2735 { 2736 struct rte_eth_dev *dev; 2737 2738 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2739 dev = &rte_eth_devices[port_id]; 2740 2741 return dev->data->all_multicast; 2742 } 2743 2744 int 2745 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2746 { 2747 struct rte_eth_dev *dev; 2748 2749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2750 dev = &rte_eth_devices[port_id]; 2751 2752 if (eth_link == NULL) { 2753 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2754 port_id); 2755 return -EINVAL; 2756 } 2757 2758 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2759 rte_eth_linkstatus_get(dev, eth_link); 2760 else { 2761 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2762 (*dev->dev_ops->link_update)(dev, 1); 2763 *eth_link = dev->data->dev_link; 2764 } 2765 2766 return 0; 2767 } 2768 2769 int 2770 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2771 { 2772 struct rte_eth_dev *dev; 2773 2774 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2775 dev = &rte_eth_devices[port_id]; 2776 2777 if (eth_link == NULL) { 2778 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2779 port_id); 2780 return -EINVAL; 2781 } 2782 2783 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2784 rte_eth_linkstatus_get(dev, eth_link); 2785 else { 2786 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2787 (*dev->dev_ops->link_update)(dev, 0); 2788 *eth_link = dev->data->dev_link; 2789 } 2790 2791 return 0; 2792 } 2793 2794 const 
char * 2795 rte_eth_link_speed_to_str(uint32_t link_speed) 2796 { 2797 switch (link_speed) { 2798 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2799 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2800 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2801 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2802 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2803 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2804 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2805 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2806 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2807 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2808 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2809 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2810 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2811 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2812 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2813 default: return "Invalid"; 2814 } 2815 } 2816 2817 int 2818 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2819 { 2820 if (str == NULL) { 2821 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2822 return -EINVAL; 2823 } 2824 2825 if (len == 0) { 2826 RTE_ETHDEV_LOG(ERR, 2827 "Cannot convert link to string with zero size\n"); 2828 return -EINVAL; 2829 } 2830 2831 if (eth_link == NULL) { 2832 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2833 return -EINVAL; 2834 } 2835 2836 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2837 return snprintf(str, len, "Link down"); 2838 else 2839 return snprintf(str, len, "Link up at %s %s %s", 2840 rte_eth_link_speed_to_str(eth_link->link_speed), 2841 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2842 "FDX" : "HDX", 2843 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2844 "Autoneg" : "Fixed"); 2845 } 2846 2847 int 2848 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2849 { 2850 struct rte_eth_dev *dev; 2851 2852 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2853 dev = &rte_eth_devices[port_id]; 2854 2855 if (stats == NULL) { 2856 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2857 port_id); 2858 return -EINVAL; 2859 } 2860 2861 memset(stats, 0, sizeof(*stats)); 2862 2863 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2864 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2865 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2866 } 2867 2868 int 2869 rte_eth_stats_reset(uint16_t port_id) 2870 { 2871 struct rte_eth_dev *dev; 2872 int ret; 2873 2874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2875 dev = &rte_eth_devices[port_id]; 2876 2877 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2878 ret = (*dev->dev_ops->stats_reset)(dev); 2879 if (ret != 0) 2880 return eth_err(port_id, ret); 2881 2882 dev->data->rx_mbuf_alloc_failed = 0; 2883 2884 return 0; 2885 } 2886 2887 static inline int 2888 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2889 { 2890 uint16_t nb_rxqs, nb_txqs; 2891 int count; 2892 2893 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2894 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2895 2896 count = RTE_NB_STATS; 2897 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2898 count += nb_rxqs * RTE_NB_RXQ_STATS; 2899 count += nb_txqs * RTE_NB_TXQ_STATS; 2900 } 2901 2902 return count; 2903 } 2904 2905 static int 2906 eth_dev_get_xstats_count(uint16_t port_id) 2907 { 2908 struct rte_eth_dev *dev; 2909 int count; 2910 2911 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2912 dev = &rte_eth_devices[port_id]; 2913 if (dev->dev_ops->xstats_get_names != NULL) { 2914 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2915 if (count < 0) 2916 return eth_err(port_id, count); 2917 } else 2918 count = 0; 2919 2920 2921 count += eth_dev_get_xstats_basic_count(dev); 2922 2923 return count; 2924 } 2925 2926 int 2927 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2928 uint64_t *id) 2929 { 2930 int cnt_xstats, idx_xstat; 2931 2932 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2933 2934 if (xstat_name == NULL) { 2935 RTE_ETHDEV_LOG(ERR, 2936 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2937 port_id); 2938 return -ENOMEM; 2939 } 2940 2941 if (id == NULL) { 2942 RTE_ETHDEV_LOG(ERR, 2943 "Cannot get ethdev port %u xstats ID to NULL\n", 2944 port_id); 2945 return -ENOMEM; 2946 } 2947 2948 /* Get count */ 2949 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2950 if (cnt_xstats < 0) { 2951 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2952 return -ENODEV; 2953 } 2954 2955 /* Get id-name lookup table */ 2956 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2957 2958 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2959 port_id, xstats_names, cnt_xstats, NULL)) { 2960 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2961 return -1; 2962 } 2963 2964 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2965 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2966 *id = idx_xstat; 2967 return 0; 2968 }; 2969 } 2970 2971 return -EINVAL; 2972 } 2973 2974 /* retrieve basic stats names */ 2975 static int 2976 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2977 struct rte_eth_xstat_name *xstats_names) 2978 { 2979 int cnt_used_entries = 0; 2980 uint32_t idx, id_queue; 2981 uint16_t num_q; 2982 2983 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2984 strlcpy(xstats_names[cnt_used_entries].name, 2985 eth_dev_stats_strings[idx].name, 2986 sizeof(xstats_names[0].name)); 2987 cnt_used_entries++; 2988 } 2989 2990 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2991 return cnt_used_entries; 2992 2993 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2994 for (id_queue = 0; id_queue < num_q; id_queue++) { 2995 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2996 snprintf(xstats_names[cnt_used_entries].name, 2997 sizeof(xstats_names[0].name), 2998 "rx_q%u_%s", 2999 id_queue, eth_dev_rxq_stats_strings[idx].name); 3000 cnt_used_entries++; 3001 } 3002 3003 } 3004 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3005 for (id_queue = 0; id_queue < num_q; id_queue++) { 3006 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3007 snprintf(xstats_names[cnt_used_entries].name, 3008 sizeof(xstats_names[0].name), 3009 "tx_q%u_%s", 3010 id_queue, eth_dev_txq_stats_strings[idx].name); 3011 cnt_used_entries++; 3012 } 3013 } 3014 return cnt_used_entries; 3015 } 3016 3017 /* retrieve ethdev extended statistics names */ 3018 int 3019 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3020 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3021 uint64_t *ids) 3022 { 3023 struct rte_eth_xstat_name *xstats_names_copy; 3024 unsigned int no_basic_stat_requested = 1; 3025 unsigned int no_ext_stat_requested = 1; 3026 unsigned int expected_entries; 3027 unsigned int basic_count; 3028 struct rte_eth_dev *dev; 3029 unsigned int i; 3030 int ret; 3031 3032 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3033 
dev = &rte_eth_devices[port_id]; 3034 3035 basic_count = eth_dev_get_xstats_basic_count(dev); 3036 ret = eth_dev_get_xstats_count(port_id); 3037 if (ret < 0) 3038 return ret; 3039 expected_entries = (unsigned int)ret; 3040 3041 /* Return max number of stats if no ids given */ 3042 if (!ids) { 3043 if (!xstats_names) 3044 return expected_entries; 3045 else if (xstats_names && size < expected_entries) 3046 return expected_entries; 3047 } 3048 3049 if (ids && !xstats_names) 3050 return -EINVAL; 3051 3052 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3053 uint64_t ids_copy[size]; 3054 3055 for (i = 0; i < size; i++) { 3056 if (ids[i] < basic_count) { 3057 no_basic_stat_requested = 0; 3058 break; 3059 } 3060 3061 /* 3062 * Convert ids to xstats ids that PMD knows. 3063 * ids known by user are basic + extended stats. 3064 */ 3065 ids_copy[i] = ids[i] - basic_count; 3066 } 3067 3068 if (no_basic_stat_requested) 3069 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3070 ids_copy, xstats_names, size); 3071 } 3072 3073 /* Retrieve all stats */ 3074 if (!ids) { 3075 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3076 expected_entries); 3077 if (num_stats < 0 || num_stats > (int)expected_entries) 3078 return num_stats; 3079 else 3080 return expected_entries; 3081 } 3082 3083 xstats_names_copy = calloc(expected_entries, 3084 sizeof(struct rte_eth_xstat_name)); 3085 3086 if (!xstats_names_copy) { 3087 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3088 return -ENOMEM; 3089 } 3090 3091 if (ids) { 3092 for (i = 0; i < size; i++) { 3093 if (ids[i] >= basic_count) { 3094 no_ext_stat_requested = 0; 3095 break; 3096 } 3097 } 3098 } 3099 3100 /* Fill xstats_names_copy structure */ 3101 if (ids && no_ext_stat_requested) { 3102 eth_basic_stats_get_names(dev, xstats_names_copy); 3103 } else { 3104 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3105 expected_entries); 3106 if (ret < 0) { 3107 free(xstats_names_copy); 3108 return ret; 3109 } 3110 } 3111 3112 /* Filter stats */ 3113 for (i = 0; i < size; i++) { 3114 if (ids[i] >= expected_entries) { 3115 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3116 free(xstats_names_copy); 3117 return -1; 3118 } 3119 xstats_names[i] = xstats_names_copy[ids[i]]; 3120 } 3121 3122 free(xstats_names_copy); 3123 return size; 3124 } 3125 3126 int 3127 rte_eth_xstats_get_names(uint16_t port_id, 3128 struct rte_eth_xstat_name *xstats_names, 3129 unsigned int size) 3130 { 3131 struct rte_eth_dev *dev; 3132 int cnt_used_entries; 3133 int cnt_expected_entries; 3134 int cnt_driver_entries; 3135 3136 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3137 if (xstats_names == NULL || cnt_expected_entries < 0 || 3138 (int)size < cnt_expected_entries) 3139 return cnt_expected_entries; 3140 3141 /* port_id checked in eth_dev_get_xstats_count() */ 3142 dev = &rte_eth_devices[port_id]; 3143 3144 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3145 3146 if (dev->dev_ops->xstats_get_names != NULL) { 3147 /* If there are any driver-specific xstats, append them 3148 * to end of list. 
3149 */ 3150 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3151 dev, 3152 xstats_names + cnt_used_entries, 3153 size - cnt_used_entries); 3154 if (cnt_driver_entries < 0) 3155 return eth_err(port_id, cnt_driver_entries); 3156 cnt_used_entries += cnt_driver_entries; 3157 } 3158 3159 return cnt_used_entries; 3160 } 3161 3162 3163 static int 3164 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3165 { 3166 struct rte_eth_dev *dev; 3167 struct rte_eth_stats eth_stats; 3168 unsigned int count = 0, i, q; 3169 uint64_t val, *stats_ptr; 3170 uint16_t nb_rxqs, nb_txqs; 3171 int ret; 3172 3173 ret = rte_eth_stats_get(port_id, &eth_stats); 3174 if (ret < 0) 3175 return ret; 3176 3177 dev = &rte_eth_devices[port_id]; 3178 3179 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3180 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3181 3182 /* global stats */ 3183 for (i = 0; i < RTE_NB_STATS; i++) { 3184 stats_ptr = RTE_PTR_ADD(&eth_stats, 3185 eth_dev_stats_strings[i].offset); 3186 val = *stats_ptr; 3187 xstats[count++].value = val; 3188 } 3189 3190 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3191 return count; 3192 3193 /* per-rxq stats */ 3194 for (q = 0; q < nb_rxqs; q++) { 3195 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3196 stats_ptr = RTE_PTR_ADD(&eth_stats, 3197 eth_dev_rxq_stats_strings[i].offset + 3198 q * sizeof(uint64_t)); 3199 val = *stats_ptr; 3200 xstats[count++].value = val; 3201 } 3202 } 3203 3204 /* per-txq stats */ 3205 for (q = 0; q < nb_txqs; q++) { 3206 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3207 stats_ptr = RTE_PTR_ADD(&eth_stats, 3208 eth_dev_txq_stats_strings[i].offset + 3209 q * sizeof(uint64_t)); 3210 val = *stats_ptr; 3211 xstats[count++].value = val; 3212 } 3213 } 3214 return count; 3215 } 3216 3217 /* retrieve ethdev extended statistics */ 3218 int 3219 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3220 uint64_t *values, unsigned int size) 3221 { 3222 unsigned int no_basic_stat_requested = 1; 3223 unsigned int no_ext_stat_requested = 1; 3224 unsigned int num_xstats_filled; 3225 unsigned int basic_count; 3226 uint16_t expected_entries; 3227 struct rte_eth_dev *dev; 3228 unsigned int i; 3229 int ret; 3230 3231 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3232 dev = &rte_eth_devices[port_id]; 3233 3234 ret = eth_dev_get_xstats_count(port_id); 3235 if (ret < 0) 3236 return ret; 3237 expected_entries = (uint16_t)ret; 3238 struct rte_eth_xstat xstats[expected_entries]; 3239 basic_count = eth_dev_get_xstats_basic_count(dev); 3240 3241 /* Return max number of stats if no ids given */ 3242 if (!ids) { 3243 if (!values) 3244 return expected_entries; 3245 else if (values && size < expected_entries) 3246 return expected_entries; 3247 } 3248 3249 if (ids && !values) 3250 return -EINVAL; 3251 3252 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3253 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3254 uint64_t ids_copy[size]; 3255 3256 for (i = 0; i < size; i++) { 3257 if (ids[i] < basic_count) { 3258 no_basic_stat_requested = 0; 3259 break; 3260 } 3261 3262 /* 3263 * Convert ids to xstats ids that PMD knows. 3264 * ids known by user are basic + extended stats.
3265 */ 3266 ids_copy[i] = ids[i] - basic_count; 3267 } 3268 3269 if (no_basic_stat_requested) 3270 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3271 values, size); 3272 } 3273 3274 if (ids) { 3275 for (i = 0; i < size; i++) { 3276 if (ids[i] >= basic_count) { 3277 no_ext_stat_requested = 0; 3278 break; 3279 } 3280 } 3281 } 3282 3283 /* Fill the xstats structure */ 3284 if (ids && no_ext_stat_requested) 3285 ret = eth_basic_stats_get(port_id, xstats); 3286 else 3287 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3288 3289 if (ret < 0) 3290 return ret; 3291 num_xstats_filled = (unsigned int)ret; 3292 3293 /* Return all stats */ 3294 if (!ids) { 3295 for (i = 0; i < num_xstats_filled; i++) 3296 values[i] = xstats[i].value; 3297 return expected_entries; 3298 } 3299 3300 /* Filter stats */ 3301 for (i = 0; i < size; i++) { 3302 if (ids[i] >= expected_entries) { 3303 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3304 return -1; 3305 } 3306 values[i] = xstats[ids[i]].value; 3307 } 3308 return size; 3309 } 3310 3311 int 3312 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3313 unsigned int n) 3314 { 3315 struct rte_eth_dev *dev; 3316 unsigned int count = 0, i; 3317 signed int xcount = 0; 3318 uint16_t nb_rxqs, nb_txqs; 3319 int ret; 3320 3321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3322 dev = &rte_eth_devices[port_id]; 3323 3324 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3325 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3326 3327 /* Return generic statistics */ 3328 count = RTE_NB_STATS; 3329 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3330 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3331 3332 /* implemented by the driver */ 3333 if (dev->dev_ops->xstats_get != NULL) { 3334 /* Retrieve the xstats from the driver at the end of the 3335 * xstats struct. 3336 */ 3337 xcount = (*dev->dev_ops->xstats_get)(dev, 3338 xstats ? xstats + count : NULL, 3339 (n > count) ? 
n - count : 0); 3340 3341 if (xcount < 0) 3342 return eth_err(port_id, xcount); 3343 } 3344 3345 if (n < count + xcount || xstats == NULL) 3346 return count + xcount; 3347 3348 /* now fill the xstats structure */ 3349 ret = eth_basic_stats_get(port_id, xstats); 3350 if (ret < 0) 3351 return ret; 3352 count = ret; 3353 3354 for (i = 0; i < count; i++) 3355 xstats[i].id = i; 3356 /* add an offset to driver-specific stats */ 3357 for ( ; i < count + xcount; i++) 3358 xstats[i].id += count; 3359 3360 return count + xcount; 3361 } 3362 3363 /* reset ethdev extended statistics */ 3364 int 3365 rte_eth_xstats_reset(uint16_t port_id) 3366 { 3367 struct rte_eth_dev *dev; 3368 3369 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3370 dev = &rte_eth_devices[port_id]; 3371 3372 /* implemented by the driver */ 3373 if (dev->dev_ops->xstats_reset != NULL) 3374 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3375 3376 /* fallback to default */ 3377 return rte_eth_stats_reset(port_id); 3378 } 3379 3380 static int 3381 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3382 uint8_t stat_idx, uint8_t is_rx) 3383 { 3384 struct rte_eth_dev *dev; 3385 3386 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3387 dev = &rte_eth_devices[port_id]; 3388 3389 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3390 return -EINVAL; 3391 3392 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3393 return -EINVAL; 3394 3395 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3396 return -EINVAL; 3397 3398 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3399 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3400 } 3401 3402 int 3403 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3404 uint8_t stat_idx) 3405 { 3406 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3407 tx_queue_id, 3408 stat_idx, STAT_QMAP_TX)); 3409 } 3410 3411 int 3412 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3413 uint8_t stat_idx) 3414 { 3415 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3416 rx_queue_id, 3417 stat_idx, STAT_QMAP_RX)); 3418 } 3419 3420 int 3421 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3422 { 3423 struct rte_eth_dev *dev; 3424 3425 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3426 dev = &rte_eth_devices[port_id]; 3427 3428 if (fw_version == NULL && fw_size > 0) { 3429 RTE_ETHDEV_LOG(ERR, 3430 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3431 port_id); 3432 return -EINVAL; 3433 } 3434 3435 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3436 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3437 fw_version, fw_size)); 3438 } 3439 3440 int 3441 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3442 { 3443 struct rte_eth_dev *dev; 3444 const struct rte_eth_desc_lim lim = { 3445 .nb_max = UINT16_MAX, 3446 .nb_min = 0, 3447 .nb_align = 1, 3448 .nb_seg_max = UINT16_MAX, 3449 .nb_mtu_seg_max = UINT16_MAX, 3450 }; 3451 int diag; 3452 3453 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3454 dev = &rte_eth_devices[port_id]; 3455 3456 if (dev_info == NULL) { 3457 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3458 port_id); 3459 return -EINVAL; 3460 } 3461 3462 /* 3463 * Init dev_info before port_id check since caller does not have 3464 * return status and does not know if get is successful or not. 
3465 */ 3466 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3467 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3468 3469 dev_info->rx_desc_lim = lim; 3470 dev_info->tx_desc_lim = lim; 3471 dev_info->device = dev->device; 3472 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3473 RTE_ETHER_CRC_LEN; 3474 dev_info->max_mtu = UINT16_MAX; 3475 3476 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3477 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3478 if (diag != 0) { 3479 /* Cleanup already filled in device information */ 3480 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3481 return eth_err(port_id, diag); 3482 } 3483 3484 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3485 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3486 RTE_MAX_QUEUES_PER_PORT); 3487 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3488 RTE_MAX_QUEUES_PER_PORT); 3489 3490 dev_info->driver_name = dev->device->driver->name; 3491 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3492 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3493 3494 dev_info->dev_flags = &dev->data->dev_flags; 3495 3496 return 0; 3497 } 3498 3499 int 3500 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3501 { 3502 struct rte_eth_dev *dev; 3503 3504 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3505 dev = &rte_eth_devices[port_id]; 3506 3507 if (dev_conf == NULL) { 3508 RTE_ETHDEV_LOG(ERR, 3509 "Cannot get ethdev port %u configuration to NULL\n", 3510 port_id); 3511 return -EINVAL; 3512 } 3513 3514 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3515 3516 return 0; 3517 } 3518 3519 int 3520 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3521 uint32_t *ptypes, int num) 3522 { 3523 int i, j; 3524 struct rte_eth_dev *dev; 3525 const uint32_t *all_ptypes; 3526 3527 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3528 dev = &rte_eth_devices[port_id]; 3529 3530 if (ptypes == NULL && num > 0) { 3531 RTE_ETHDEV_LOG(ERR, 3532 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3533 port_id); 3534 return -EINVAL; 3535 } 3536 3537 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3538 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3539 3540 if (!all_ptypes) 3541 return 0; 3542 3543 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3544 if (all_ptypes[i] & ptype_mask) { 3545 if (j < num) 3546 ptypes[j] = all_ptypes[i]; 3547 j++; 3548 } 3549 3550 return j; 3551 } 3552 3553 int 3554 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3555 uint32_t *set_ptypes, unsigned int num) 3556 { 3557 const uint32_t valid_ptype_masks[] = { 3558 RTE_PTYPE_L2_MASK, 3559 RTE_PTYPE_L3_MASK, 3560 RTE_PTYPE_L4_MASK, 3561 RTE_PTYPE_TUNNEL_MASK, 3562 RTE_PTYPE_INNER_L2_MASK, 3563 RTE_PTYPE_INNER_L3_MASK, 3564 RTE_PTYPE_INNER_L4_MASK, 3565 }; 3566 const uint32_t *all_ptypes; 3567 struct rte_eth_dev *dev; 3568 uint32_t unused_mask; 3569 unsigned int i, j; 3570 int ret; 3571 3572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3573 dev = &rte_eth_devices[port_id]; 3574 3575 if (num > 0 && set_ptypes == NULL) { 3576 RTE_ETHDEV_LOG(ERR, 3577 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3578 port_id); 3579 return -EINVAL; 3580 } 3581 3582 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3583 *dev->dev_ops->dev_ptypes_set == NULL) { 3584 ret = 0; 3585 goto 
ptype_unknown; 3586 } 3587 3588 if (ptype_mask == 0) { 3589 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3590 ptype_mask); 3591 goto ptype_unknown; 3592 } 3593 3594 unused_mask = ptype_mask; 3595 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3596 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3597 if (mask && mask != valid_ptype_masks[i]) { 3598 ret = -EINVAL; 3599 goto ptype_unknown; 3600 } 3601 unused_mask &= ~valid_ptype_masks[i]; 3602 } 3603 3604 if (unused_mask) { 3605 ret = -EINVAL; 3606 goto ptype_unknown; 3607 } 3608 3609 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3610 if (all_ptypes == NULL) { 3611 ret = 0; 3612 goto ptype_unknown; 3613 } 3614 3615 /* 3616 * Accommodate as many set_ptypes as possible. If the supplied 3617 * set_ptypes array is insufficient fill it partially. 3618 */ 3619 for (i = 0, j = 0; set_ptypes != NULL && 3620 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3621 if (ptype_mask & all_ptypes[i]) { 3622 if (j < num - 1) { 3623 set_ptypes[j] = all_ptypes[i]; 3624 j++; 3625 continue; 3626 } 3627 break; 3628 } 3629 } 3630 3631 if (set_ptypes != NULL && j < num) 3632 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3633 3634 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3635 3636 ptype_unknown: 3637 if (num > 0) 3638 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3639 3640 return ret; 3641 } 3642 3643 int 3644 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3645 unsigned int num) 3646 { 3647 int32_t ret; 3648 struct rte_eth_dev *dev; 3649 struct rte_eth_dev_info dev_info; 3650 3651 if (ma == NULL) { 3652 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3653 return -EINVAL; 3654 } 3655 3656 /* will check for us that port_id is a valid one */ 3657 ret = rte_eth_dev_info_get(port_id, &dev_info); 3658 if (ret != 0) 3659 return ret; 3660 3661 dev = &rte_eth_devices[port_id]; 3662 num = RTE_MIN(dev_info.max_mac_addrs, num); 3663 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3664 3665 return num; 3666 } 3667 3668 int 3669 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3670 { 3671 struct rte_eth_dev *dev; 3672 3673 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3674 dev = &rte_eth_devices[port_id]; 3675 3676 if (mac_addr == NULL) { 3677 RTE_ETHDEV_LOG(ERR, 3678 "Cannot get ethdev port %u MAC address to NULL\n", 3679 port_id); 3680 return -EINVAL; 3681 } 3682 3683 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3684 3685 return 0; 3686 } 3687 3688 int 3689 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3690 { 3691 struct rte_eth_dev *dev; 3692 3693 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3694 dev = &rte_eth_devices[port_id]; 3695 3696 if (mtu == NULL) { 3697 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3698 port_id); 3699 return -EINVAL; 3700 } 3701 3702 *mtu = dev->data->mtu; 3703 return 0; 3704 } 3705 3706 int 3707 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3708 { 3709 int ret; 3710 struct rte_eth_dev_info dev_info; 3711 struct rte_eth_dev *dev; 3712 3713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3714 dev = &rte_eth_devices[port_id]; 3715 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3716 3717 /* 3718 * Check if the device supports dev_infos_get, if it does not 3719 * skip min_mtu/max_mtu validation here as this requires values 3720 * that are populated within the call to rte_eth_dev_info_get() 3721 * which relies on dev->dev_ops->dev_infos_get. 
3722 */ 3723 if (*dev->dev_ops->dev_infos_get != NULL) { 3724 ret = rte_eth_dev_info_get(port_id, &dev_info); 3725 if (ret != 0) 3726 return ret; 3727 3728 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3729 if (ret != 0) 3730 return ret; 3731 } 3732 3733 if (dev->data->dev_configured == 0) { 3734 RTE_ETHDEV_LOG(ERR, 3735 "Port %u must be configured before MTU set\n", 3736 port_id); 3737 return -EINVAL; 3738 } 3739 3740 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3741 if (ret == 0) 3742 dev->data->mtu = mtu; 3743 3744 return eth_err(port_id, ret); 3745 } 3746 3747 int 3748 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3749 { 3750 struct rte_eth_dev *dev; 3751 int ret; 3752 3753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3754 dev = &rte_eth_devices[port_id]; 3755 3756 if (!(dev->data->dev_conf.rxmode.offloads & 3757 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3758 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3759 port_id); 3760 return -ENOSYS; 3761 } 3762 3763 if (vlan_id > 4095) { 3764 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3765 port_id, vlan_id); 3766 return -EINVAL; 3767 } 3768 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3769 3770 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3771 if (ret == 0) { 3772 struct rte_vlan_filter_conf *vfc; 3773 int vidx; 3774 int vbit; 3775 3776 vfc = &dev->data->vlan_filter_conf; 3777 vidx = vlan_id / 64; 3778 vbit = vlan_id % 64; 3779 3780 if (on) 3781 vfc->ids[vidx] |= RTE_BIT64(vbit); 3782 else 3783 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3784 } 3785 3786 return eth_err(port_id, ret); 3787 } 3788 3789 int 3790 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3791 int on) 3792 { 3793 struct rte_eth_dev *dev; 3794 3795 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3796 dev = &rte_eth_devices[port_id]; 3797 3798 if (rx_queue_id >= dev->data->nb_rx_queues) { 3799 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3800 return -EINVAL; 3801 } 3802 3803 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3804 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3805 3806 return 0; 3807 } 3808 3809 int 3810 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3811 enum rte_vlan_type vlan_type, 3812 uint16_t tpid) 3813 { 3814 struct rte_eth_dev *dev; 3815 3816 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3817 dev = &rte_eth_devices[port_id]; 3818 3819 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3820 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3821 tpid)); 3822 } 3823 3824 int 3825 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3826 { 3827 struct rte_eth_dev_info dev_info; 3828 struct rte_eth_dev *dev; 3829 int ret = 0; 3830 int mask = 0; 3831 int cur, org = 0; 3832 uint64_t orig_offloads; 3833 uint64_t dev_offloads; 3834 uint64_t new_offloads; 3835 3836 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3837 dev = &rte_eth_devices[port_id]; 3838 3839 /* save original values in case of failure */ 3840 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3841 dev_offloads = orig_offloads; 3842 3843 /* check which option changed by application */ 3844 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3845 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3846 if (cur != org) { 3847 if (cur) 3848 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3849 else 3850 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3851 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3852 } 3853 3854 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3855 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3856 if (cur != org) { 3857 if (cur) 3858 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3859 else 3860 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3861 mask |= RTE_ETH_VLAN_FILTER_MASK; 3862 } 3863 3864 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3865 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3866 if (cur != org) { 3867 if (cur) 3868 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3869 else 3870 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3871 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3872 } 3873 3874 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3875 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3876 if (cur != org) { 3877 if (cur) 3878 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3879 else 3880 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3881 mask |= RTE_ETH_QINQ_STRIP_MASK; 3882 } 3883 3884 /*no change*/ 3885 if (mask == 0) 3886 return ret; 3887 3888 ret = rte_eth_dev_info_get(port_id, &dev_info); 3889 if (ret != 0) 3890 return ret; 3891 3892 /* Rx VLAN offloading must be within its device capabilities */ 3893 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3894 new_offloads = dev_offloads & ~orig_offloads; 3895 RTE_ETHDEV_LOG(ERR, 3896 "Ethdev port_id=%u requested new added VLAN offloads " 3897 "0x%" PRIx64 " must be within Rx offloads capabilities " 3898 "0x%" PRIx64 " in %s()\n", 3899 port_id, new_offloads, dev_info.rx_offload_capa, 3900 __func__); 3901 return -EINVAL; 3902 } 3903 3904 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3905 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3906 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3907 if (ret) { 3908 /* hit an error restore original values */ 3909 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3910 } 3911 3912 return eth_err(port_id, ret); 3913 } 3914 3915 int 3916 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3917 { 3918 struct rte_eth_dev *dev; 3919 uint64_t *dev_offloads; 3920 int ret = 0; 3921 3922 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3923 dev = &rte_eth_devices[port_id]; 3924 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3925 3926 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3927 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3928 3929 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3930 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3931 3932 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3933 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3934 3935 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3936 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3937 3938 return ret; 3939 } 3940 3941 int 3942 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3943 { 3944 struct rte_eth_dev *dev; 3945 3946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3947 dev = &rte_eth_devices[port_id]; 3948 3949 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3950 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3951 } 3952 3953 int 3954 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3955 { 3956 struct rte_eth_dev *dev; 3957 3958 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3959 dev = &rte_eth_devices[port_id]; 3960 3961 if (fc_conf == NULL) { 3962 RTE_ETHDEV_LOG(ERR, 3963 "Cannot get ethdev port %u flow control config to NULL\n", 3964 port_id); 3965 return -EINVAL; 3966 } 3967 3968 
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3969 memset(fc_conf, 0, sizeof(*fc_conf)); 3970 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3971 } 3972 3973 int 3974 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3975 { 3976 struct rte_eth_dev *dev; 3977 3978 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3979 dev = &rte_eth_devices[port_id]; 3980 3981 if (fc_conf == NULL) { 3982 RTE_ETHDEV_LOG(ERR, 3983 "Cannot set ethdev port %u flow control from NULL config\n", 3984 port_id); 3985 return -EINVAL; 3986 } 3987 3988 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3989 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3990 return -EINVAL; 3991 } 3992 3993 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3994 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3995 } 3996 3997 int 3998 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3999 struct rte_eth_pfc_conf *pfc_conf) 4000 { 4001 struct rte_eth_dev *dev; 4002 4003 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4004 dev = &rte_eth_devices[port_id]; 4005 4006 if (pfc_conf == NULL) { 4007 RTE_ETHDEV_LOG(ERR, 4008 "Cannot set ethdev port %u priority flow control from NULL config\n", 4009 port_id); 4010 return -EINVAL; 4011 } 4012 4013 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4014 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4015 return -EINVAL; 4016 } 4017 4018 /* High water, low water validation are device specific */ 4019 if (*dev->dev_ops->priority_flow_ctrl_set) 4020 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4021 (dev, pfc_conf)); 4022 return -ENOTSUP; 4023 } 4024 4025 static int 4026 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4027 uint16_t reta_size) 4028 { 4029 uint16_t i, num; 4030 4031 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4032 for (i = 0; i < num; i++) { 4033 if (reta_conf[i].mask) 4034 return 0; 4035 } 4036 4037 return -EINVAL; 4038 } 4039 4040 static int 4041 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4042 uint16_t reta_size, 4043 uint16_t max_rxq) 4044 { 4045 uint16_t i, idx, shift; 4046 4047 if (max_rxq == 0) { 4048 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4049 return -EINVAL; 4050 } 4051 4052 for (i = 0; i < reta_size; i++) { 4053 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4054 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4055 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4056 (reta_conf[idx].reta[shift] >= max_rxq)) { 4057 RTE_ETHDEV_LOG(ERR, 4058 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4059 idx, shift, 4060 reta_conf[idx].reta[shift], max_rxq); 4061 return -EINVAL; 4062 } 4063 } 4064 4065 return 0; 4066 } 4067 4068 int 4069 rte_eth_dev_rss_reta_update(uint16_t port_id, 4070 struct rte_eth_rss_reta_entry64 *reta_conf, 4071 uint16_t reta_size) 4072 { 4073 struct rte_eth_dev *dev; 4074 int ret; 4075 4076 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4077 dev = &rte_eth_devices[port_id]; 4078 4079 if (reta_conf == NULL) { 4080 RTE_ETHDEV_LOG(ERR, 4081 "Cannot update ethdev port %u RSS RETA to NULL\n", 4082 port_id); 4083 return -EINVAL; 4084 } 4085 4086 if (reta_size == 0) { 4087 RTE_ETHDEV_LOG(ERR, 4088 "Cannot update ethdev port %u RSS RETA with zero size\n", 4089 port_id); 4090 return -EINVAL; 4091 } 4092 4093 /* Check mask bits */ 4094 ret = eth_check_reta_mask(reta_conf, reta_size); 4095 if (ret < 0) 4096 
return ret; 4097 4098 /* Check entry value */ 4099 ret = eth_check_reta_entry(reta_conf, reta_size, 4100 dev->data->nb_rx_queues); 4101 if (ret < 0) 4102 return ret; 4103 4104 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4105 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4106 reta_size)); 4107 } 4108 4109 int 4110 rte_eth_dev_rss_reta_query(uint16_t port_id, 4111 struct rte_eth_rss_reta_entry64 *reta_conf, 4112 uint16_t reta_size) 4113 { 4114 struct rte_eth_dev *dev; 4115 int ret; 4116 4117 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4118 dev = &rte_eth_devices[port_id]; 4119 4120 if (reta_conf == NULL) { 4121 RTE_ETHDEV_LOG(ERR, 4122 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4123 port_id); 4124 return -EINVAL; 4125 } 4126 4127 /* Check mask bits */ 4128 ret = eth_check_reta_mask(reta_conf, reta_size); 4129 if (ret < 0) 4130 return ret; 4131 4132 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4133 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4134 reta_size)); 4135 } 4136 4137 int 4138 rte_eth_dev_rss_hash_update(uint16_t port_id, 4139 struct rte_eth_rss_conf *rss_conf) 4140 { 4141 struct rte_eth_dev *dev; 4142 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4143 int ret; 4144 4145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4146 dev = &rte_eth_devices[port_id]; 4147 4148 if (rss_conf == NULL) { 4149 RTE_ETHDEV_LOG(ERR, 4150 "Cannot update ethdev port %u RSS hash from NULL config\n", 4151 port_id); 4152 return -EINVAL; 4153 } 4154 4155 ret = rte_eth_dev_info_get(port_id, &dev_info); 4156 if (ret != 0) 4157 return ret; 4158 4159 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4160 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4161 dev_info.flow_type_rss_offloads) { 4162 RTE_ETHDEV_LOG(ERR, 4163 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4164 port_id, rss_conf->rss_hf, 4165 dev_info.flow_type_rss_offloads); 4166 return -EINVAL; 4167 } 4168 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4169 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4170 rss_conf)); 4171 } 4172 4173 int 4174 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4175 struct rte_eth_rss_conf *rss_conf) 4176 { 4177 struct rte_eth_dev *dev; 4178 4179 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4180 dev = &rte_eth_devices[port_id]; 4181 4182 if (rss_conf == NULL) { 4183 RTE_ETHDEV_LOG(ERR, 4184 "Cannot get ethdev port %u RSS hash config to NULL\n", 4185 port_id); 4186 return -EINVAL; 4187 } 4188 4189 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4190 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4191 rss_conf)); 4192 } 4193 4194 int 4195 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4196 struct rte_eth_udp_tunnel *udp_tunnel) 4197 { 4198 struct rte_eth_dev *dev; 4199 4200 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4201 dev = &rte_eth_devices[port_id]; 4202 4203 if (udp_tunnel == NULL) { 4204 RTE_ETHDEV_LOG(ERR, 4205 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4206 port_id); 4207 return -EINVAL; 4208 } 4209 4210 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4211 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4212 return -EINVAL; 4213 } 4214 4215 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4216 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4217 udp_tunnel)); 4218 } 4219 4220 
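/*
 * Usage sketch (an illustrative, hedged example): to have the device
 * recognise VXLAN on the IANA-assigned UDP port, an application might
 * request
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * and later remove the mapping with rte_eth_dev_udp_tunnel_port_delete()
 * below, passing the same structure. "port_id" is assumed to be a
 * configured port owned by the application.
 */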
int 4221 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4222 struct rte_eth_udp_tunnel *udp_tunnel) 4223 { 4224 struct rte_eth_dev *dev; 4225 4226 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4227 dev = &rte_eth_devices[port_id]; 4228 4229 if (udp_tunnel == NULL) { 4230 RTE_ETHDEV_LOG(ERR, 4231 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4232 port_id); 4233 return -EINVAL; 4234 } 4235 4236 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4237 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4238 return -EINVAL; 4239 } 4240 4241 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4242 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4243 udp_tunnel)); 4244 } 4245 4246 int 4247 rte_eth_led_on(uint16_t port_id) 4248 { 4249 struct rte_eth_dev *dev; 4250 4251 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4252 dev = &rte_eth_devices[port_id]; 4253 4254 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4255 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4256 } 4257 4258 int 4259 rte_eth_led_off(uint16_t port_id) 4260 { 4261 struct rte_eth_dev *dev; 4262 4263 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4264 dev = &rte_eth_devices[port_id]; 4265 4266 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4267 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4268 } 4269 4270 int 4271 rte_eth_fec_get_capability(uint16_t port_id, 4272 struct rte_eth_fec_capa *speed_fec_capa, 4273 unsigned int num) 4274 { 4275 struct rte_eth_dev *dev; 4276 int ret; 4277 4278 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4279 dev = &rte_eth_devices[port_id]; 4280 4281 if (speed_fec_capa == NULL && num > 0) { 4282 RTE_ETHDEV_LOG(ERR, 4283 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4284 port_id); 4285 return -EINVAL; 4286 } 4287 4288 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4289 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4290 4291 return ret; 4292 } 4293 4294 int 4295 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4296 { 4297 struct rte_eth_dev *dev; 4298 4299 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4300 dev = &rte_eth_devices[port_id]; 4301 4302 if (fec_capa == NULL) { 4303 RTE_ETHDEV_LOG(ERR, 4304 "Cannot get ethdev port %u current FEC mode to NULL\n", 4305 port_id); 4306 return -EINVAL; 4307 } 4308 4309 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4310 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4311 } 4312 4313 int 4314 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4315 { 4316 struct rte_eth_dev *dev; 4317 4318 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4319 dev = &rte_eth_devices[port_id]; 4320 4321 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4322 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4323 } 4324 4325 /* 4326 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4327 * an empty spot. 
4328 */ 4329 static int 4330 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4331 { 4332 struct rte_eth_dev_info dev_info; 4333 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4334 unsigned i; 4335 int ret; 4336 4337 ret = rte_eth_dev_info_get(port_id, &dev_info); 4338 if (ret != 0) 4339 return -1; 4340 4341 for (i = 0; i < dev_info.max_mac_addrs; i++) 4342 if (memcmp(addr, &dev->data->mac_addrs[i], 4343 RTE_ETHER_ADDR_LEN) == 0) 4344 return i; 4345 4346 return -1; 4347 } 4348 4349 static const struct rte_ether_addr null_mac_addr; 4350 4351 int 4352 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4353 uint32_t pool) 4354 { 4355 struct rte_eth_dev *dev; 4356 int index; 4357 uint64_t pool_mask; 4358 int ret; 4359 4360 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4361 dev = &rte_eth_devices[port_id]; 4362 4363 if (addr == NULL) { 4364 RTE_ETHDEV_LOG(ERR, 4365 "Cannot add ethdev port %u MAC address from NULL address\n", 4366 port_id); 4367 return -EINVAL; 4368 } 4369 4370 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4371 4372 if (rte_is_zero_ether_addr(addr)) { 4373 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4374 port_id); 4375 return -EINVAL; 4376 } 4377 if (pool >= RTE_ETH_64_POOLS) { 4378 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4379 return -EINVAL; 4380 } 4381 4382 index = eth_dev_get_mac_addr_index(port_id, addr); 4383 if (index < 0) { 4384 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4385 if (index < 0) { 4386 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4387 port_id); 4388 return -ENOSPC; 4389 } 4390 } else { 4391 pool_mask = dev->data->mac_pool_sel[index]; 4392 4393 /* Check if both MAC address and pool is already there, and do nothing */ 4394 if (pool_mask & RTE_BIT64(pool)) 4395 return 0; 4396 } 4397 4398 /* Update NIC */ 4399 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4400 4401 if (ret == 0) { 4402 /* Update address in NIC data structure */ 4403 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4404 4405 /* Update pool bitmap in NIC data structure */ 4406 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4407 } 4408 4409 return eth_err(port_id, ret); 4410 } 4411 4412 int 4413 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4414 { 4415 struct rte_eth_dev *dev; 4416 int index; 4417 4418 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4419 dev = &rte_eth_devices[port_id]; 4420 4421 if (addr == NULL) { 4422 RTE_ETHDEV_LOG(ERR, 4423 "Cannot remove ethdev port %u MAC address from NULL address\n", 4424 port_id); 4425 return -EINVAL; 4426 } 4427 4428 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4429 4430 index = eth_dev_get_mac_addr_index(port_id, addr); 4431 if (index == 0) { 4432 RTE_ETHDEV_LOG(ERR, 4433 "Port %u: Cannot remove default MAC address\n", 4434 port_id); 4435 return -EADDRINUSE; 4436 } else if (index < 0) 4437 return 0; /* Do nothing if address wasn't found */ 4438 4439 /* Update NIC */ 4440 (*dev->dev_ops->mac_addr_remove)(dev, index); 4441 4442 /* Update address in NIC data structure */ 4443 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4444 4445 /* reset pool bitmap */ 4446 dev->data->mac_pool_sel[index] = 0; 4447 4448 return 0; 4449 } 4450 4451 int 4452 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4453 { 4454 struct rte_eth_dev *dev; 4455 int ret; 4456 4457 
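	/*
	 * The address is validated and handed to the PMD first; only when
	 * the driver accepts it is the shadow copy in
	 * dev->data->mac_addrs[0] updated, so the array keeps mirroring
	 * what the hardware actually uses.
	 *
	 * Usage sketch (an illustrative, hedged example; the address shown
	 * is an arbitrary locally administered one):
	 *
	 *	struct rte_ether_addr mac;
	 *
	 *	if (rte_ether_unformat_addr("02:00:00:00:00:01", &mac) == 0)
	 *		(void)rte_eth_dev_default_mac_addr_set(port_id, &mac);
	 */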
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4458 dev = &rte_eth_devices[port_id]; 4459 4460 if (addr == NULL) { 4461 RTE_ETHDEV_LOG(ERR, 4462 "Cannot set ethdev port %u default MAC address from NULL address\n", 4463 port_id); 4464 return -EINVAL; 4465 } 4466 4467 if (!rte_is_valid_assigned_ether_addr(addr)) 4468 return -EINVAL; 4469 4470 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4471 4472 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4473 if (ret < 0) 4474 return ret; 4475 4476 /* Update default address in NIC data structure */ 4477 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4478 4479 return 0; 4480 } 4481 4482 4483 /* 4484 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4485 * an empty spot. 4486 */ 4487 static int 4488 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4489 const struct rte_ether_addr *addr) 4490 { 4491 struct rte_eth_dev_info dev_info; 4492 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4493 unsigned i; 4494 int ret; 4495 4496 ret = rte_eth_dev_info_get(port_id, &dev_info); 4497 if (ret != 0) 4498 return -1; 4499 4500 if (!dev->data->hash_mac_addrs) 4501 return -1; 4502 4503 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4504 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4505 RTE_ETHER_ADDR_LEN) == 0) 4506 return i; 4507 4508 return -1; 4509 } 4510 4511 int 4512 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4513 uint8_t on) 4514 { 4515 int index; 4516 int ret; 4517 struct rte_eth_dev *dev; 4518 4519 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4520 dev = &rte_eth_devices[port_id]; 4521 4522 if (addr == NULL) { 4523 RTE_ETHDEV_LOG(ERR, 4524 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4525 port_id); 4526 return -EINVAL; 4527 } 4528 4529 if (rte_is_zero_ether_addr(addr)) { 4530 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4531 port_id); 4532 return -EINVAL; 4533 } 4534 4535 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4536 /* Check if it's already there, and do nothing */ 4537 if ((index >= 0) && on) 4538 return 0; 4539 4540 if (index < 0) { 4541 if (!on) { 4542 RTE_ETHDEV_LOG(ERR, 4543 "Port %u: the MAC address was not set in UTA\n", 4544 port_id); 4545 return -EINVAL; 4546 } 4547 4548 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4549 if (index < 0) { 4550 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4551 port_id); 4552 return -ENOSPC; 4553 } 4554 } 4555 4556 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4557 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4558 if (ret == 0) { 4559 /* Update address in NIC data structure */ 4560 if (on) 4561 rte_ether_addr_copy(addr, 4562 &dev->data->hash_mac_addrs[index]); 4563 else 4564 rte_ether_addr_copy(&null_mac_addr, 4565 &dev->data->hash_mac_addrs[index]); 4566 } 4567 4568 return eth_err(port_id, ret); 4569 } 4570 4571 int 4572 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4573 { 4574 struct rte_eth_dev *dev; 4575 4576 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4577 dev = &rte_eth_devices[port_id]; 4578 4579 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4580 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4581 on)); 4582 } 4583 4584 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4585 uint16_t tx_rate) 4586 { 4587 struct rte_eth_dev *dev; 4588 struct rte_eth_dev_info dev_info; 4589 struct rte_eth_link 
link; 4590 int ret; 4591 4592 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4593 dev = &rte_eth_devices[port_id]; 4594 4595 ret = rte_eth_dev_info_get(port_id, &dev_info); 4596 if (ret != 0) 4597 return ret; 4598 4599 link = dev->data->dev_link; 4600 4601 if (queue_idx > dev_info.max_tx_queues) { 4602 RTE_ETHDEV_LOG(ERR, 4603 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4604 port_id, queue_idx); 4605 return -EINVAL; 4606 } 4607 4608 if (tx_rate > link.link_speed) { 4609 RTE_ETHDEV_LOG(ERR, 4610 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4611 tx_rate, link.link_speed); 4612 return -EINVAL; 4613 } 4614 4615 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4616 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4617 queue_idx, tx_rate)); 4618 } 4619 4620 RTE_INIT(eth_dev_init_fp_ops) 4621 { 4622 uint32_t i; 4623 4624 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4625 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4626 } 4627 4628 RTE_INIT(eth_dev_init_cb_lists) 4629 { 4630 uint16_t i; 4631 4632 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4633 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4634 } 4635 4636 int 4637 rte_eth_dev_callback_register(uint16_t port_id, 4638 enum rte_eth_event_type event, 4639 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4640 { 4641 struct rte_eth_dev *dev; 4642 struct rte_eth_dev_callback *user_cb; 4643 uint16_t next_port; 4644 uint16_t last_port; 4645 4646 if (cb_fn == NULL) { 4647 RTE_ETHDEV_LOG(ERR, 4648 "Cannot register ethdev port %u callback from NULL\n", 4649 port_id); 4650 return -EINVAL; 4651 } 4652 4653 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4654 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4655 return -EINVAL; 4656 } 4657 4658 if (port_id == RTE_ETH_ALL) { 4659 next_port = 0; 4660 last_port = RTE_MAX_ETHPORTS - 1; 4661 } else { 4662 next_port = last_port = port_id; 4663 } 4664 4665 rte_spinlock_lock(ð_dev_cb_lock); 4666 4667 do { 4668 dev = &rte_eth_devices[next_port]; 4669 4670 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4671 if (user_cb->cb_fn == cb_fn && 4672 user_cb->cb_arg == cb_arg && 4673 user_cb->event == event) { 4674 break; 4675 } 4676 } 4677 4678 /* create a new callback. 
*/ 4679 if (user_cb == NULL) { 4680 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4681 sizeof(struct rte_eth_dev_callback), 0); 4682 if (user_cb != NULL) { 4683 user_cb->cb_fn = cb_fn; 4684 user_cb->cb_arg = cb_arg; 4685 user_cb->event = event; 4686 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4687 user_cb, next); 4688 } else { 4689 rte_spinlock_unlock(ð_dev_cb_lock); 4690 rte_eth_dev_callback_unregister(port_id, event, 4691 cb_fn, cb_arg); 4692 return -ENOMEM; 4693 } 4694 4695 } 4696 } while (++next_port <= last_port); 4697 4698 rte_spinlock_unlock(ð_dev_cb_lock); 4699 return 0; 4700 } 4701 4702 int 4703 rte_eth_dev_callback_unregister(uint16_t port_id, 4704 enum rte_eth_event_type event, 4705 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4706 { 4707 int ret; 4708 struct rte_eth_dev *dev; 4709 struct rte_eth_dev_callback *cb, *next; 4710 uint16_t next_port; 4711 uint16_t last_port; 4712 4713 if (cb_fn == NULL) { 4714 RTE_ETHDEV_LOG(ERR, 4715 "Cannot unregister ethdev port %u callback from NULL\n", 4716 port_id); 4717 return -EINVAL; 4718 } 4719 4720 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4721 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4722 return -EINVAL; 4723 } 4724 4725 if (port_id == RTE_ETH_ALL) { 4726 next_port = 0; 4727 last_port = RTE_MAX_ETHPORTS - 1; 4728 } else { 4729 next_port = last_port = port_id; 4730 } 4731 4732 rte_spinlock_lock(ð_dev_cb_lock); 4733 4734 do { 4735 dev = &rte_eth_devices[next_port]; 4736 ret = 0; 4737 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4738 cb = next) { 4739 4740 next = TAILQ_NEXT(cb, next); 4741 4742 if (cb->cb_fn != cb_fn || cb->event != event || 4743 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4744 continue; 4745 4746 /* 4747 * if this callback is not executing right now, 4748 * then remove it. 4749 */ 4750 if (cb->active == 0) { 4751 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4752 rte_free(cb); 4753 } else { 4754 ret = -EAGAIN; 4755 } 4756 } 4757 } while (++next_port <= last_port); 4758 4759 rte_spinlock_unlock(ð_dev_cb_lock); 4760 return ret; 4761 } 4762 4763 int 4764 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4765 enum rte_eth_event_type event, void *ret_param) 4766 { 4767 struct rte_eth_dev_callback *cb_lst; 4768 struct rte_eth_dev_callback dev_cb; 4769 int rc = 0; 4770 4771 rte_spinlock_lock(ð_dev_cb_lock); 4772 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4773 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4774 continue; 4775 dev_cb = *cb_lst; 4776 cb_lst->active = 1; 4777 if (ret_param != NULL) 4778 dev_cb.ret_param = ret_param; 4779 4780 rte_spinlock_unlock(ð_dev_cb_lock); 4781 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4782 dev_cb.cb_arg, dev_cb.ret_param); 4783 rte_spinlock_lock(ð_dev_cb_lock); 4784 cb_lst->active = 0; 4785 } 4786 rte_spinlock_unlock(ð_dev_cb_lock); 4787 return rc; 4788 } 4789 4790 void 4791 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4792 { 4793 if (dev == NULL) 4794 return; 4795 4796 /* 4797 * for secondary process, at that point we expect device 4798 * to be already 'usable', so shared data and all function pointers 4799 * for fast-path devops have to be setup properly inside rte_eth_dev. 
4800 */ 4801 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4802 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4803 4804 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4805 4806 dev->state = RTE_ETH_DEV_ATTACHED; 4807 } 4808 4809 int 4810 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4811 { 4812 uint32_t vec; 4813 struct rte_eth_dev *dev; 4814 struct rte_intr_handle *intr_handle; 4815 uint16_t qid; 4816 int rc; 4817 4818 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4819 dev = &rte_eth_devices[port_id]; 4820 4821 if (!dev->intr_handle) { 4822 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4823 return -ENOTSUP; 4824 } 4825 4826 intr_handle = dev->intr_handle; 4827 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4828 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4829 return -EPERM; 4830 } 4831 4832 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4833 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4834 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4835 if (rc && rc != -EEXIST) { 4836 RTE_ETHDEV_LOG(ERR, 4837 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4838 port_id, qid, op, epfd, vec); 4839 } 4840 } 4841 4842 return 0; 4843 } 4844 4845 int 4846 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4847 { 4848 struct rte_intr_handle *intr_handle; 4849 struct rte_eth_dev *dev; 4850 unsigned int efd_idx; 4851 uint32_t vec; 4852 int fd; 4853 4854 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4855 dev = &rte_eth_devices[port_id]; 4856 4857 if (queue_id >= dev->data->nb_rx_queues) { 4858 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4859 return -1; 4860 } 4861 4862 if (!dev->intr_handle) { 4863 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4864 return -1; 4865 } 4866 4867 intr_handle = dev->intr_handle; 4868 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4869 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4870 return -1; 4871 } 4872 4873 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4874 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4875 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4876 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4877 4878 return fd; 4879 } 4880 4881 static inline int 4882 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4883 const char *ring_name) 4884 { 4885 return snprintf(name, len, "eth_p%d_q%d_%s", 4886 port_id, queue_id, ring_name); 4887 } 4888 4889 const struct rte_memzone * 4890 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4891 uint16_t queue_id, size_t size, unsigned align, 4892 int socket_id) 4893 { 4894 char z_name[RTE_MEMZONE_NAMESIZE]; 4895 const struct rte_memzone *mz; 4896 int rc; 4897 4898 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4899 queue_id, ring_name); 4900 if (rc >= RTE_MEMZONE_NAMESIZE) { 4901 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4902 rte_errno = ENAMETOOLONG; 4903 return NULL; 4904 } 4905 4906 mz = rte_memzone_lookup(z_name); 4907 if (mz) { 4908 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4909 size > mz->len || 4910 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4911 RTE_ETHDEV_LOG(ERR, 4912 "memzone %s does not justify the requested attributes\n", 4913 mz->name); 4914 return NULL; 4915 } 4916 4917 return mz; 4918 } 4919 4920 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4921 RTE_MEMZONE_IOVA_CONTIG, align); 4922 } 4923 4924 int 4925 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4926 uint16_t queue_id) 4927 { 4928 char z_name[RTE_MEMZONE_NAMESIZE]; 4929 const struct rte_memzone *mz; 4930 int rc = 0; 4931 4932 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4933 queue_id, ring_name); 4934 if (rc >= RTE_MEMZONE_NAMESIZE) { 4935 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4936 return -ENAMETOOLONG; 4937 } 4938 4939 mz = rte_memzone_lookup(z_name); 4940 if (mz) 4941 rc = rte_memzone_free(mz); 4942 else 4943 rc = -ENOENT; 4944 4945 return rc; 4946 } 4947 4948 int 4949 rte_eth_dev_create(struct rte_device *device, const char *name, 4950 size_t priv_data_size, 4951 ethdev_bus_specific_init ethdev_bus_specific_init, 4952 void *bus_init_params, 4953 ethdev_init_t ethdev_init, void *init_params) 4954 { 4955 struct rte_eth_dev *ethdev; 4956 int retval; 4957 4958 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4959 4960 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4961 ethdev = rte_eth_dev_allocate(name); 4962 if (!ethdev) 4963 return -ENODEV; 4964 4965 if (priv_data_size) { 4966 ethdev->data->dev_private = rte_zmalloc_socket( 4967 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4968 device->numa_node); 4969 4970 if (!ethdev->data->dev_private) { 4971 RTE_ETHDEV_LOG(ERR, 4972 "failed to allocate private data\n"); 4973 retval = -ENOMEM; 4974 goto probe_failed; 4975 } 4976 } 4977 } else { 4978 ethdev = rte_eth_dev_attach_secondary(name); 4979 if (!ethdev) { 4980 RTE_ETHDEV_LOG(ERR, 4981 "secondary process attach failed, ethdev doesn't exist\n"); 4982 return -ENODEV; 4983 } 4984 } 4985 4986 ethdev->device = device; 4987 4988 if (ethdev_bus_specific_init) { 4989 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 4990 if (retval) { 4991 RTE_ETHDEV_LOG(ERR, 4992 "ethdev bus specific initialisation failed\n"); 4993 goto probe_failed; 4994 } 4995 } 4996 4997 retval = ethdev_init(ethdev, init_params); 4998 if (retval) { 4999 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 5000 goto probe_failed; 5001 } 5002 5003 rte_eth_dev_probing_finish(ethdev); 5004 5005 return retval; 5006 5007 
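	/*
	 * Any failure after the port was allocated falls through to
	 * probe_failed below, which releases the partially initialised port
	 * so its ID and shared data entry can be reused by a later probe.
	 *
	 * Usage sketch (an illustrative, hedged example): a PCI PMD probe
	 * routine would typically wrap its own init function like
	 *
	 *	return rte_eth_dev_create(&pci_dev->device,
	 *			pci_dev->device.name,
	 *			sizeof(struct my_adapter), NULL, NULL,
	 *			my_ethdev_init, NULL);
	 *
	 * where "struct my_adapter" and "my_ethdev_init" are hypothetical
	 * driver-private names.
	 */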
probe_failed: 5008 rte_eth_dev_release_port(ethdev); 5009 return retval; 5010 } 5011 5012 int 5013 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 5014 ethdev_uninit_t ethdev_uninit) 5015 { 5016 int ret; 5017 5018 ethdev = rte_eth_dev_allocated(ethdev->data->name); 5019 if (!ethdev) 5020 return -ENODEV; 5021 5022 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 5023 5024 ret = ethdev_uninit(ethdev); 5025 if (ret) 5026 return ret; 5027 5028 return rte_eth_dev_release_port(ethdev); 5029 } 5030 5031 int 5032 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5033 int epfd, int op, void *data) 5034 { 5035 uint32_t vec; 5036 struct rte_eth_dev *dev; 5037 struct rte_intr_handle *intr_handle; 5038 int rc; 5039 5040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5041 dev = &rte_eth_devices[port_id]; 5042 5043 if (queue_id >= dev->data->nb_rx_queues) { 5044 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5045 return -EINVAL; 5046 } 5047 5048 if (!dev->intr_handle) { 5049 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5050 return -ENOTSUP; 5051 } 5052 5053 intr_handle = dev->intr_handle; 5054 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5055 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5056 return -EPERM; 5057 } 5058 5059 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5060 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5061 if (rc && rc != -EEXIST) { 5062 RTE_ETHDEV_LOG(ERR, 5063 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5064 port_id, queue_id, op, epfd, vec); 5065 return rc; 5066 } 5067 5068 return 0; 5069 } 5070 5071 int 5072 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5073 uint16_t queue_id) 5074 { 5075 struct rte_eth_dev *dev; 5076 int ret; 5077 5078 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5079 dev = &rte_eth_devices[port_id]; 5080 5081 ret = eth_dev_validate_rx_queue(dev, queue_id); 5082 if (ret != 0) 5083 return ret; 5084 5085 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5086 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5087 } 5088 5089 int 5090 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5091 uint16_t queue_id) 5092 { 5093 struct rte_eth_dev *dev; 5094 int ret; 5095 5096 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5097 dev = &rte_eth_devices[port_id]; 5098 5099 ret = eth_dev_validate_rx_queue(dev, queue_id); 5100 if (ret != 0) 5101 return ret; 5102 5103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5104 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5105 } 5106 5107 5108 const struct rte_eth_rxtx_callback * 5109 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5110 rte_rx_callback_fn fn, void *user_param) 5111 { 5112 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5113 rte_errno = ENOTSUP; 5114 return NULL; 5115 #endif 5116 struct rte_eth_dev *dev; 5117 5118 /* check input parameters */ 5119 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5120 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5121 rte_errno = EINVAL; 5122 return NULL; 5123 } 5124 dev = &rte_eth_devices[port_id]; 5125 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5126 rte_errno = EINVAL; 5127 return NULL; 5128 } 5129 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5130 5131 if (cb == NULL) { 5132 rte_errno = ENOMEM; 5133 return NULL; 5134 } 5135 5136 cb->fn.rx = fn; 5137 cb->param = user_param; 5138 5139 rte_spinlock_lock(ð_dev_rx_cb_lock); 5140 /* Add the callbacks in fifo 
order. */ 5141 struct rte_eth_rxtx_callback *tail = 5142 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5143 5144 if (!tail) { 5145 /* Stores to cb->fn and cb->param should complete before 5146 * cb is visible to data plane. 5147 */ 5148 __atomic_store_n( 5149 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5150 cb, __ATOMIC_RELEASE); 5151 5152 } else { 5153 while (tail->next) 5154 tail = tail->next; 5155 /* Stores to cb->fn and cb->param should complete before 5156 * cb is visible to data plane. 5157 */ 5158 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5159 } 5160 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5161 5162 return cb; 5163 } 5164 5165 const struct rte_eth_rxtx_callback * 5166 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5167 rte_rx_callback_fn fn, void *user_param) 5168 { 5169 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5170 rte_errno = ENOTSUP; 5171 return NULL; 5172 #endif 5173 /* check input parameters */ 5174 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5175 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5176 rte_errno = EINVAL; 5177 return NULL; 5178 } 5179 5180 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5181 5182 if (cb == NULL) { 5183 rte_errno = ENOMEM; 5184 return NULL; 5185 } 5186 5187 cb->fn.rx = fn; 5188 cb->param = user_param; 5189 5190 rte_spinlock_lock(ð_dev_rx_cb_lock); 5191 /* Add the callbacks at first position */ 5192 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5193 /* Stores to cb->fn, cb->param and cb->next should complete before 5194 * cb is visible to data plane threads. 5195 */ 5196 __atomic_store_n( 5197 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5198 cb, __ATOMIC_RELEASE); 5199 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5200 5201 return cb; 5202 } 5203 5204 const struct rte_eth_rxtx_callback * 5205 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5206 rte_tx_callback_fn fn, void *user_param) 5207 { 5208 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5209 rte_errno = ENOTSUP; 5210 return NULL; 5211 #endif 5212 struct rte_eth_dev *dev; 5213 5214 /* check input parameters */ 5215 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5216 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5217 rte_errno = EINVAL; 5218 return NULL; 5219 } 5220 5221 dev = &rte_eth_devices[port_id]; 5222 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5223 rte_errno = EINVAL; 5224 return NULL; 5225 } 5226 5227 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5228 5229 if (cb == NULL) { 5230 rte_errno = ENOMEM; 5231 return NULL; 5232 } 5233 5234 cb->fn.tx = fn; 5235 cb->param = user_param; 5236 5237 rte_spinlock_lock(ð_dev_tx_cb_lock); 5238 /* Add the callbacks in fifo order. */ 5239 struct rte_eth_rxtx_callback *tail = 5240 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5241 5242 if (!tail) { 5243 /* Stores to cb->fn and cb->param should complete before 5244 * cb is visible to data plane. 5245 */ 5246 __atomic_store_n( 5247 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5248 cb, __ATOMIC_RELEASE); 5249 5250 } else { 5251 while (tail->next) 5252 tail = tail->next; 5253 /* Stores to cb->fn and cb->param should complete before 5254 * cb is visible to data plane. 
5255 */ 5256 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5257 } 5258 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5259 5260 return cb; 5261 } 5262 5263 int 5264 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5265 const struct rte_eth_rxtx_callback *user_cb) 5266 { 5267 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5268 return -ENOTSUP; 5269 #endif 5270 /* Check input parameters. */ 5271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5272 if (user_cb == NULL || 5273 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5274 return -EINVAL; 5275 5276 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5277 struct rte_eth_rxtx_callback *cb; 5278 struct rte_eth_rxtx_callback **prev_cb; 5279 int ret = -EINVAL; 5280 5281 rte_spinlock_lock(ð_dev_rx_cb_lock); 5282 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5283 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5284 cb = *prev_cb; 5285 if (cb == user_cb) { 5286 /* Remove the user cb from the callback list. */ 5287 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5288 ret = 0; 5289 break; 5290 } 5291 } 5292 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5293 5294 return ret; 5295 } 5296 5297 int 5298 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5299 const struct rte_eth_rxtx_callback *user_cb) 5300 { 5301 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5302 return -ENOTSUP; 5303 #endif 5304 /* Check input parameters. */ 5305 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5306 if (user_cb == NULL || 5307 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5308 return -EINVAL; 5309 5310 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5311 int ret = -EINVAL; 5312 struct rte_eth_rxtx_callback *cb; 5313 struct rte_eth_rxtx_callback **prev_cb; 5314 5315 rte_spinlock_lock(ð_dev_tx_cb_lock); 5316 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5317 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5318 cb = *prev_cb; 5319 if (cb == user_cb) { 5320 /* Remove the user cb from the callback list. 
*/ 5321 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5322 ret = 0; 5323 break; 5324 } 5325 } 5326 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5327 5328 return ret; 5329 } 5330 5331 int 5332 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5333 struct rte_eth_rxq_info *qinfo) 5334 { 5335 struct rte_eth_dev *dev; 5336 5337 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5338 dev = &rte_eth_devices[port_id]; 5339 5340 if (queue_id >= dev->data->nb_rx_queues) { 5341 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5342 return -EINVAL; 5343 } 5344 5345 if (qinfo == NULL) { 5346 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5347 port_id, queue_id); 5348 return -EINVAL; 5349 } 5350 5351 if (dev->data->rx_queues == NULL || 5352 dev->data->rx_queues[queue_id] == NULL) { 5353 RTE_ETHDEV_LOG(ERR, 5354 "Rx queue %"PRIu16" of device with port_id=%" 5355 PRIu16" has not been setup\n", 5356 queue_id, port_id); 5357 return -EINVAL; 5358 } 5359 5360 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5361 RTE_ETHDEV_LOG(INFO, 5362 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5363 queue_id, port_id); 5364 return -EINVAL; 5365 } 5366 5367 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5368 5369 memset(qinfo, 0, sizeof(*qinfo)); 5370 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5371 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5372 5373 return 0; 5374 } 5375 5376 int 5377 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5378 struct rte_eth_txq_info *qinfo) 5379 { 5380 struct rte_eth_dev *dev; 5381 5382 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5383 dev = &rte_eth_devices[port_id]; 5384 5385 if (queue_id >= dev->data->nb_tx_queues) { 5386 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5387 return -EINVAL; 5388 } 5389 5390 if (qinfo == NULL) { 5391 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5392 port_id, queue_id); 5393 return -EINVAL; 5394 } 5395 5396 if (dev->data->tx_queues == NULL || 5397 dev->data->tx_queues[queue_id] == NULL) { 5398 RTE_ETHDEV_LOG(ERR, 5399 "Tx queue %"PRIu16" of device with port_id=%" 5400 PRIu16" has not been setup\n", 5401 queue_id, port_id); 5402 return -EINVAL; 5403 } 5404 5405 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5406 RTE_ETHDEV_LOG(INFO, 5407 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5408 queue_id, port_id); 5409 return -EINVAL; 5410 } 5411 5412 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5413 5414 memset(qinfo, 0, sizeof(*qinfo)); 5415 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5416 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5417 5418 return 0; 5419 } 5420 5421 int 5422 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5423 struct rte_eth_burst_mode *mode) 5424 { 5425 struct rte_eth_dev *dev; 5426 5427 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5428 dev = &rte_eth_devices[port_id]; 5429 5430 if (queue_id >= dev->data->nb_rx_queues) { 5431 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5432 return -EINVAL; 5433 } 5434 5435 if (mode == NULL) { 5436 RTE_ETHDEV_LOG(ERR, 5437 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5438 port_id, queue_id); 5439 return -EINVAL; 5440 } 5441 5442 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5443 memset(mode, 0, sizeof(*mode)); 5444 return eth_err(port_id, 5445 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5446 } 5447 5448 int 5449 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5450 struct rte_eth_burst_mode *mode) 5451 { 5452 struct rte_eth_dev *dev; 5453 5454 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5455 dev = &rte_eth_devices[port_id]; 5456 5457 if (queue_id >= dev->data->nb_tx_queues) { 5458 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5459 return -EINVAL; 5460 } 5461 5462 if (mode == NULL) { 5463 RTE_ETHDEV_LOG(ERR, 5464 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5465 port_id, queue_id); 5466 return -EINVAL; 5467 } 5468 5469 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5470 memset(mode, 0, sizeof(*mode)); 5471 return eth_err(port_id, 5472 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5473 } 5474 5475 int 5476 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5477 struct rte_power_monitor_cond *pmc) 5478 { 5479 struct rte_eth_dev *dev; 5480 5481 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5482 dev = &rte_eth_devices[port_id]; 5483 5484 if (queue_id >= dev->data->nb_rx_queues) { 5485 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5486 return -EINVAL; 5487 } 5488 5489 if (pmc == NULL) { 5490 RTE_ETHDEV_LOG(ERR, 5491 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5492 port_id, queue_id); 5493 return -EINVAL; 5494 } 5495 5496 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5497 return eth_err(port_id, 5498 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5499 } 5500 5501 int 5502 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5503 struct rte_ether_addr *mc_addr_set, 5504 uint32_t nb_mc_addr) 5505 { 5506 struct rte_eth_dev *dev; 5507 5508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5509 dev = &rte_eth_devices[port_id]; 5510 5511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5512 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5513 mc_addr_set, nb_mc_addr)); 5514 } 5515 5516 int 5517 rte_eth_timesync_enable(uint16_t port_id) 5518 { 5519 struct rte_eth_dev *dev; 5520 5521 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5522 dev = &rte_eth_devices[port_id]; 5523 5524 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5525 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5526 } 5527 5528 int 5529 rte_eth_timesync_disable(uint16_t port_id) 5530 { 5531 struct rte_eth_dev *dev; 5532 5533 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5534 dev = &rte_eth_devices[port_id]; 5535 5536 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5537 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5538 } 5539 5540 int 5541 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5542 uint32_t flags) 5543 { 5544 struct rte_eth_dev *dev; 5545 5546 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5547 dev = &rte_eth_devices[port_id]; 5548 5549 if (timestamp == NULL) { 5550 RTE_ETHDEV_LOG(ERR, 5551 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5552 port_id); 5553 return -EINVAL; 5554 } 5555 5556 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5557 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5558 (dev, timestamp, flags)); 5559 } 5560 5561 int 5562 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5563 struct timespec *timestamp) 5564 { 5565 struct rte_eth_dev *dev; 5566 5567 
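	/*
	 * Usage sketch (an illustrative, hedged example): after
	 * rte_eth_timesync_enable() the Tx timestamp of a PTP event frame
	 * sent with the RTE_MBUF_F_TX_IEEE1588_TMST offload flag is usually
	 * retrieved by polling, since PMDs typically return a negative
	 * value until the hardware has latched it:
	 *
	 *	struct timespec ts;
	 *	int retries = 100;
	 *
	 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) != 0 &&
	 *			--retries > 0)
	 *		rte_pause();
	 *
	 * A real application would also check that "retries" did not reach 0.
	 */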
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5568 dev = &rte_eth_devices[port_id]; 5569 5570 if (timestamp == NULL) { 5571 RTE_ETHDEV_LOG(ERR, 5572 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5573 port_id); 5574 return -EINVAL; 5575 } 5576 5577 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5578 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5579 (dev, timestamp)); 5580 } 5581 5582 int 5583 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5584 { 5585 struct rte_eth_dev *dev; 5586 5587 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5588 dev = &rte_eth_devices[port_id]; 5589 5590 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5591 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5592 } 5593 5594 int 5595 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5596 { 5597 struct rte_eth_dev *dev; 5598 5599 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5600 dev = &rte_eth_devices[port_id]; 5601 5602 if (timestamp == NULL) { 5603 RTE_ETHDEV_LOG(ERR, 5604 "Cannot read ethdev port %u timesync time to NULL\n", 5605 port_id); 5606 return -EINVAL; 5607 } 5608 5609 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5610 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5611 timestamp)); 5612 } 5613 5614 int 5615 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5616 { 5617 struct rte_eth_dev *dev; 5618 5619 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5620 dev = &rte_eth_devices[port_id]; 5621 5622 if (timestamp == NULL) { 5623 RTE_ETHDEV_LOG(ERR, 5624 "Cannot write ethdev port %u timesync from NULL time\n", 5625 port_id); 5626 return -EINVAL; 5627 } 5628 5629 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5630 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5631 timestamp)); 5632 } 5633 5634 int 5635 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5636 { 5637 struct rte_eth_dev *dev; 5638 5639 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5640 dev = &rte_eth_devices[port_id]; 5641 5642 if (clock == NULL) { 5643 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5644 port_id); 5645 return -EINVAL; 5646 } 5647 5648 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5649 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5650 } 5651 5652 int 5653 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5654 { 5655 struct rte_eth_dev *dev; 5656 5657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5658 dev = &rte_eth_devices[port_id]; 5659 5660 if (info == NULL) { 5661 RTE_ETHDEV_LOG(ERR, 5662 "Cannot get ethdev port %u register info to NULL\n", 5663 port_id); 5664 return -EINVAL; 5665 } 5666 5667 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5668 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5669 } 5670 5671 int 5672 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5673 { 5674 struct rte_eth_dev *dev; 5675 5676 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5677 dev = &rte_eth_devices[port_id]; 5678 5679 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5680 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5681 } 5682 5683 int 5684 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5685 { 5686 struct rte_eth_dev *dev; 5687 5688 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
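	/*
	 * Usage sketch (an illustrative, hedged example): the caller owns
	 * the data buffer and selects the window to read:
	 *
	 *	uint8_t buf[512];
	 *	struct rte_dev_eeprom_info eeprom = {
	 *		.data = buf,
	 *		.offset = 0,
	 *		.length = sizeof(buf),
	 *	};
	 *	int ret = rte_eth_dev_get_eeprom(port_id, &eeprom);
	 *
	 * rte_eth_dev_get_eeprom_length() reports the total EEPROM size and
	 * is normally used to bound offset/length; the 512-byte window here
	 * is an arbitrary choice for the example.
	 */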
5689 dev = &rte_eth_devices[port_id]; 5690 5691 if (info == NULL) { 5692 RTE_ETHDEV_LOG(ERR, 5693 "Cannot get ethdev port %u EEPROM info to NULL\n", 5694 port_id); 5695 return -EINVAL; 5696 } 5697 5698 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5699 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5700 } 5701 5702 int 5703 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5704 { 5705 struct rte_eth_dev *dev; 5706 5707 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5708 dev = &rte_eth_devices[port_id]; 5709 5710 if (info == NULL) { 5711 RTE_ETHDEV_LOG(ERR, 5712 "Cannot set ethdev port %u EEPROM from NULL info\n", 5713 port_id); 5714 return -EINVAL; 5715 } 5716 5717 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5718 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5719 } 5720 5721 int 5722 rte_eth_dev_get_module_info(uint16_t port_id, 5723 struct rte_eth_dev_module_info *modinfo) 5724 { 5725 struct rte_eth_dev *dev; 5726 5727 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5728 dev = &rte_eth_devices[port_id]; 5729 5730 if (modinfo == NULL) { 5731 RTE_ETHDEV_LOG(ERR, 5732 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5733 port_id); 5734 return -EINVAL; 5735 } 5736 5737 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5738 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5739 } 5740 5741 int 5742 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5743 struct rte_dev_eeprom_info *info) 5744 { 5745 struct rte_eth_dev *dev; 5746 5747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5748 dev = &rte_eth_devices[port_id]; 5749 5750 if (info == NULL) { 5751 RTE_ETHDEV_LOG(ERR, 5752 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5753 port_id); 5754 return -EINVAL; 5755 } 5756 5757 if (info->data == NULL) { 5758 RTE_ETHDEV_LOG(ERR, 5759 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5760 port_id); 5761 return -EINVAL; 5762 } 5763 5764 if (info->length == 0) { 5765 RTE_ETHDEV_LOG(ERR, 5766 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5767 port_id); 5768 return -EINVAL; 5769 } 5770 5771 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5772 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5773 } 5774 5775 int 5776 rte_eth_dev_get_dcb_info(uint16_t port_id, 5777 struct rte_eth_dcb_info *dcb_info) 5778 { 5779 struct rte_eth_dev *dev; 5780 5781 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5782 dev = &rte_eth_devices[port_id]; 5783 5784 if (dcb_info == NULL) { 5785 RTE_ETHDEV_LOG(ERR, 5786 "Cannot get ethdev port %u DCB info to NULL\n", 5787 port_id); 5788 return -EINVAL; 5789 } 5790 5791 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5792 5793 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5794 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5795 } 5796 5797 static void 5798 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5799 const struct rte_eth_desc_lim *desc_lim) 5800 { 5801 if (desc_lim->nb_align != 0) 5802 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5803 5804 if (desc_lim->nb_max != 0) 5805 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5806 5807 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5808 } 5809 5810 int 5811 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5812 uint16_t *nb_rx_desc, 5813 uint16_t *nb_tx_desc) 5814 { 5815 struct rte_eth_dev_info dev_info; 5816 int ret; 5817 5818 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
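	/*
	 * Each requested count is aligned up to desc_lim.nb_align, capped
	 * at desc_lim.nb_max and raised to desc_lim.nb_min by
	 * eth_dev_adjust_nb_desc() above, so the adjusted value is always
	 * acceptable to the PMD.
	 *
	 * Usage sketch (an illustrative, hedged example): applications
	 * typically call this just before queue setup so a generic default
	 * ring size is adapted to the device limits:
	 *
	 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
	 *
	 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
	 *			&nb_txd) == 0)
	 *		(void)rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
	 *				rte_eth_dev_socket_id(port_id),
	 *				NULL, mb_pool);
	 *
	 * where "mb_pool" is an mbuf mempool created by the application.
	 */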
5819 5820 ret = rte_eth_dev_info_get(port_id, &dev_info); 5821 if (ret != 0) 5822 return ret; 5823 5824 if (nb_rx_desc != NULL) 5825 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5826 5827 if (nb_tx_desc != NULL) 5828 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5829 5830 return 0; 5831 } 5832 5833 int 5834 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5835 struct rte_eth_hairpin_cap *cap) 5836 { 5837 struct rte_eth_dev *dev; 5838 5839 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5840 dev = &rte_eth_devices[port_id]; 5841 5842 if (cap == NULL) { 5843 RTE_ETHDEV_LOG(ERR, 5844 "Cannot get ethdev port %u hairpin capability to NULL\n", 5845 port_id); 5846 return -EINVAL; 5847 } 5848 5849 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5850 memset(cap, 0, sizeof(*cap)); 5851 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5852 } 5853 5854 int 5855 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5856 { 5857 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5858 return 1; 5859 return 0; 5860 } 5861 5862 int 5863 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5864 { 5865 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5866 return 1; 5867 return 0; 5868 } 5869 5870 int 5871 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5872 { 5873 struct rte_eth_dev *dev; 5874 5875 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5876 dev = &rte_eth_devices[port_id]; 5877 5878 if (pool == NULL) { 5879 RTE_ETHDEV_LOG(ERR, 5880 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5881 port_id); 5882 return -EINVAL; 5883 } 5884 5885 if (*dev->dev_ops->pool_ops_supported == NULL) 5886 return 1; /* all pools are supported */ 5887 5888 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5889 } 5890 5891 /** 5892 * A set of values to describe the possible states of a switch domain. 5893 */ 5894 enum rte_eth_switch_domain_state { 5895 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5896 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5897 }; 5898 5899 /** 5900 * Array of switch domains available for allocation. Array is sized to 5901 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5902 * ethdev ports in a single process. 
5903 */ 5904 static struct rte_eth_dev_switch { 5905 enum rte_eth_switch_domain_state state; 5906 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5907 5908 int 5909 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5910 { 5911 uint16_t i; 5912 5913 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5914 5915 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5916 if (eth_dev_switch_domains[i].state == 5917 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5918 eth_dev_switch_domains[i].state = 5919 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5920 *domain_id = i; 5921 return 0; 5922 } 5923 } 5924 5925 return -ENOSPC; 5926 } 5927 5928 int 5929 rte_eth_switch_domain_free(uint16_t domain_id) 5930 { 5931 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5932 domain_id >= RTE_MAX_ETHPORTS) 5933 return -EINVAL; 5934 5935 if (eth_dev_switch_domains[domain_id].state != 5936 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5937 return -EINVAL; 5938 5939 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5940 5941 return 0; 5942 } 5943 5944 static int 5945 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5946 { 5947 int state; 5948 struct rte_kvargs_pair *pair; 5949 char *letter; 5950 5951 arglist->str = strdup(str_in); 5952 if (arglist->str == NULL) 5953 return -ENOMEM; 5954 5955 letter = arglist->str; 5956 state = 0; 5957 arglist->count = 0; 5958 pair = &arglist->pairs[0]; 5959 while (1) { 5960 switch (state) { 5961 case 0: /* Initial */ 5962 if (*letter == '=') 5963 return -EINVAL; 5964 else if (*letter == '\0') 5965 return 0; 5966 5967 state = 1; 5968 pair->key = letter; 5969 /* fall-thru */ 5970 5971 case 1: /* Parsing key */ 5972 if (*letter == '=') { 5973 *letter = '\0'; 5974 pair->value = letter + 1; 5975 state = 2; 5976 } else if (*letter == ',' || *letter == '\0') 5977 return -EINVAL; 5978 break; 5979 5980 5981 case 2: /* Parsing value */ 5982 if (*letter == '[') 5983 state = 3; 5984 else if (*letter == ',') { 5985 *letter = '\0'; 5986 arglist->count++; 5987 pair = &arglist->pairs[arglist->count]; 5988 state = 0; 5989 } else if (*letter == '\0') { 5990 letter--; 5991 arglist->count++; 5992 pair = &arglist->pairs[arglist->count]; 5993 state = 0; 5994 } 5995 break; 5996 5997 case 3: /* Parsing list */ 5998 if (*letter == ']') 5999 state = 2; 6000 else if (*letter == '\0') 6001 return -EINVAL; 6002 break; 6003 } 6004 letter++; 6005 } 6006 } 6007 6008 int 6009 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 6010 { 6011 struct rte_kvargs args; 6012 struct rte_kvargs_pair *pair; 6013 unsigned int i; 6014 int result = 0; 6015 6016 memset(eth_da, 0, sizeof(*eth_da)); 6017 6018 result = eth_dev_devargs_tokenise(&args, dargs); 6019 if (result < 0) 6020 goto parse_cleanup; 6021 6022 for (i = 0; i < args.count; i++) { 6023 pair = &args.pairs[i]; 6024 if (strcmp("representor", pair->key) == 0) { 6025 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 6026 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 6027 dargs); 6028 result = -1; 6029 goto parse_cleanup; 6030 } 6031 result = rte_eth_devargs_parse_representor_ports( 6032 pair->value, eth_da); 6033 if (result < 0) 6034 goto parse_cleanup; 6035 } 6036 } 6037 6038 parse_cleanup: 6039 if (args.str) 6040 free(args.str); 6041 6042 return result; 6043 } 6044 6045 int 6046 rte_eth_representor_id_get(uint16_t port_id, 6047 enum rte_eth_representor_type type, 6048 int controller, int pf, int representor_port, 6049 uint16_t *repr_id) 6050 { 6051 int ret, n, count; 6052 uint32_t i; 6053 struct rte_eth_representor_info *info = NULL; 6054 size_t 
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
			 const char *params __rte_unused,
			 struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
			     const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
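/*
 * For reference: ADD_DICT_STAT(stats, ipackets) in the handler below expands
 * to rte_tel_data_add_dict_u64(d, "ipackets", stats.ipackets), i.e. the
 * struct rte_eth_stats member name doubles as the dictionary key.
 */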
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
			  const char *params,
			  struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
			   const char *params,
			   struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			     sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}
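/*
 * Query sketch (illustrative): these handlers are reached through the
 * telemetry socket, e.g. with usertools/dpdk-telemetry.py; a request such
 * as "/ethdev/xstats,0" selects port 0, the text after the first comma
 * being the params string each handler parses.
 */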
6288 "full-duplex" : "half-duplex"); 6289 return 0; 6290 } 6291 6292 static int 6293 eth_dev_handle_port_info(const char *cmd __rte_unused, 6294 const char *params, 6295 struct rte_tel_data *d) 6296 { 6297 struct rte_tel_data *rxq_state, *txq_state; 6298 char mac_addr[RTE_ETHER_ADDR_LEN]; 6299 struct rte_eth_dev *eth_dev; 6300 char *end_param; 6301 int port_id, i; 6302 6303 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6304 return -1; 6305 6306 port_id = strtoul(params, &end_param, 0); 6307 if (*end_param != '\0') 6308 RTE_ETHDEV_LOG(NOTICE, 6309 "Extra parameters passed to ethdev telemetry command, ignoring"); 6310 6311 if (!rte_eth_dev_is_valid_port(port_id)) 6312 return -EINVAL; 6313 6314 eth_dev = &rte_eth_devices[port_id]; 6315 if (!eth_dev) 6316 return -EINVAL; 6317 6318 rxq_state = rte_tel_data_alloc(); 6319 if (!rxq_state) 6320 return -ENOMEM; 6321 6322 txq_state = rte_tel_data_alloc(); 6323 if (!txq_state) 6324 return -ENOMEM; 6325 6326 rte_tel_data_start_dict(d); 6327 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6328 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6329 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6330 eth_dev->data->nb_rx_queues); 6331 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6332 eth_dev->data->nb_tx_queues); 6333 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6334 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6335 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6336 eth_dev->data->min_rx_buf_size); 6337 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6338 eth_dev->data->rx_mbuf_alloc_failed); 6339 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6340 eth_dev->data->mac_addrs->addr_bytes[0], 6341 eth_dev->data->mac_addrs->addr_bytes[1], 6342 eth_dev->data->mac_addrs->addr_bytes[2], 6343 eth_dev->data->mac_addrs->addr_bytes[3], 6344 eth_dev->data->mac_addrs->addr_bytes[4], 6345 eth_dev->data->mac_addrs->addr_bytes[5]); 6346 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6347 rte_tel_data_add_dict_int(d, "promiscuous", 6348 eth_dev->data->promiscuous); 6349 rte_tel_data_add_dict_int(d, "scattered_rx", 6350 eth_dev->data->scattered_rx); 6351 rte_tel_data_add_dict_int(d, "all_multicast", 6352 eth_dev->data->all_multicast); 6353 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6354 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6355 rte_tel_data_add_dict_int(d, "dev_configured", 6356 eth_dev->data->dev_configured); 6357 6358 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6359 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6360 rte_tel_data_add_array_int(rxq_state, 6361 eth_dev->data->rx_queue_state[i]); 6362 6363 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6364 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6365 rte_tel_data_add_array_int(txq_state, 6366 eth_dev->data->tx_queue_state[i]); 6367 6368 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6369 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6370 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6371 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6372 rte_tel_data_add_dict_int(d, "rx_offloads", 6373 eth_dev->data->dev_conf.rxmode.offloads); 6374 rte_tel_data_add_dict_int(d, "tx_offloads", 6375 eth_dev->data->dev_conf.txmode.offloads); 6376 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6377 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6378 6379 return 0; 6380 } 6381 
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
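/*
 * Usage sketch (illustrative; RTE_ETH_RX_METADATA_USER_MARK is the public
 * flag from rte_ethdev.h, the surrounding flow is an example): negotiation
 * must happen before rte_eth_dev_configure(), which is what the -EBUSY
 * check above enforces.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
 *		... the PMD agreed to deliver flow MARK values in mbufs ...
 *	}
 */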
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
}
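/*
 * Client-side sketch (illustrative; actual values depend on the running
 * application): a telemetry client such as usertools/dpdk-telemetry.py
 * receives each reply as a JSON object keyed by the command name, e.g.
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0]}
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", ...}}
 */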