/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
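/*
 * Illustrative sketch (not part of the upstream file): the name/offset pairs
 * kept in the tables below let generic code read any basic counter from
 * struct rte_eth_stats without naming its field, e.g.:
 *
 *	struct rte_eth_stats stats;
 *	uint64_t value;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		// counter described by eth_dev_stats_strings[i]
 *		value = *(const uint64_t *)(((const char *)&stats) +
 *				eth_dev_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n",
 *		       eth_dev_stats_strings[i].name, value);
 *	}
 */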
/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
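/*
 * Usage sketch (illustrative, not part of the upstream sources): an
 * application typically drives the three iterator calls above through the
 * RTE_ETH_FOREACH_MATCHING_DEV() helper from rte_ethdev.h, e.g.:
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id,
 *			"class=eth,mac=00:11:22:33:44:55", &iterator)
 *		printf("matching port: %u\n", port_id);
 *
 * rte_eth_iterator_next() releases the iterator itself once the last match
 * has been returned, so an explicit rte_eth_iterator_cleanup() is only
 * needed when the loop is left early.
 */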
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
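/*
 * Rough sketch (illustrative only, not upstream code) of how a software
 * PMD's probe path typically consumes rte_eth_dev_allocate(); the private
 * structure and ops table names are placeholders for driver-provided ones:
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->data->dev_private = rte_zmalloc_socket("example_priv",
 *			sizeof(struct example_priv), 0, rte_socket_id());
 *	if (eth_dev->data->dev_private == NULL) {
 *		rte_eth_dev_release_port(eth_dev);
 *		return -ENOMEM;
 *	}
 *	eth_dev->dev_ops = &example_dev_ops;	// driver callbacks
 *	// ... set mac_addrs, rx/tx burst functions, etc. ...
 *	rte_eth_dev_probing_finish(eth_dev);	// declared in ethdev_driver.h
 */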
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port ID both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}
	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}
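/*
 * Illustrative ownership sketch (not part of the upstream file): a component
 * that wants exclusive control of a port first asks for an owner ID and then
 * claims the port with the API above, so that RTE_ETH_FOREACH_DEV() in other
 * parts of the application skips it; port_id is assumed to be a valid port
 * obtained elsewhere:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		// ... exclusive use of port_id ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */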
void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
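/*
 * Deferred-start sketch (illustrative, not upstream code): the per-queue
 * start/stop calls above are mostly useful together with the
 * rx_deferred_start/tx_deferred_start flags of rte_eth_rxconf/rte_eth_txconf,
 * assuming the driver implements the rx_queue_start/tx_queue_start callbacks;
 * dev_info, port_id and mb_pool are assumed to have been obtained earlier:
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;	// queue stays stopped on dev_start
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			       &rxconf, mb_pool);
 *	rte_eth_dev_start(port_id);
 *	// ... later, once the application is ready to receive on queue 0:
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */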
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}
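/*
 * Illustrative sketch (not part of the upstream file): the *_offload_name()
 * helpers above take a single offload bit, so printing a whole capability
 * mask is usually done by peeling one bit at a time, the same way
 * eth_dev_validate_offloads() below walks its diff mask; dev_info is assumed
 * to come from rte_eth_dev_info_get():
 *
 *	uint64_t mask = dev_info.rx_offload_capa;
 *
 *	while (mask != 0) {
 *		uint64_t bit = RTE_BIT64(__builtin_ctzll(mask));
 *
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		mask &= ~bit;
 *	}
 */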
/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
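/*
 * Port bring-up sketch (illustrative, not upstream code): the usual calling
 * sequence around the configure/queue-setup/start/stop/close APIs implemented
 * in this file, with error handling elided; port_id and mb_pool are assumed
 * to exist already:
 *
 *	struct rte_eth_conf conf = {0};
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			       rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 *	// ... datapath ...
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */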
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
2014 */ 2015 mp_first = rx_seg[0].mp; 2016 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 2017 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 2018 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 2019 uint32_t length = rx_seg[seg_idx].length; 2020 uint32_t offset = rx_seg[seg_idx].offset; 2021 2022 if (mpl == NULL) { 2023 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 2024 return -EINVAL; 2025 } 2026 if (seg_idx != 0 && mp_first != mpl && 2027 seg_capa->multi_pools == 0) { 2028 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 2029 return -ENOTSUP; 2030 } 2031 if (offset != 0) { 2032 if (seg_capa->offset_allowed == 0) { 2033 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 2034 return -ENOTSUP; 2035 } 2036 if (offset & offset_mask) { 2037 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 2038 offset, 2039 seg_capa->offset_align_log2); 2040 return -EINVAL; 2041 } 2042 } 2043 if (mpl->private_data_size < 2044 sizeof(struct rte_pktmbuf_pool_private)) { 2045 RTE_ETHDEV_LOG(ERR, 2046 "%s private_data_size %u < %u\n", 2047 mpl->name, mpl->private_data_size, 2048 (unsigned int)sizeof 2049 (struct rte_pktmbuf_pool_private)); 2050 return -ENOSPC; 2051 } 2052 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2053 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2054 length = length != 0 ? length : *mbp_buf_size; 2055 if (*mbp_buf_size < length + offset) { 2056 RTE_ETHDEV_LOG(ERR, 2057 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 2058 mpl->name, *mbp_buf_size, 2059 length + offset, length, offset); 2060 return -EINVAL; 2061 } 2062 } 2063 return 0; 2064 } 2065 2066 int 2067 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2068 uint16_t nb_rx_desc, unsigned int socket_id, 2069 const struct rte_eth_rxconf *rx_conf, 2070 struct rte_mempool *mp) 2071 { 2072 int ret; 2073 uint32_t mbp_buf_size; 2074 struct rte_eth_dev *dev; 2075 struct rte_eth_dev_info dev_info; 2076 struct rte_eth_rxconf local_conf; 2077 2078 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2079 dev = &rte_eth_devices[port_id]; 2080 2081 if (rx_queue_id >= dev->data->nb_rx_queues) { 2082 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2083 return -EINVAL; 2084 } 2085 2086 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 2087 2088 ret = rte_eth_dev_info_get(port_id, &dev_info); 2089 if (ret != 0) 2090 return ret; 2091 2092 if (mp != NULL) { 2093 /* Single pool configuration check. */ 2094 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 2095 RTE_ETHDEV_LOG(ERR, 2096 "Ambiguous segment configuration\n"); 2097 return -EINVAL; 2098 } 2099 /* 2100 * Check the size of the mbuf data buffer, this value 2101 * must be provided in the private data of the memory pool. 2102 * First check that the memory pool(s) has a valid private data. 
2103 */ 2104 if (mp->private_data_size < 2105 sizeof(struct rte_pktmbuf_pool_private)) { 2106 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 2107 mp->name, mp->private_data_size, 2108 (unsigned int) 2109 sizeof(struct rte_pktmbuf_pool_private)); 2110 return -ENOSPC; 2111 } 2112 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2113 if (mbp_buf_size < dev_info.min_rx_bufsize + 2114 RTE_PKTMBUF_HEADROOM) { 2115 RTE_ETHDEV_LOG(ERR, 2116 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 2117 mp->name, mbp_buf_size, 2118 RTE_PKTMBUF_HEADROOM + 2119 dev_info.min_rx_bufsize, 2120 RTE_PKTMBUF_HEADROOM, 2121 dev_info.min_rx_bufsize); 2122 return -EINVAL; 2123 } 2124 } else { 2125 const struct rte_eth_rxseg_split *rx_seg; 2126 uint16_t n_seg; 2127 2128 /* Extended multi-segment configuration check. */ 2129 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 2130 RTE_ETHDEV_LOG(ERR, 2131 "Memory pool is null and no extended configuration provided\n"); 2132 return -EINVAL; 2133 } 2134 2135 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2136 n_seg = rx_conf->rx_nseg; 2137 2138 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2139 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 2140 &mbp_buf_size, 2141 &dev_info); 2142 if (ret != 0) 2143 return ret; 2144 } else { 2145 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2146 return -EINVAL; 2147 } 2148 } 2149 2150 /* Use default specified by driver, if nb_rx_desc is zero */ 2151 if (nb_rx_desc == 0) { 2152 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2153 /* If driver default is also zero, fall back on EAL default */ 2154 if (nb_rx_desc == 0) 2155 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2156 } 2157 2158 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2159 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2160 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2161 2162 RTE_ETHDEV_LOG(ERR, 2163 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2164 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2165 dev_info.rx_desc_lim.nb_min, 2166 dev_info.rx_desc_lim.nb_align); 2167 return -EINVAL; 2168 } 2169 2170 if (dev->data->dev_started && 2171 !(dev_info.dev_capa & 2172 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2173 return -EBUSY; 2174 2175 if (dev->data->dev_started && 2176 (dev->data->rx_queue_state[rx_queue_id] != 2177 RTE_ETH_QUEUE_STATE_STOPPED)) 2178 return -EBUSY; 2179 2180 eth_dev_rxq_release(dev, rx_queue_id); 2181 2182 if (rx_conf == NULL) 2183 rx_conf = &dev_info.default_rxconf; 2184 2185 local_conf = *rx_conf; 2186 2187 /* 2188 * If an offloading has already been enabled in 2189 * rte_eth_dev_configure(), it has been enabled on all queues, 2190 * so there is no need to enable it in this queue again. 2191 * The local_conf.offloads input to underlying PMD only carries 2192 * those offloadings which are only enabled on this queue and 2193 * not enabled on all queues. 2194 */ 2195 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2196 2197 /* 2198 * New added offloadings for this queue are those not enabled in 2199 * rte_eth_dev_configure() and they must be per-queue type. 2200 * A pure per-port offloading can't be enabled on a queue while 2201 * disabled on another queue. A pure per-port offloading can't 2202 * be enabled for any queue as new added one if it hasn't been 2203 * enabled in rte_eth_dev_configure(). 
2204 */ 2205 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2206 local_conf.offloads) { 2207 RTE_ETHDEV_LOG(ERR, 2208 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2209 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2210 port_id, rx_queue_id, local_conf.offloads, 2211 dev_info.rx_queue_offload_capa, 2212 __func__); 2213 return -EINVAL; 2214 } 2215 2216 if (local_conf.share_group > 0 && 2217 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2218 RTE_ETHDEV_LOG(ERR, 2219 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2220 port_id, rx_queue_id, local_conf.share_group); 2221 return -EINVAL; 2222 } 2223 2224 /* 2225 * If LRO is enabled, check that the maximum aggregated packet 2226 * size is supported by the configured device. 2227 */ 2228 /* Get the real Ethernet overhead length */ 2229 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2230 uint32_t overhead_len; 2231 uint32_t max_rx_pktlen; 2232 int ret; 2233 2234 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2235 dev_info.max_mtu); 2236 max_rx_pktlen = dev->data->mtu + overhead_len; 2237 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2238 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2239 ret = eth_dev_check_lro_pkt_size(port_id, 2240 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2241 max_rx_pktlen, 2242 dev_info.max_lro_pkt_size); 2243 if (ret != 0) 2244 return ret; 2245 } 2246 2247 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2248 socket_id, &local_conf, mp); 2249 if (!ret) { 2250 if (!dev->data->min_rx_buf_size || 2251 dev->data->min_rx_buf_size > mbp_buf_size) 2252 dev->data->min_rx_buf_size = mbp_buf_size; 2253 } 2254 2255 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2256 rx_conf, ret); 2257 return eth_err(port_id, ret); 2258 } 2259 2260 int 2261 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2262 uint16_t nb_rx_desc, 2263 const struct rte_eth_hairpin_conf *conf) 2264 { 2265 int ret; 2266 struct rte_eth_dev *dev; 2267 struct rte_eth_hairpin_cap cap; 2268 int i; 2269 int count; 2270 2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2272 dev = &rte_eth_devices[port_id]; 2273 2274 if (rx_queue_id >= dev->data->nb_rx_queues) { 2275 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2276 return -EINVAL; 2277 } 2278 2279 if (conf == NULL) { 2280 RTE_ETHDEV_LOG(ERR, 2281 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2282 port_id); 2283 return -EINVAL; 2284 } 2285 2286 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2287 if (ret != 0) 2288 return ret; 2289 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 2290 -ENOTSUP); 2291 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2292 if (nb_rx_desc == 0) 2293 nb_rx_desc = cap.max_nb_desc; 2294 if (nb_rx_desc > cap.max_nb_desc) { 2295 RTE_ETHDEV_LOG(ERR, 2296 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2297 nb_rx_desc, cap.max_nb_desc); 2298 return -EINVAL; 2299 } 2300 if (conf->peer_count > cap.max_rx_2_tx) { 2301 RTE_ETHDEV_LOG(ERR, 2302 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2303 conf->peer_count, cap.max_rx_2_tx); 2304 return -EINVAL; 2305 } 2306 if (conf->peer_count == 0) { 2307 RTE_ETHDEV_LOG(ERR, 2308 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2309 conf->peer_count); 2310 return -EINVAL; 2311 } 2312 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2313 cap.max_nb_queues != UINT16_MAX; i++) { 2314 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2315 count++; 2316 } 2317 if (count > cap.max_nb_queues) { 2318 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2319 cap.max_nb_queues); 2320 return -EINVAL; 2321 } 2322 if (dev->data->dev_started) 2323 return -EBUSY; 2324 eth_dev_rxq_release(dev, rx_queue_id); 2325 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2326 nb_rx_desc, conf); 2327 if (ret == 0) 2328 dev->data->rx_queue_state[rx_queue_id] = 2329 RTE_ETH_QUEUE_STATE_HAIRPIN; 2330 return eth_err(port_id, ret); 2331 } 2332 2333 int 2334 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2335 uint16_t nb_tx_desc, unsigned int socket_id, 2336 const struct rte_eth_txconf *tx_conf) 2337 { 2338 struct rte_eth_dev *dev; 2339 struct rte_eth_dev_info dev_info; 2340 struct rte_eth_txconf local_conf; 2341 int ret; 2342 2343 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2344 dev = &rte_eth_devices[port_id]; 2345 2346 if (tx_queue_id >= dev->data->nb_tx_queues) { 2347 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2348 return -EINVAL; 2349 } 2350 2351 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2352 2353 ret = rte_eth_dev_info_get(port_id, &dev_info); 2354 if (ret != 0) 2355 return ret; 2356 2357 /* Use default specified by driver, if nb_tx_desc is zero */ 2358 if (nb_tx_desc == 0) { 2359 nb_tx_desc = dev_info.default_txportconf.ring_size; 2360 /* If driver default is zero, fall back on EAL default */ 2361 if (nb_tx_desc == 0) 2362 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2363 } 2364 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2365 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2366 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2367 RTE_ETHDEV_LOG(ERR, 2368 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2369 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2370 dev_info.tx_desc_lim.nb_min, 2371 dev_info.tx_desc_lim.nb_align); 2372 return -EINVAL; 2373 } 2374 2375 if (dev->data->dev_started && 2376 !(dev_info.dev_capa & 2377 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2378 return -EBUSY; 2379 2380 if (dev->data->dev_started && 2381 (dev->data->tx_queue_state[tx_queue_id] != 2382 RTE_ETH_QUEUE_STATE_STOPPED)) 2383 return -EBUSY; 2384 2385 eth_dev_txq_release(dev, tx_queue_id); 2386 2387 if (tx_conf == NULL) 2388 tx_conf = &dev_info.default_txconf; 2389 2390 local_conf = *tx_conf; 2391 2392 /* 2393 * If an offloading has already been enabled in 2394 * rte_eth_dev_configure(), it has been enabled on all queues, 2395 * so there is no need to enable it in this queue again. 
2396 * The local_conf.offloads input to underlying PMD only carries 2397 * those offloadings which are only enabled on this queue and 2398 * not enabled on all queues. 2399 */ 2400 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2401 2402 /* 2403 * New added offloadings for this queue are those not enabled in 2404 * rte_eth_dev_configure() and they must be per-queue type. 2405 * A pure per-port offloading can't be enabled on a queue while 2406 * disabled on another queue. A pure per-port offloading can't 2407 * be enabled for any queue as new added one if it hasn't been 2408 * enabled in rte_eth_dev_configure(). 2409 */ 2410 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2411 local_conf.offloads) { 2412 RTE_ETHDEV_LOG(ERR, 2413 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2414 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2415 port_id, tx_queue_id, local_conf.offloads, 2416 dev_info.tx_queue_offload_capa, 2417 __func__); 2418 return -EINVAL; 2419 } 2420 2421 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2422 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2423 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2424 } 2425 2426 int 2427 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2428 uint16_t nb_tx_desc, 2429 const struct rte_eth_hairpin_conf *conf) 2430 { 2431 struct rte_eth_dev *dev; 2432 struct rte_eth_hairpin_cap cap; 2433 int i; 2434 int count; 2435 int ret; 2436 2437 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2438 dev = &rte_eth_devices[port_id]; 2439 2440 if (tx_queue_id >= dev->data->nb_tx_queues) { 2441 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2442 return -EINVAL; 2443 } 2444 2445 if (conf == NULL) { 2446 RTE_ETHDEV_LOG(ERR, 2447 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2448 port_id); 2449 return -EINVAL; 2450 } 2451 2452 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2453 if (ret != 0) 2454 return ret; 2455 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2456 -ENOTSUP); 2457 /* if nb_tx_desc is zero use max number of desc from the driver. 
*/ 2458 if (nb_tx_desc == 0) 2459 nb_tx_desc = cap.max_nb_desc; 2460 if (nb_tx_desc > cap.max_nb_desc) { 2461 RTE_ETHDEV_LOG(ERR, 2462 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2463 nb_tx_desc, cap.max_nb_desc); 2464 return -EINVAL; 2465 } 2466 if (conf->peer_count > cap.max_tx_2_rx) { 2467 RTE_ETHDEV_LOG(ERR, 2468 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2469 conf->peer_count, cap.max_tx_2_rx); 2470 return -EINVAL; 2471 } 2472 if (conf->peer_count == 0) { 2473 RTE_ETHDEV_LOG(ERR, 2474 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2475 conf->peer_count); 2476 return -EINVAL; 2477 } 2478 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2479 cap.max_nb_queues != UINT16_MAX; i++) { 2480 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2481 count++; 2482 } 2483 if (count > cap.max_nb_queues) { 2484 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2485 cap.max_nb_queues); 2486 return -EINVAL; 2487 } 2488 if (dev->data->dev_started) 2489 return -EBUSY; 2490 eth_dev_txq_release(dev, tx_queue_id); 2491 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2492 (dev, tx_queue_id, nb_tx_desc, conf); 2493 if (ret == 0) 2494 dev->data->tx_queue_state[tx_queue_id] = 2495 RTE_ETH_QUEUE_STATE_HAIRPIN; 2496 return eth_err(port_id, ret); 2497 } 2498 2499 int 2500 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2501 { 2502 struct rte_eth_dev *dev; 2503 int ret; 2504 2505 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2506 dev = &rte_eth_devices[tx_port]; 2507 2508 if (dev->data->dev_started == 0) { 2509 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2510 return -EBUSY; 2511 } 2512 2513 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2514 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2515 if (ret != 0) 2516 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2517 " to Rx %d (%d - all ports)\n", 2518 tx_port, rx_port, RTE_MAX_ETHPORTS); 2519 2520 return ret; 2521 } 2522 2523 int 2524 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2525 { 2526 struct rte_eth_dev *dev; 2527 int ret; 2528 2529 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2530 dev = &rte_eth_devices[tx_port]; 2531 2532 if (dev->data->dev_started == 0) { 2533 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2534 return -EBUSY; 2535 } 2536 2537 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2538 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2539 if (ret != 0) 2540 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2541 " from Rx %d (%d - all ports)\n", 2542 tx_port, rx_port, RTE_MAX_ETHPORTS); 2543 2544 return ret; 2545 } 2546 2547 int 2548 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2549 size_t len, uint32_t direction) 2550 { 2551 struct rte_eth_dev *dev; 2552 int ret; 2553 2554 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2555 dev = &rte_eth_devices[port_id]; 2556 2557 if (peer_ports == NULL) { 2558 RTE_ETHDEV_LOG(ERR, 2559 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2560 port_id); 2561 return -EINVAL; 2562 } 2563 2564 if (len == 0) { 2565 RTE_ETHDEV_LOG(ERR, 2566 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2567 port_id); 2568 return -EINVAL; 2569 } 2570 2571 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2572 -ENOTSUP); 2573 2574 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2575 len, direction); 2576 if (ret < 0) 2577 
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2578 port_id, direction ? "Rx" : "Tx"); 2579 2580 return ret; 2581 } 2582 2583 void 2584 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2585 void *userdata __rte_unused) 2586 { 2587 rte_pktmbuf_free_bulk(pkts, unsent); 2588 } 2589 2590 void 2591 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2592 void *userdata) 2593 { 2594 uint64_t *count = userdata; 2595 2596 rte_pktmbuf_free_bulk(pkts, unsent); 2597 *count += unsent; 2598 } 2599 2600 int 2601 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2602 buffer_tx_error_fn cbfn, void *userdata) 2603 { 2604 if (buffer == NULL) { 2605 RTE_ETHDEV_LOG(ERR, 2606 "Cannot set Tx buffer error callback to NULL buffer\n"); 2607 return -EINVAL; 2608 } 2609 2610 buffer->error_callback = cbfn; 2611 buffer->error_userdata = userdata; 2612 return 0; 2613 } 2614 2615 int 2616 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2617 { 2618 int ret = 0; 2619 2620 if (buffer == NULL) { 2621 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2622 return -EINVAL; 2623 } 2624 2625 buffer->size = size; 2626 if (buffer->error_callback == NULL) { 2627 ret = rte_eth_tx_buffer_set_err_callback( 2628 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2629 } 2630 2631 return ret; 2632 } 2633 2634 int 2635 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2636 { 2637 struct rte_eth_dev *dev; 2638 int ret; 2639 2640 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2641 dev = &rte_eth_devices[port_id]; 2642 2643 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2644 2645 /* Call driver to free pending mbufs. */ 2646 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2647 free_cnt); 2648 return eth_err(port_id, ret); 2649 } 2650 2651 int 2652 rte_eth_promiscuous_enable(uint16_t port_id) 2653 { 2654 struct rte_eth_dev *dev; 2655 int diag = 0; 2656 2657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2658 dev = &rte_eth_devices[port_id]; 2659 2660 if (dev->data->promiscuous == 1) 2661 return 0; 2662 2663 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2664 2665 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2666 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2667 2668 return eth_err(port_id, diag); 2669 } 2670 2671 int 2672 rte_eth_promiscuous_disable(uint16_t port_id) 2673 { 2674 struct rte_eth_dev *dev; 2675 int diag = 0; 2676 2677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2678 dev = &rte_eth_devices[port_id]; 2679 2680 if (dev->data->promiscuous == 0) 2681 return 0; 2682 2683 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2684 2685 dev->data->promiscuous = 0; 2686 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2687 if (diag != 0) 2688 dev->data->promiscuous = 1; 2689 2690 return eth_err(port_id, diag); 2691 } 2692 2693 int 2694 rte_eth_promiscuous_get(uint16_t port_id) 2695 { 2696 struct rte_eth_dev *dev; 2697 2698 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2699 dev = &rte_eth_devices[port_id]; 2700 2701 return dev->data->promiscuous; 2702 } 2703 2704 int 2705 rte_eth_allmulticast_enable(uint16_t port_id) 2706 { 2707 struct rte_eth_dev *dev; 2708 int diag; 2709 2710 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2711 dev = &rte_eth_devices[port_id]; 2712 2713 if (dev->data->all_multicast == 1) 2714 return 0; 2715 2716 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2717 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2718 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2719 2720 return eth_err(port_id, diag); 2721 } 2722 2723 int 2724 rte_eth_allmulticast_disable(uint16_t port_id) 2725 { 2726 struct rte_eth_dev *dev; 2727 int diag; 2728 2729 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2730 dev = &rte_eth_devices[port_id]; 2731 2732 if (dev->data->all_multicast == 0) 2733 return 0; 2734 2735 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2736 dev->data->all_multicast = 0; 2737 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2738 if (diag != 0) 2739 dev->data->all_multicast = 1; 2740 2741 return eth_err(port_id, diag); 2742 } 2743 2744 int 2745 rte_eth_allmulticast_get(uint16_t port_id) 2746 { 2747 struct rte_eth_dev *dev; 2748 2749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2750 dev = &rte_eth_devices[port_id]; 2751 2752 return dev->data->all_multicast; 2753 } 2754 2755 int 2756 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2757 { 2758 struct rte_eth_dev *dev; 2759 2760 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2761 dev = &rte_eth_devices[port_id]; 2762 2763 if (eth_link == NULL) { 2764 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2765 port_id); 2766 return -EINVAL; 2767 } 2768 2769 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2770 rte_eth_linkstatus_get(dev, eth_link); 2771 else { 2772 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2773 (*dev->dev_ops->link_update)(dev, 1); 2774 *eth_link = dev->data->dev_link; 2775 } 2776 2777 return 0; 2778 } 2779 2780 int 2781 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2782 { 2783 struct rte_eth_dev *dev; 2784 2785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2786 dev = &rte_eth_devices[port_id]; 2787 2788 if (eth_link == NULL) { 2789 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2790 port_id); 2791 return -EINVAL; 2792 } 2793 2794 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2795 rte_eth_linkstatus_get(dev, eth_link); 2796 else { 2797 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2798 (*dev->dev_ops->link_update)(dev, 0); 2799 *eth_link = dev->data->dev_link; 2800 } 2801 2802 return 0; 2803 } 2804 2805 const 
char * 2806 rte_eth_link_speed_to_str(uint32_t link_speed) 2807 { 2808 switch (link_speed) { 2809 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2810 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2811 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2812 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2813 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2814 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2815 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2816 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2817 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2818 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2819 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2820 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2821 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2822 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2823 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2824 default: return "Invalid"; 2825 } 2826 } 2827 2828 int 2829 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2830 { 2831 if (str == NULL) { 2832 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2833 return -EINVAL; 2834 } 2835 2836 if (len == 0) { 2837 RTE_ETHDEV_LOG(ERR, 2838 "Cannot convert link to string with zero size\n"); 2839 return -EINVAL; 2840 } 2841 2842 if (eth_link == NULL) { 2843 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2844 return -EINVAL; 2845 } 2846 2847 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2848 return snprintf(str, len, "Link down"); 2849 else 2850 return snprintf(str, len, "Link up at %s %s %s", 2851 rte_eth_link_speed_to_str(eth_link->link_speed), 2852 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2853 "FDX" : "HDX", 2854 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2855 "Autoneg" : "Fixed"); 2856 } 2857 2858 int 2859 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2860 { 2861 struct rte_eth_dev *dev; 2862 2863 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2864 dev = &rte_eth_devices[port_id]; 2865 2866 if (stats == NULL) { 2867 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2868 port_id); 2869 return -EINVAL; 2870 } 2871 2872 memset(stats, 0, sizeof(*stats)); 2873 2874 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2875 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2876 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2877 } 2878 2879 int 2880 rte_eth_stats_reset(uint16_t port_id) 2881 { 2882 struct rte_eth_dev *dev; 2883 int ret; 2884 2885 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2886 dev = &rte_eth_devices[port_id]; 2887 2888 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2889 ret = (*dev->dev_ops->stats_reset)(dev); 2890 if (ret != 0) 2891 return eth_err(port_id, ret); 2892 2893 dev->data->rx_mbuf_alloc_failed = 0; 2894 2895 return 0; 2896 } 2897 2898 static inline int 2899 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2900 { 2901 uint16_t nb_rxqs, nb_txqs; 2902 int count; 2903 2904 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2905 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2906 2907 count = RTE_NB_STATS; 2908 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2909 count += nb_rxqs * RTE_NB_RXQ_STATS; 2910 count += nb_txqs * RTE_NB_TXQ_STATS; 2911 } 2912 2913 return count; 2914 } 2915 2916 static int 2917 eth_dev_get_xstats_count(uint16_t port_id) 2918 { 2919 struct rte_eth_dev *dev; 2920 int count; 2921 2922 
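	/*
	 * The total number of xstats is the generic (basic) count from
	 * eth_dev_get_xstats_basic_count() plus any driver-specific entries
	 * reported by the xstats_get_names callback when it is queried with
	 * a NULL array below.
	 */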
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2923 dev = &rte_eth_devices[port_id]; 2924 if (dev->dev_ops->xstats_get_names != NULL) { 2925 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2926 if (count < 0) 2927 return eth_err(port_id, count); 2928 } else 2929 count = 0; 2930 2931 2932 count += eth_dev_get_xstats_basic_count(dev); 2933 2934 return count; 2935 } 2936 2937 int 2938 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2939 uint64_t *id) 2940 { 2941 int cnt_xstats, idx_xstat; 2942 2943 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2944 2945 if (xstat_name == NULL) { 2946 RTE_ETHDEV_LOG(ERR, 2947 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2948 port_id); 2949 return -ENOMEM; 2950 } 2951 2952 if (id == NULL) { 2953 RTE_ETHDEV_LOG(ERR, 2954 "Cannot get ethdev port %u xstats ID to NULL\n", 2955 port_id); 2956 return -ENOMEM; 2957 } 2958 2959 /* Get count */ 2960 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2961 if (cnt_xstats < 0) { 2962 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2963 return -ENODEV; 2964 } 2965 2966 /* Get id-name lookup table */ 2967 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2968 2969 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2970 port_id, xstats_names, cnt_xstats, NULL)) { 2971 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2972 return -1; 2973 } 2974 2975 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2976 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2977 *id = idx_xstat; 2978 return 0; 2979 }; 2980 } 2981 2982 return -EINVAL; 2983 } 2984 2985 /* retrieve basic stats names */ 2986 static int 2987 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2988 struct rte_eth_xstat_name *xstats_names) 2989 { 2990 int cnt_used_entries = 0; 2991 uint32_t idx, id_queue; 2992 uint16_t num_q; 2993 2994 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2995 strlcpy(xstats_names[cnt_used_entries].name, 2996 eth_dev_stats_strings[idx].name, 2997 sizeof(xstats_names[0].name)); 2998 cnt_used_entries++; 2999 } 3000 3001 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3002 return cnt_used_entries; 3003 3004 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3005 for (id_queue = 0; id_queue < num_q; id_queue++) { 3006 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3007 snprintf(xstats_names[cnt_used_entries].name, 3008 sizeof(xstats_names[0].name), 3009 "rx_q%u_%s", 3010 id_queue, eth_dev_rxq_stats_strings[idx].name); 3011 cnt_used_entries++; 3012 } 3013 3014 } 3015 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3016 for (id_queue = 0; id_queue < num_q; id_queue++) { 3017 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3018 snprintf(xstats_names[cnt_used_entries].name, 3019 sizeof(xstats_names[0].name), 3020 "tx_q%u_%s", 3021 id_queue, eth_dev_txq_stats_strings[idx].name); 3022 cnt_used_entries++; 3023 } 3024 } 3025 return cnt_used_entries; 3026 } 3027 3028 /* retrieve ethdev extended statistics names */ 3029 int 3030 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3031 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3032 uint64_t *ids) 3033 { 3034 struct rte_eth_xstat_name *xstats_names_copy; 3035 unsigned int no_basic_stat_requested = 1; 3036 unsigned int no_ext_stat_requested = 1; 3037 unsigned int expected_entries; 3038 unsigned int basic_count; 3039 struct rte_eth_dev *dev; 3040 unsigned int i; 3041 int ret; 3042 3043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3044 
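	/*
	 * In the ID space exposed to the application, indices
	 * [0, basic_count) refer to the generic statistics and indices from
	 * basic_count onwards refer to driver-specific xstats; the ID
	 * conversion and filtering below rely on this layout.
	 */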
dev = &rte_eth_devices[port_id]; 3045 3046 basic_count = eth_dev_get_xstats_basic_count(dev); 3047 ret = eth_dev_get_xstats_count(port_id); 3048 if (ret < 0) 3049 return ret; 3050 expected_entries = (unsigned int)ret; 3051 3052 /* Return max number of stats if no ids given */ 3053 if (!ids) { 3054 if (!xstats_names) 3055 return expected_entries; 3056 else if (xstats_names && size < expected_entries) 3057 return expected_entries; 3058 } 3059 3060 if (ids && !xstats_names) 3061 return -EINVAL; 3062 3063 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3064 uint64_t ids_copy[size]; 3065 3066 for (i = 0; i < size; i++) { 3067 if (ids[i] < basic_count) { 3068 no_basic_stat_requested = 0; 3069 break; 3070 } 3071 3072 /* 3073 * Convert ids to xstats ids that PMD knows. 3074 * ids known by user are basic + extended stats. 3075 */ 3076 ids_copy[i] = ids[i] - basic_count; 3077 } 3078 3079 if (no_basic_stat_requested) 3080 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3081 ids_copy, xstats_names, size); 3082 } 3083 3084 /* Retrieve all stats */ 3085 if (!ids) { 3086 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3087 expected_entries); 3088 if (num_stats < 0 || num_stats > (int)expected_entries) 3089 return num_stats; 3090 else 3091 return expected_entries; 3092 } 3093 3094 xstats_names_copy = calloc(expected_entries, 3095 sizeof(struct rte_eth_xstat_name)); 3096 3097 if (!xstats_names_copy) { 3098 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3099 return -ENOMEM; 3100 } 3101 3102 if (ids) { 3103 for (i = 0; i < size; i++) { 3104 if (ids[i] >= basic_count) { 3105 no_ext_stat_requested = 0; 3106 break; 3107 } 3108 } 3109 } 3110 3111 /* Fill xstats_names_copy structure */ 3112 if (ids && no_ext_stat_requested) { 3113 eth_basic_stats_get_names(dev, xstats_names_copy); 3114 } else { 3115 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3116 expected_entries); 3117 if (ret < 0) { 3118 free(xstats_names_copy); 3119 return ret; 3120 } 3121 } 3122 3123 /* Filter stats */ 3124 for (i = 0; i < size; i++) { 3125 if (ids[i] >= expected_entries) { 3126 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3127 free(xstats_names_copy); 3128 return -1; 3129 } 3130 xstats_names[i] = xstats_names_copy[ids[i]]; 3131 } 3132 3133 free(xstats_names_copy); 3134 return size; 3135 } 3136 3137 int 3138 rte_eth_xstats_get_names(uint16_t port_id, 3139 struct rte_eth_xstat_name *xstats_names, 3140 unsigned int size) 3141 { 3142 struct rte_eth_dev *dev; 3143 int cnt_used_entries; 3144 int cnt_expected_entries; 3145 int cnt_driver_entries; 3146 3147 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3148 if (xstats_names == NULL || cnt_expected_entries < 0 || 3149 (int)size < cnt_expected_entries) 3150 return cnt_expected_entries; 3151 3152 /* port_id checked in eth_dev_get_xstats_count() */ 3153 dev = &rte_eth_devices[port_id]; 3154 3155 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3156 3157 if (dev->dev_ops->xstats_get_names != NULL) { 3158 /* If there are any driver-specific xstats, append them 3159 * to end of list. 
3160 */ 3161 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3162 dev, 3163 xstats_names + cnt_used_entries, 3164 size - cnt_used_entries); 3165 if (cnt_driver_entries < 0) 3166 return eth_err(port_id, cnt_driver_entries); 3167 cnt_used_entries += cnt_driver_entries; 3168 } 3169 3170 return cnt_used_entries; 3171 } 3172 3173 3174 static int 3175 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3176 { 3177 struct rte_eth_dev *dev; 3178 struct rte_eth_stats eth_stats; 3179 unsigned int count = 0, i, q; 3180 uint64_t val, *stats_ptr; 3181 uint16_t nb_rxqs, nb_txqs; 3182 int ret; 3183 3184 ret = rte_eth_stats_get(port_id, ð_stats); 3185 if (ret < 0) 3186 return ret; 3187 3188 dev = &rte_eth_devices[port_id]; 3189 3190 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3191 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3192 3193 /* global stats */ 3194 for (i = 0; i < RTE_NB_STATS; i++) { 3195 stats_ptr = RTE_PTR_ADD(ð_stats, 3196 eth_dev_stats_strings[i].offset); 3197 val = *stats_ptr; 3198 xstats[count++].value = val; 3199 } 3200 3201 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3202 return count; 3203 3204 /* per-rxq stats */ 3205 for (q = 0; q < nb_rxqs; q++) { 3206 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3207 stats_ptr = RTE_PTR_ADD(ð_stats, 3208 eth_dev_rxq_stats_strings[i].offset + 3209 q * sizeof(uint64_t)); 3210 val = *stats_ptr; 3211 xstats[count++].value = val; 3212 } 3213 } 3214 3215 /* per-txq stats */ 3216 for (q = 0; q < nb_txqs; q++) { 3217 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3218 stats_ptr = RTE_PTR_ADD(ð_stats, 3219 eth_dev_txq_stats_strings[i].offset + 3220 q * sizeof(uint64_t)); 3221 val = *stats_ptr; 3222 xstats[count++].value = val; 3223 } 3224 } 3225 return count; 3226 } 3227 3228 /* retrieve ethdev extended statistics */ 3229 int 3230 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3231 uint64_t *values, unsigned int size) 3232 { 3233 unsigned int no_basic_stat_requested = 1; 3234 unsigned int no_ext_stat_requested = 1; 3235 unsigned int num_xstats_filled; 3236 unsigned int basic_count; 3237 uint16_t expected_entries; 3238 struct rte_eth_dev *dev; 3239 unsigned int i; 3240 int ret; 3241 3242 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3243 dev = &rte_eth_devices[port_id]; 3244 3245 ret = eth_dev_get_xstats_count(port_id); 3246 if (ret < 0) 3247 return ret; 3248 expected_entries = (uint16_t)ret; 3249 struct rte_eth_xstat xstats[expected_entries]; 3250 basic_count = eth_dev_get_xstats_basic_count(dev); 3251 3252 /* Return max number of stats if no ids given */ 3253 if (!ids) { 3254 if (!values) 3255 return expected_entries; 3256 else if (values && size < expected_entries) 3257 return expected_entries; 3258 } 3259 3260 if (ids && !values) 3261 return -EINVAL; 3262 3263 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3264 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3265 uint64_t ids_copy[size]; 3266 3267 for (i = 0; i < size; i++) { 3268 if (ids[i] < basic_count) { 3269 no_basic_stat_requested = 0; 3270 break; 3271 } 3272 3273 /* 3274 * Convert ids to xstats ids that PMD knows. 3275 * ids known by user are basic + extended stats. 
3276 */ 3277 ids_copy[i] = ids[i] - basic_count; 3278 } 3279 3280 if (no_basic_stat_requested) 3281 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3282 values, size); 3283 } 3284 3285 if (ids) { 3286 for (i = 0; i < size; i++) { 3287 if (ids[i] >= basic_count) { 3288 no_ext_stat_requested = 0; 3289 break; 3290 } 3291 } 3292 } 3293 3294 /* Fill the xstats structure */ 3295 if (ids && no_ext_stat_requested) 3296 ret = eth_basic_stats_get(port_id, xstats); 3297 else 3298 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3299 3300 if (ret < 0) 3301 return ret; 3302 num_xstats_filled = (unsigned int)ret; 3303 3304 /* Return all stats */ 3305 if (!ids) { 3306 for (i = 0; i < num_xstats_filled; i++) 3307 values[i] = xstats[i].value; 3308 return expected_entries; 3309 } 3310 3311 /* Filter stats */ 3312 for (i = 0; i < size; i++) { 3313 if (ids[i] >= expected_entries) { 3314 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3315 return -1; 3316 } 3317 values[i] = xstats[ids[i]].value; 3318 } 3319 return size; 3320 } 3321 3322 int 3323 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3324 unsigned int n) 3325 { 3326 struct rte_eth_dev *dev; 3327 unsigned int count = 0, i; 3328 signed int xcount = 0; 3329 uint16_t nb_rxqs, nb_txqs; 3330 int ret; 3331 3332 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3333 dev = &rte_eth_devices[port_id]; 3334 3335 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3336 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3337 3338 /* Return generic statistics */ 3339 count = RTE_NB_STATS; 3340 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) 3341 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS); 3342 3343 /* implemented by the driver */ 3344 if (dev->dev_ops->xstats_get != NULL) { 3345 /* Retrieve the xstats from the driver at the end of the 3346 * xstats struct. 3347 */ 3348 xcount = (*dev->dev_ops->xstats_get)(dev, 3349 xstats ? xstats + count : NULL, 3350 (n > count) ? 
n - count : 0); 3351 3352 if (xcount < 0) 3353 return eth_err(port_id, xcount); 3354 } 3355 3356 if (n < count + xcount || xstats == NULL) 3357 return count + xcount; 3358 3359 /* now fill the xstats structure */ 3360 ret = eth_basic_stats_get(port_id, xstats); 3361 if (ret < 0) 3362 return ret; 3363 count = ret; 3364 3365 for (i = 0; i < count; i++) 3366 xstats[i].id = i; 3367 /* add an offset to driver-specific stats */ 3368 for ( ; i < count + xcount; i++) 3369 xstats[i].id += count; 3370 3371 return count + xcount; 3372 } 3373 3374 /* reset ethdev extended statistics */ 3375 int 3376 rte_eth_xstats_reset(uint16_t port_id) 3377 { 3378 struct rte_eth_dev *dev; 3379 3380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3381 dev = &rte_eth_devices[port_id]; 3382 3383 /* implemented by the driver */ 3384 if (dev->dev_ops->xstats_reset != NULL) 3385 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3386 3387 /* fallback to default */ 3388 return rte_eth_stats_reset(port_id); 3389 } 3390 3391 static int 3392 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3393 uint8_t stat_idx, uint8_t is_rx) 3394 { 3395 struct rte_eth_dev *dev; 3396 3397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3398 dev = &rte_eth_devices[port_id]; 3399 3400 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3401 return -EINVAL; 3402 3403 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3404 return -EINVAL; 3405 3406 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3407 return -EINVAL; 3408 3409 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3410 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3411 } 3412 3413 int 3414 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3415 uint8_t stat_idx) 3416 { 3417 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3418 tx_queue_id, 3419 stat_idx, STAT_QMAP_TX)); 3420 } 3421 3422 int 3423 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3424 uint8_t stat_idx) 3425 { 3426 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3427 rx_queue_id, 3428 stat_idx, STAT_QMAP_RX)); 3429 } 3430 3431 int 3432 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3433 { 3434 struct rte_eth_dev *dev; 3435 3436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3437 dev = &rte_eth_devices[port_id]; 3438 3439 if (fw_version == NULL && fw_size > 0) { 3440 RTE_ETHDEV_LOG(ERR, 3441 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3442 port_id); 3443 return -EINVAL; 3444 } 3445 3446 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3447 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3448 fw_version, fw_size)); 3449 } 3450 3451 int 3452 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3453 { 3454 struct rte_eth_dev *dev; 3455 const struct rte_eth_desc_lim lim = { 3456 .nb_max = UINT16_MAX, 3457 .nb_min = 0, 3458 .nb_align = 1, 3459 .nb_seg_max = UINT16_MAX, 3460 .nb_mtu_seg_max = UINT16_MAX, 3461 }; 3462 int diag; 3463 3464 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3465 dev = &rte_eth_devices[port_id]; 3466 3467 if (dev_info == NULL) { 3468 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3469 port_id); 3470 return -EINVAL; 3471 } 3472 3473 /* 3474 * Init dev_info before port_id check since caller does not have 3475 * return status and does not know if get is successful or not. 
3476 */ 3477 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3478 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3479 3480 dev_info->rx_desc_lim = lim; 3481 dev_info->tx_desc_lim = lim; 3482 dev_info->device = dev->device; 3483 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3484 RTE_ETHER_CRC_LEN; 3485 dev_info->max_mtu = UINT16_MAX; 3486 3487 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3488 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3489 if (diag != 0) { 3490 /* Cleanup already filled in device information */ 3491 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3492 return eth_err(port_id, diag); 3493 } 3494 3495 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3496 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3497 RTE_MAX_QUEUES_PER_PORT); 3498 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3499 RTE_MAX_QUEUES_PER_PORT); 3500 3501 dev_info->driver_name = dev->device->driver->name; 3502 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3503 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3504 3505 dev_info->dev_flags = &dev->data->dev_flags; 3506 3507 return 0; 3508 } 3509 3510 int 3511 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3512 { 3513 struct rte_eth_dev *dev; 3514 3515 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3516 dev = &rte_eth_devices[port_id]; 3517 3518 if (dev_conf == NULL) { 3519 RTE_ETHDEV_LOG(ERR, 3520 "Cannot get ethdev port %u configuration to NULL\n", 3521 port_id); 3522 return -EINVAL; 3523 } 3524 3525 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3526 3527 return 0; 3528 } 3529 3530 int 3531 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3532 uint32_t *ptypes, int num) 3533 { 3534 int i, j; 3535 struct rte_eth_dev *dev; 3536 const uint32_t *all_ptypes; 3537 3538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3539 dev = &rte_eth_devices[port_id]; 3540 3541 if (ptypes == NULL && num > 0) { 3542 RTE_ETHDEV_LOG(ERR, 3543 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3544 port_id); 3545 return -EINVAL; 3546 } 3547 3548 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3549 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3550 3551 if (!all_ptypes) 3552 return 0; 3553 3554 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3555 if (all_ptypes[i] & ptype_mask) { 3556 if (j < num) 3557 ptypes[j] = all_ptypes[i]; 3558 j++; 3559 } 3560 3561 return j; 3562 } 3563 3564 int 3565 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3566 uint32_t *set_ptypes, unsigned int num) 3567 { 3568 const uint32_t valid_ptype_masks[] = { 3569 RTE_PTYPE_L2_MASK, 3570 RTE_PTYPE_L3_MASK, 3571 RTE_PTYPE_L4_MASK, 3572 RTE_PTYPE_TUNNEL_MASK, 3573 RTE_PTYPE_INNER_L2_MASK, 3574 RTE_PTYPE_INNER_L3_MASK, 3575 RTE_PTYPE_INNER_L4_MASK, 3576 }; 3577 const uint32_t *all_ptypes; 3578 struct rte_eth_dev *dev; 3579 uint32_t unused_mask; 3580 unsigned int i, j; 3581 int ret; 3582 3583 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3584 dev = &rte_eth_devices[port_id]; 3585 3586 if (num > 0 && set_ptypes == NULL) { 3587 RTE_ETHDEV_LOG(ERR, 3588 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3589 port_id); 3590 return -EINVAL; 3591 } 3592 3593 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3594 *dev->dev_ops->dev_ptypes_set == NULL) { 3595 ret = 0; 3596 goto 
ptype_unknown; 3597 } 3598 3599 if (ptype_mask == 0) { 3600 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3601 ptype_mask); 3602 goto ptype_unknown; 3603 } 3604 3605 unused_mask = ptype_mask; 3606 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3607 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3608 if (mask && mask != valid_ptype_masks[i]) { 3609 ret = -EINVAL; 3610 goto ptype_unknown; 3611 } 3612 unused_mask &= ~valid_ptype_masks[i]; 3613 } 3614 3615 if (unused_mask) { 3616 ret = -EINVAL; 3617 goto ptype_unknown; 3618 } 3619 3620 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3621 if (all_ptypes == NULL) { 3622 ret = 0; 3623 goto ptype_unknown; 3624 } 3625 3626 /* 3627 * Accommodate as many set_ptypes as possible. If the supplied 3628 * set_ptypes array is insufficient fill it partially. 3629 */ 3630 for (i = 0, j = 0; set_ptypes != NULL && 3631 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3632 if (ptype_mask & all_ptypes[i]) { 3633 if (j < num - 1) { 3634 set_ptypes[j] = all_ptypes[i]; 3635 j++; 3636 continue; 3637 } 3638 break; 3639 } 3640 } 3641 3642 if (set_ptypes != NULL && j < num) 3643 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3644 3645 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3646 3647 ptype_unknown: 3648 if (num > 0) 3649 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3650 3651 return ret; 3652 } 3653 3654 int 3655 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3656 unsigned int num) 3657 { 3658 int32_t ret; 3659 struct rte_eth_dev *dev; 3660 struct rte_eth_dev_info dev_info; 3661 3662 if (ma == NULL) { 3663 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3664 return -EINVAL; 3665 } 3666 3667 /* will check for us that port_id is a valid one */ 3668 ret = rte_eth_dev_info_get(port_id, &dev_info); 3669 if (ret != 0) 3670 return ret; 3671 3672 dev = &rte_eth_devices[port_id]; 3673 num = RTE_MIN(dev_info.max_mac_addrs, num); 3674 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3675 3676 return num; 3677 } 3678 3679 int 3680 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3681 { 3682 struct rte_eth_dev *dev; 3683 3684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3685 dev = &rte_eth_devices[port_id]; 3686 3687 if (mac_addr == NULL) { 3688 RTE_ETHDEV_LOG(ERR, 3689 "Cannot get ethdev port %u MAC address to NULL\n", 3690 port_id); 3691 return -EINVAL; 3692 } 3693 3694 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3695 3696 return 0; 3697 } 3698 3699 int 3700 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3701 { 3702 struct rte_eth_dev *dev; 3703 3704 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3705 dev = &rte_eth_devices[port_id]; 3706 3707 if (mtu == NULL) { 3708 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3709 port_id); 3710 return -EINVAL; 3711 } 3712 3713 *mtu = dev->data->mtu; 3714 return 0; 3715 } 3716 3717 int 3718 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3719 { 3720 int ret; 3721 struct rte_eth_dev_info dev_info; 3722 struct rte_eth_dev *dev; 3723 3724 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3725 dev = &rte_eth_devices[port_id]; 3726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3727 3728 /* 3729 * Check if the device supports dev_infos_get, if it does not 3730 * skip min_mtu/max_mtu validation here as this requires values 3731 * that are populated within the call to rte_eth_dev_info_get() 3732 * which relies on dev->dev_ops->dev_infos_get. 
3733 */ 3734 if (*dev->dev_ops->dev_infos_get != NULL) { 3735 ret = rte_eth_dev_info_get(port_id, &dev_info); 3736 if (ret != 0) 3737 return ret; 3738 3739 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3740 if (ret != 0) 3741 return ret; 3742 } 3743 3744 if (dev->data->dev_configured == 0) { 3745 RTE_ETHDEV_LOG(ERR, 3746 "Port %u must be configured before MTU set\n", 3747 port_id); 3748 return -EINVAL; 3749 } 3750 3751 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3752 if (ret == 0) 3753 dev->data->mtu = mtu; 3754 3755 return eth_err(port_id, ret); 3756 } 3757 3758 int 3759 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3760 { 3761 struct rte_eth_dev *dev; 3762 int ret; 3763 3764 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3765 dev = &rte_eth_devices[port_id]; 3766 3767 if (!(dev->data->dev_conf.rxmode.offloads & 3768 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3769 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3770 port_id); 3771 return -ENOSYS; 3772 } 3773 3774 if (vlan_id > 4095) { 3775 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3776 port_id, vlan_id); 3777 return -EINVAL; 3778 } 3779 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3780 3781 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3782 if (ret == 0) { 3783 struct rte_vlan_filter_conf *vfc; 3784 int vidx; 3785 int vbit; 3786 3787 vfc = &dev->data->vlan_filter_conf; 3788 vidx = vlan_id / 64; 3789 vbit = vlan_id % 64; 3790 3791 if (on) 3792 vfc->ids[vidx] |= RTE_BIT64(vbit); 3793 else 3794 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3795 } 3796 3797 return eth_err(port_id, ret); 3798 } 3799 3800 int 3801 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3802 int on) 3803 { 3804 struct rte_eth_dev *dev; 3805 3806 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3807 dev = &rte_eth_devices[port_id]; 3808 3809 if (rx_queue_id >= dev->data->nb_rx_queues) { 3810 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3811 return -EINVAL; 3812 } 3813 3814 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3815 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3816 3817 return 0; 3818 } 3819 3820 int 3821 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3822 enum rte_vlan_type vlan_type, 3823 uint16_t tpid) 3824 { 3825 struct rte_eth_dev *dev; 3826 3827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3828 dev = &rte_eth_devices[port_id]; 3829 3830 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3831 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3832 tpid)); 3833 } 3834 3835 int 3836 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3837 { 3838 struct rte_eth_dev_info dev_info; 3839 struct rte_eth_dev *dev; 3840 int ret = 0; 3841 int mask = 0; 3842 int cur, org = 0; 3843 uint64_t orig_offloads; 3844 uint64_t dev_offloads; 3845 uint64_t new_offloads; 3846 3847 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3848 dev = &rte_eth_devices[port_id]; 3849 3850 /* save original values in case of failure */ 3851 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3852 dev_offloads = orig_offloads; 3853 3854 /* check which option changed by application */ 3855 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3856 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3857 if (cur != org) { 3858 if (cur) 3859 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3860 else 3861 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3862 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3863 } 3864 3865 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3866 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3867 if (cur != org) { 3868 if (cur) 3869 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3870 else 3871 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3872 mask |= RTE_ETH_VLAN_FILTER_MASK; 3873 } 3874 3875 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3876 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3877 if (cur != org) { 3878 if (cur) 3879 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3880 else 3881 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3882 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3883 } 3884 3885 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3886 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3887 if (cur != org) { 3888 if (cur) 3889 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3890 else 3891 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3892 mask |= RTE_ETH_QINQ_STRIP_MASK; 3893 } 3894 3895 /*no change*/ 3896 if (mask == 0) 3897 return ret; 3898 3899 ret = rte_eth_dev_info_get(port_id, &dev_info); 3900 if (ret != 0) 3901 return ret; 3902 3903 /* Rx VLAN offloading must be within its device capabilities */ 3904 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3905 new_offloads = dev_offloads & ~orig_offloads; 3906 RTE_ETHDEV_LOG(ERR, 3907 "Ethdev port_id=%u requested new added VLAN offloads " 3908 "0x%" PRIx64 " must be within Rx offloads capabilities " 3909 "0x%" PRIx64 " in %s()\n", 3910 port_id, new_offloads, dev_info.rx_offload_capa, 3911 __func__); 3912 return -EINVAL; 3913 } 3914 3915 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3916 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3917 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3918 if (ret) { 3919 /* hit an error restore original values */ 3920 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3921 } 3922 3923 return eth_err(port_id, ret); 3924 } 3925 3926 int 3927 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3928 { 3929 struct rte_eth_dev *dev; 3930 uint64_t *dev_offloads; 3931 int ret = 0; 3932 3933 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3934 dev = &rte_eth_devices[port_id]; 3935 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3936 3937 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3938 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3939 3940 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3941 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3942 3943 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3944 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3945 3946 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3947 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3948 3949 return ret; 3950 } 3951 3952 int 3953 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3954 { 3955 struct rte_eth_dev *dev; 3956 3957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3958 dev = &rte_eth_devices[port_id]; 3959 3960 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3961 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3962 } 3963 3964 int 3965 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3966 { 3967 struct rte_eth_dev *dev; 3968 3969 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3970 dev = &rte_eth_devices[port_id]; 3971 3972 if (fc_conf == NULL) { 3973 RTE_ETHDEV_LOG(ERR, 3974 "Cannot get ethdev port %u flow control config to NULL\n", 3975 port_id); 3976 return -EINVAL; 3977 } 3978 3979 
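	/*
	 * The config structure is zeroed before the driver callback is
	 * invoked, so fields a PMD does not fill in read back as zero
	 * rather than as uninitialized caller memory.
	 */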
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3980 memset(fc_conf, 0, sizeof(*fc_conf)); 3981 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3982 } 3983 3984 int 3985 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3986 { 3987 struct rte_eth_dev *dev; 3988 3989 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3990 dev = &rte_eth_devices[port_id]; 3991 3992 if (fc_conf == NULL) { 3993 RTE_ETHDEV_LOG(ERR, 3994 "Cannot set ethdev port %u flow control from NULL config\n", 3995 port_id); 3996 return -EINVAL; 3997 } 3998 3999 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4000 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4001 return -EINVAL; 4002 } 4003 4004 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 4005 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4006 } 4007 4008 int 4009 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4010 struct rte_eth_pfc_conf *pfc_conf) 4011 { 4012 struct rte_eth_dev *dev; 4013 4014 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4015 dev = &rte_eth_devices[port_id]; 4016 4017 if (pfc_conf == NULL) { 4018 RTE_ETHDEV_LOG(ERR, 4019 "Cannot set ethdev port %u priority flow control from NULL config\n", 4020 port_id); 4021 return -EINVAL; 4022 } 4023 4024 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4025 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4026 return -EINVAL; 4027 } 4028 4029 /* High water, low water validation are device specific */ 4030 if (*dev->dev_ops->priority_flow_ctrl_set) 4031 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4032 (dev, pfc_conf)); 4033 return -ENOTSUP; 4034 } 4035 4036 static int 4037 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4038 uint16_t reta_size) 4039 { 4040 uint16_t i, num; 4041 4042 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4043 for (i = 0; i < num; i++) { 4044 if (reta_conf[i].mask) 4045 return 0; 4046 } 4047 4048 return -EINVAL; 4049 } 4050 4051 static int 4052 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4053 uint16_t reta_size, 4054 uint16_t max_rxq) 4055 { 4056 uint16_t i, idx, shift; 4057 4058 if (max_rxq == 0) { 4059 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4060 return -EINVAL; 4061 } 4062 4063 for (i = 0; i < reta_size; i++) { 4064 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4065 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4066 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4067 (reta_conf[idx].reta[shift] >= max_rxq)) { 4068 RTE_ETHDEV_LOG(ERR, 4069 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4070 idx, shift, 4071 reta_conf[idx].reta[shift], max_rxq); 4072 return -EINVAL; 4073 } 4074 } 4075 4076 return 0; 4077 } 4078 4079 int 4080 rte_eth_dev_rss_reta_update(uint16_t port_id, 4081 struct rte_eth_rss_reta_entry64 *reta_conf, 4082 uint16_t reta_size) 4083 { 4084 struct rte_eth_dev *dev; 4085 int ret; 4086 4087 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4088 dev = &rte_eth_devices[port_id]; 4089 4090 if (reta_conf == NULL) { 4091 RTE_ETHDEV_LOG(ERR, 4092 "Cannot update ethdev port %u RSS RETA to NULL\n", 4093 port_id); 4094 return -EINVAL; 4095 } 4096 4097 if (reta_size == 0) { 4098 RTE_ETHDEV_LOG(ERR, 4099 "Cannot update ethdev port %u RSS RETA with zero size\n", 4100 port_id); 4101 return -EINVAL; 4102 } 4103 4104 /* Check mask bits */ 4105 ret = eth_check_reta_mask(reta_conf, reta_size); 4106 if (ret < 0) 4107 
return ret; 4108 4109 /* Check entry value */ 4110 ret = eth_check_reta_entry(reta_conf, reta_size, 4111 dev->data->nb_rx_queues); 4112 if (ret < 0) 4113 return ret; 4114 4115 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 4116 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4117 reta_size)); 4118 } 4119 4120 int 4121 rte_eth_dev_rss_reta_query(uint16_t port_id, 4122 struct rte_eth_rss_reta_entry64 *reta_conf, 4123 uint16_t reta_size) 4124 { 4125 struct rte_eth_dev *dev; 4126 int ret; 4127 4128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4129 dev = &rte_eth_devices[port_id]; 4130 4131 if (reta_conf == NULL) { 4132 RTE_ETHDEV_LOG(ERR, 4133 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4134 port_id); 4135 return -EINVAL; 4136 } 4137 4138 /* Check mask bits */ 4139 ret = eth_check_reta_mask(reta_conf, reta_size); 4140 if (ret < 0) 4141 return ret; 4142 4143 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 4144 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4145 reta_size)); 4146 } 4147 4148 int 4149 rte_eth_dev_rss_hash_update(uint16_t port_id, 4150 struct rte_eth_rss_conf *rss_conf) 4151 { 4152 struct rte_eth_dev *dev; 4153 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4154 int ret; 4155 4156 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4157 dev = &rte_eth_devices[port_id]; 4158 4159 if (rss_conf == NULL) { 4160 RTE_ETHDEV_LOG(ERR, 4161 "Cannot update ethdev port %u RSS hash from NULL config\n", 4162 port_id); 4163 return -EINVAL; 4164 } 4165 4166 ret = rte_eth_dev_info_get(port_id, &dev_info); 4167 if (ret != 0) 4168 return ret; 4169 4170 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4171 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4172 dev_info.flow_type_rss_offloads) { 4173 RTE_ETHDEV_LOG(ERR, 4174 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4175 port_id, rss_conf->rss_hf, 4176 dev_info.flow_type_rss_offloads); 4177 return -EINVAL; 4178 } 4179 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 4180 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4181 rss_conf)); 4182 } 4183 4184 int 4185 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4186 struct rte_eth_rss_conf *rss_conf) 4187 { 4188 struct rte_eth_dev *dev; 4189 4190 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4191 dev = &rte_eth_devices[port_id]; 4192 4193 if (rss_conf == NULL) { 4194 RTE_ETHDEV_LOG(ERR, 4195 "Cannot get ethdev port %u RSS hash config to NULL\n", 4196 port_id); 4197 return -EINVAL; 4198 } 4199 4200 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4201 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4202 rss_conf)); 4203 } 4204 4205 int 4206 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4207 struct rte_eth_udp_tunnel *udp_tunnel) 4208 { 4209 struct rte_eth_dev *dev; 4210 4211 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4212 dev = &rte_eth_devices[port_id]; 4213 4214 if (udp_tunnel == NULL) { 4215 RTE_ETHDEV_LOG(ERR, 4216 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4217 port_id); 4218 return -EINVAL; 4219 } 4220 4221 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4222 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4223 return -EINVAL; 4224 } 4225 4226 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4227 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4228 udp_tunnel)); 4229 } 4230 4231 
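/*
 * Illustrative usage sketch for the function above: registering the
 * IANA-assigned VXLAN port (4789) so the PMD can recognise VXLAN traffic.
 * The variable names and port_id are assumptions of the example only.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	int rc = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */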
int 4232 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4233 struct rte_eth_udp_tunnel *udp_tunnel) 4234 { 4235 struct rte_eth_dev *dev; 4236 4237 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4238 dev = &rte_eth_devices[port_id]; 4239 4240 if (udp_tunnel == NULL) { 4241 RTE_ETHDEV_LOG(ERR, 4242 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4243 port_id); 4244 return -EINVAL; 4245 } 4246 4247 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4248 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4249 return -EINVAL; 4250 } 4251 4252 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4253 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4254 udp_tunnel)); 4255 } 4256 4257 int 4258 rte_eth_led_on(uint16_t port_id) 4259 { 4260 struct rte_eth_dev *dev; 4261 4262 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4263 dev = &rte_eth_devices[port_id]; 4264 4265 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4266 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4267 } 4268 4269 int 4270 rte_eth_led_off(uint16_t port_id) 4271 { 4272 struct rte_eth_dev *dev; 4273 4274 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4275 dev = &rte_eth_devices[port_id]; 4276 4277 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4278 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4279 } 4280 4281 int 4282 rte_eth_fec_get_capability(uint16_t port_id, 4283 struct rte_eth_fec_capa *speed_fec_capa, 4284 unsigned int num) 4285 { 4286 struct rte_eth_dev *dev; 4287 int ret; 4288 4289 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4290 dev = &rte_eth_devices[port_id]; 4291 4292 if (speed_fec_capa == NULL && num > 0) { 4293 RTE_ETHDEV_LOG(ERR, 4294 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4295 port_id); 4296 return -EINVAL; 4297 } 4298 4299 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4300 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4301 4302 return ret; 4303 } 4304 4305 int 4306 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4307 { 4308 struct rte_eth_dev *dev; 4309 4310 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4311 dev = &rte_eth_devices[port_id]; 4312 4313 if (fec_capa == NULL) { 4314 RTE_ETHDEV_LOG(ERR, 4315 "Cannot get ethdev port %u current FEC mode to NULL\n", 4316 port_id); 4317 return -EINVAL; 4318 } 4319 4320 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4321 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4322 } 4323 4324 int 4325 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4326 { 4327 struct rte_eth_dev *dev; 4328 4329 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4330 dev = &rte_eth_devices[port_id]; 4331 4332 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4333 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4334 } 4335 4336 /* 4337 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4338 * an empty spot. 
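 * A negative return value means the address was not found (or the device
 * info query below failed).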
4339 */ 4340 static int 4341 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4342 { 4343 struct rte_eth_dev_info dev_info; 4344 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4345 unsigned i; 4346 int ret; 4347 4348 ret = rte_eth_dev_info_get(port_id, &dev_info); 4349 if (ret != 0) 4350 return -1; 4351 4352 for (i = 0; i < dev_info.max_mac_addrs; i++) 4353 if (memcmp(addr, &dev->data->mac_addrs[i], 4354 RTE_ETHER_ADDR_LEN) == 0) 4355 return i; 4356 4357 return -1; 4358 } 4359 4360 static const struct rte_ether_addr null_mac_addr; 4361 4362 int 4363 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4364 uint32_t pool) 4365 { 4366 struct rte_eth_dev *dev; 4367 int index; 4368 uint64_t pool_mask; 4369 int ret; 4370 4371 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4372 dev = &rte_eth_devices[port_id]; 4373 4374 if (addr == NULL) { 4375 RTE_ETHDEV_LOG(ERR, 4376 "Cannot add ethdev port %u MAC address from NULL address\n", 4377 port_id); 4378 return -EINVAL; 4379 } 4380 4381 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4382 4383 if (rte_is_zero_ether_addr(addr)) { 4384 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4385 port_id); 4386 return -EINVAL; 4387 } 4388 if (pool >= RTE_ETH_64_POOLS) { 4389 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4390 return -EINVAL; 4391 } 4392 4393 index = eth_dev_get_mac_addr_index(port_id, addr); 4394 if (index < 0) { 4395 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4396 if (index < 0) { 4397 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4398 port_id); 4399 return -ENOSPC; 4400 } 4401 } else { 4402 pool_mask = dev->data->mac_pool_sel[index]; 4403 4404 /* Check if both MAC address and pool is already there, and do nothing */ 4405 if (pool_mask & RTE_BIT64(pool)) 4406 return 0; 4407 } 4408 4409 /* Update NIC */ 4410 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4411 4412 if (ret == 0) { 4413 /* Update address in NIC data structure */ 4414 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4415 4416 /* Update pool bitmap in NIC data structure */ 4417 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4418 } 4419 4420 return eth_err(port_id, ret); 4421 } 4422 4423 int 4424 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4425 { 4426 struct rte_eth_dev *dev; 4427 int index; 4428 4429 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4430 dev = &rte_eth_devices[port_id]; 4431 4432 if (addr == NULL) { 4433 RTE_ETHDEV_LOG(ERR, 4434 "Cannot remove ethdev port %u MAC address from NULL address\n", 4435 port_id); 4436 return -EINVAL; 4437 } 4438 4439 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); 4440 4441 index = eth_dev_get_mac_addr_index(port_id, addr); 4442 if (index == 0) { 4443 RTE_ETHDEV_LOG(ERR, 4444 "Port %u: Cannot remove default MAC address\n", 4445 port_id); 4446 return -EADDRINUSE; 4447 } else if (index < 0) 4448 return 0; /* Do nothing if address wasn't found */ 4449 4450 /* Update NIC */ 4451 (*dev->dev_ops->mac_addr_remove)(dev, index); 4452 4453 /* Update address in NIC data structure */ 4454 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4455 4456 /* reset pool bitmap */ 4457 dev->data->mac_pool_sel[index] = 0; 4458 4459 return 0; 4460 } 4461 4462 int 4463 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4464 { 4465 struct rte_eth_dev *dev; 4466 int ret; 4467 4468 
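	/*
	 * The default MAC address lives in slot 0 of dev->data->mac_addrs;
	 * the driver callback is invoked first and the shadow copy below is
	 * updated only when the driver accepts the new address.
	 */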
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4469 dev = &rte_eth_devices[port_id]; 4470 4471 if (addr == NULL) { 4472 RTE_ETHDEV_LOG(ERR, 4473 "Cannot set ethdev port %u default MAC address from NULL address\n", 4474 port_id); 4475 return -EINVAL; 4476 } 4477 4478 if (!rte_is_valid_assigned_ether_addr(addr)) 4479 return -EINVAL; 4480 4481 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4482 4483 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4484 if (ret < 0) 4485 return ret; 4486 4487 /* Update default address in NIC data structure */ 4488 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4489 4490 return 0; 4491 } 4492 4493 4494 /* 4495 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4496 * an empty spot. 4497 */ 4498 static int 4499 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4500 const struct rte_ether_addr *addr) 4501 { 4502 struct rte_eth_dev_info dev_info; 4503 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4504 unsigned i; 4505 int ret; 4506 4507 ret = rte_eth_dev_info_get(port_id, &dev_info); 4508 if (ret != 0) 4509 return -1; 4510 4511 if (!dev->data->hash_mac_addrs) 4512 return -1; 4513 4514 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4515 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4516 RTE_ETHER_ADDR_LEN) == 0) 4517 return i; 4518 4519 return -1; 4520 } 4521 4522 int 4523 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4524 uint8_t on) 4525 { 4526 int index; 4527 int ret; 4528 struct rte_eth_dev *dev; 4529 4530 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4531 dev = &rte_eth_devices[port_id]; 4532 4533 if (addr == NULL) { 4534 RTE_ETHDEV_LOG(ERR, 4535 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4536 port_id); 4537 return -EINVAL; 4538 } 4539 4540 if (rte_is_zero_ether_addr(addr)) { 4541 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4542 port_id); 4543 return -EINVAL; 4544 } 4545 4546 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4547 /* Check if it's already there, and do nothing */ 4548 if ((index >= 0) && on) 4549 return 0; 4550 4551 if (index < 0) { 4552 if (!on) { 4553 RTE_ETHDEV_LOG(ERR, 4554 "Port %u: the MAC address was not set in UTA\n", 4555 port_id); 4556 return -EINVAL; 4557 } 4558 4559 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4560 if (index < 0) { 4561 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4562 port_id); 4563 return -ENOSPC; 4564 } 4565 } 4566 4567 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4568 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4569 if (ret == 0) { 4570 /* Update address in NIC data structure */ 4571 if (on) 4572 rte_ether_addr_copy(addr, 4573 &dev->data->hash_mac_addrs[index]); 4574 else 4575 rte_ether_addr_copy(&null_mac_addr, 4576 &dev->data->hash_mac_addrs[index]); 4577 } 4578 4579 return eth_err(port_id, ret); 4580 } 4581 4582 int 4583 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4584 { 4585 struct rte_eth_dev *dev; 4586 4587 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4588 dev = &rte_eth_devices[port_id]; 4589 4590 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4591 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4592 on)); 4593 } 4594 4595 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4596 uint16_t tx_rate) 4597 { 4598 struct rte_eth_dev *dev; 4599 struct rte_eth_dev_info dev_info; 4600 struct rte_eth_link 
link; 4601 int ret; 4602 4603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4604 dev = &rte_eth_devices[port_id]; 4605 4606 ret = rte_eth_dev_info_get(port_id, &dev_info); 4607 if (ret != 0) 4608 return ret; 4609 4610 link = dev->data->dev_link; 4611 4612 if (queue_idx > dev_info.max_tx_queues) { 4613 RTE_ETHDEV_LOG(ERR, 4614 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4615 port_id, queue_idx); 4616 return -EINVAL; 4617 } 4618 4619 if (tx_rate > link.link_speed) { 4620 RTE_ETHDEV_LOG(ERR, 4621 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4622 tx_rate, link.link_speed); 4623 return -EINVAL; 4624 } 4625 4626 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4627 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4628 queue_idx, tx_rate)); 4629 } 4630 4631 RTE_INIT(eth_dev_init_fp_ops) 4632 { 4633 uint32_t i; 4634 4635 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4636 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4637 } 4638 4639 RTE_INIT(eth_dev_init_cb_lists) 4640 { 4641 uint16_t i; 4642 4643 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4644 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4645 } 4646 4647 int 4648 rte_eth_dev_callback_register(uint16_t port_id, 4649 enum rte_eth_event_type event, 4650 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4651 { 4652 struct rte_eth_dev *dev; 4653 struct rte_eth_dev_callback *user_cb; 4654 uint16_t next_port; 4655 uint16_t last_port; 4656 4657 if (cb_fn == NULL) { 4658 RTE_ETHDEV_LOG(ERR, 4659 "Cannot register ethdev port %u callback from NULL\n", 4660 port_id); 4661 return -EINVAL; 4662 } 4663 4664 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4665 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4666 return -EINVAL; 4667 } 4668 4669 if (port_id == RTE_ETH_ALL) { 4670 next_port = 0; 4671 last_port = RTE_MAX_ETHPORTS - 1; 4672 } else { 4673 next_port = last_port = port_id; 4674 } 4675 4676 rte_spinlock_lock(ð_dev_cb_lock); 4677 4678 do { 4679 dev = &rte_eth_devices[next_port]; 4680 4681 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4682 if (user_cb->cb_fn == cb_fn && 4683 user_cb->cb_arg == cb_arg && 4684 user_cb->event == event) { 4685 break; 4686 } 4687 } 4688 4689 /* create a new callback. 
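	 * If an identical registration (same callback, argument and event)
	 * was found in the loop above, it is left in place and no new entry
	 * is added.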
*/ 4690 if (user_cb == NULL) { 4691 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4692 sizeof(struct rte_eth_dev_callback), 0); 4693 if (user_cb != NULL) { 4694 user_cb->cb_fn = cb_fn; 4695 user_cb->cb_arg = cb_arg; 4696 user_cb->event = event; 4697 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4698 user_cb, next); 4699 } else { 4700 rte_spinlock_unlock(ð_dev_cb_lock); 4701 rte_eth_dev_callback_unregister(port_id, event, 4702 cb_fn, cb_arg); 4703 return -ENOMEM; 4704 } 4705 4706 } 4707 } while (++next_port <= last_port); 4708 4709 rte_spinlock_unlock(ð_dev_cb_lock); 4710 return 0; 4711 } 4712 4713 int 4714 rte_eth_dev_callback_unregister(uint16_t port_id, 4715 enum rte_eth_event_type event, 4716 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4717 { 4718 int ret; 4719 struct rte_eth_dev *dev; 4720 struct rte_eth_dev_callback *cb, *next; 4721 uint16_t next_port; 4722 uint16_t last_port; 4723 4724 if (cb_fn == NULL) { 4725 RTE_ETHDEV_LOG(ERR, 4726 "Cannot unregister ethdev port %u callback from NULL\n", 4727 port_id); 4728 return -EINVAL; 4729 } 4730 4731 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4732 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4733 return -EINVAL; 4734 } 4735 4736 if (port_id == RTE_ETH_ALL) { 4737 next_port = 0; 4738 last_port = RTE_MAX_ETHPORTS - 1; 4739 } else { 4740 next_port = last_port = port_id; 4741 } 4742 4743 rte_spinlock_lock(ð_dev_cb_lock); 4744 4745 do { 4746 dev = &rte_eth_devices[next_port]; 4747 ret = 0; 4748 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4749 cb = next) { 4750 4751 next = TAILQ_NEXT(cb, next); 4752 4753 if (cb->cb_fn != cb_fn || cb->event != event || 4754 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4755 continue; 4756 4757 /* 4758 * if this callback is not executing right now, 4759 * then remove it. 4760 */ 4761 if (cb->active == 0) { 4762 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4763 rte_free(cb); 4764 } else { 4765 ret = -EAGAIN; 4766 } 4767 } 4768 } while (++next_port <= last_port); 4769 4770 rte_spinlock_unlock(ð_dev_cb_lock); 4771 return ret; 4772 } 4773 4774 int 4775 rte_eth_dev_callback_process(struct rte_eth_dev *dev, 4776 enum rte_eth_event_type event, void *ret_param) 4777 { 4778 struct rte_eth_dev_callback *cb_lst; 4779 struct rte_eth_dev_callback dev_cb; 4780 int rc = 0; 4781 4782 rte_spinlock_lock(ð_dev_cb_lock); 4783 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 4784 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 4785 continue; 4786 dev_cb = *cb_lst; 4787 cb_lst->active = 1; 4788 if (ret_param != NULL) 4789 dev_cb.ret_param = ret_param; 4790 4791 rte_spinlock_unlock(ð_dev_cb_lock); 4792 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, 4793 dev_cb.cb_arg, dev_cb.ret_param); 4794 rte_spinlock_lock(ð_dev_cb_lock); 4795 cb_lst->active = 0; 4796 } 4797 rte_spinlock_unlock(ð_dev_cb_lock); 4798 return rc; 4799 } 4800 4801 void 4802 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) 4803 { 4804 if (dev == NULL) 4805 return; 4806 4807 /* 4808 * for secondary process, at that point we expect device 4809 * to be already 'usable', so shared data and all function pointers 4810 * for fast-path devops have to be setup properly inside rte_eth_dev. 
4811 */ 4812 if (rte_eal_process_type() == RTE_PROC_SECONDARY) 4813 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); 4814 4815 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); 4816 4817 dev->state = RTE_ETH_DEV_ATTACHED; 4818 } 4819 4820 int 4821 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4822 { 4823 uint32_t vec; 4824 struct rte_eth_dev *dev; 4825 struct rte_intr_handle *intr_handle; 4826 uint16_t qid; 4827 int rc; 4828 4829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4830 dev = &rte_eth_devices[port_id]; 4831 4832 if (!dev->intr_handle) { 4833 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4834 return -ENOTSUP; 4835 } 4836 4837 intr_handle = dev->intr_handle; 4838 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4839 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4840 return -EPERM; 4841 } 4842 4843 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4844 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4845 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4846 if (rc && rc != -EEXIST) { 4847 RTE_ETHDEV_LOG(ERR, 4848 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4849 port_id, qid, op, epfd, vec); 4850 } 4851 } 4852 4853 return 0; 4854 } 4855 4856 int 4857 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4858 { 4859 struct rte_intr_handle *intr_handle; 4860 struct rte_eth_dev *dev; 4861 unsigned int efd_idx; 4862 uint32_t vec; 4863 int fd; 4864 4865 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4866 dev = &rte_eth_devices[port_id]; 4867 4868 if (queue_id >= dev->data->nb_rx_queues) { 4869 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4870 return -1; 4871 } 4872 4873 if (!dev->intr_handle) { 4874 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4875 return -1; 4876 } 4877 4878 intr_handle = dev->intr_handle; 4879 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4880 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4881 return -1; 4882 } 4883 4884 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4885 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4886 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4887 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4888 4889 return fd; 4890 } 4891 4892 static inline int 4893 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, 4894 const char *ring_name) 4895 { 4896 return snprintf(name, len, "eth_p%d_q%d_%s", 4897 port_id, queue_id, ring_name); 4898 } 4899 4900 const struct rte_memzone * 4901 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, 4902 uint16_t queue_id, size_t size, unsigned align, 4903 int socket_id) 4904 { 4905 char z_name[RTE_MEMZONE_NAMESIZE]; 4906 const struct rte_memzone *mz; 4907 int rc; 4908 4909 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4910 queue_id, ring_name); 4911 if (rc >= RTE_MEMZONE_NAMESIZE) { 4912 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4913 rte_errno = ENAMETOOLONG; 4914 return NULL; 4915 } 4916 4917 mz = rte_memzone_lookup(z_name); 4918 if (mz) { 4919 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || 4920 size > mz->len || 4921 ((uintptr_t)mz->addr & (align - 1)) != 0) { 4922 RTE_ETHDEV_LOG(ERR, 4923 "memzone %s does not justify the requested attributes\n", 4924 mz->name); 4925 return NULL; 4926 } 4927 4928 return mz; 4929 } 4930 4931 return rte_memzone_reserve_aligned(z_name, size, socket_id, 4932 RTE_MEMZONE_IOVA_CONTIG, align); 4933 } 4934 4935 int 4936 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, 4937 uint16_t queue_id) 4938 { 4939 char z_name[RTE_MEMZONE_NAMESIZE]; 4940 const struct rte_memzone *mz; 4941 int rc = 0; 4942 4943 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, 4944 queue_id, ring_name); 4945 if (rc >= RTE_MEMZONE_NAMESIZE) { 4946 RTE_ETHDEV_LOG(ERR, "ring name too long\n"); 4947 return -ENAMETOOLONG; 4948 } 4949 4950 mz = rte_memzone_lookup(z_name); 4951 if (mz) 4952 rc = rte_memzone_free(mz); 4953 else 4954 rc = -ENOENT; 4955 4956 return rc; 4957 } 4958 4959 int 4960 rte_eth_dev_create(struct rte_device *device, const char *name, 4961 size_t priv_data_size, 4962 ethdev_bus_specific_init ethdev_bus_specific_init, 4963 void *bus_init_params, 4964 ethdev_init_t ethdev_init, void *init_params) 4965 { 4966 struct rte_eth_dev *ethdev; 4967 int retval; 4968 4969 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); 4970 4971 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4972 ethdev = rte_eth_dev_allocate(name); 4973 if (!ethdev) 4974 return -ENODEV; 4975 4976 if (priv_data_size) { 4977 ethdev->data->dev_private = rte_zmalloc_socket( 4978 name, priv_data_size, RTE_CACHE_LINE_SIZE, 4979 device->numa_node); 4980 4981 if (!ethdev->data->dev_private) { 4982 RTE_ETHDEV_LOG(ERR, 4983 "failed to allocate private data\n"); 4984 retval = -ENOMEM; 4985 goto probe_failed; 4986 } 4987 } 4988 } else { 4989 ethdev = rte_eth_dev_attach_secondary(name); 4990 if (!ethdev) { 4991 RTE_ETHDEV_LOG(ERR, 4992 "secondary process attach failed, ethdev doesn't exist\n"); 4993 return -ENODEV; 4994 } 4995 } 4996 4997 ethdev->device = device; 4998 4999 if (ethdev_bus_specific_init) { 5000 retval = ethdev_bus_specific_init(ethdev, bus_init_params); 5001 if (retval) { 5002 RTE_ETHDEV_LOG(ERR, 5003 "ethdev bus specific initialisation failed\n"); 5004 goto probe_failed; 5005 } 5006 } 5007 5008 retval = ethdev_init(ethdev, init_params); 5009 if (retval) { 5010 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); 5011 goto probe_failed; 5012 } 5013 5014 rte_eth_dev_probing_finish(ethdev); 5015 5016 return retval; 5017 5018 
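/*
 * Error path: any failure after the port was allocated (primary process)
 * or attached (secondary process) funnels through probe_failed below,
 * which hands the half-initialised port back via rte_eth_dev_release_port().
 */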
probe_failed: 5019 rte_eth_dev_release_port(ethdev); 5020 return retval; 5021 } 5022 5023 int 5024 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, 5025 ethdev_uninit_t ethdev_uninit) 5026 { 5027 int ret; 5028 5029 ethdev = rte_eth_dev_allocated(ethdev->data->name); 5030 if (!ethdev) 5031 return -ENODEV; 5032 5033 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); 5034 5035 ret = ethdev_uninit(ethdev); 5036 if (ret) 5037 return ret; 5038 5039 return rte_eth_dev_release_port(ethdev); 5040 } 5041 5042 int 5043 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5044 int epfd, int op, void *data) 5045 { 5046 uint32_t vec; 5047 struct rte_eth_dev *dev; 5048 struct rte_intr_handle *intr_handle; 5049 int rc; 5050 5051 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5052 dev = &rte_eth_devices[port_id]; 5053 5054 if (queue_id >= dev->data->nb_rx_queues) { 5055 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5056 return -EINVAL; 5057 } 5058 5059 if (!dev->intr_handle) { 5060 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5061 return -ENOTSUP; 5062 } 5063 5064 intr_handle = dev->intr_handle; 5065 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5066 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5067 return -EPERM; 5068 } 5069 5070 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5071 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5072 if (rc && rc != -EEXIST) { 5073 RTE_ETHDEV_LOG(ERR, 5074 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5075 port_id, queue_id, op, epfd, vec); 5076 return rc; 5077 } 5078 5079 return 0; 5080 } 5081 5082 int 5083 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5084 uint16_t queue_id) 5085 { 5086 struct rte_eth_dev *dev; 5087 int ret; 5088 5089 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5090 dev = &rte_eth_devices[port_id]; 5091 5092 ret = eth_dev_validate_rx_queue(dev, queue_id); 5093 if (ret != 0) 5094 return ret; 5095 5096 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 5097 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5098 } 5099 5100 int 5101 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5102 uint16_t queue_id) 5103 { 5104 struct rte_eth_dev *dev; 5105 int ret; 5106 5107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5108 dev = &rte_eth_devices[port_id]; 5109 5110 ret = eth_dev_validate_rx_queue(dev, queue_id); 5111 if (ret != 0) 5112 return ret; 5113 5114 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 5115 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5116 } 5117 5118 5119 const struct rte_eth_rxtx_callback * 5120 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5121 rte_rx_callback_fn fn, void *user_param) 5122 { 5123 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5124 rte_errno = ENOTSUP; 5125 return NULL; 5126 #endif 5127 struct rte_eth_dev *dev; 5128 5129 /* check input parameters */ 5130 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5131 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5132 rte_errno = EINVAL; 5133 return NULL; 5134 } 5135 dev = &rte_eth_devices[port_id]; 5136 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5137 rte_errno = EINVAL; 5138 return NULL; 5139 } 5140 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5141 5142 if (cb == NULL) { 5143 rte_errno = ENOMEM; 5144 return NULL; 5145 } 5146 5147 cb->fn.rx = fn; 5148 cb->param = user_param; 5149 5150 rte_spinlock_lock(ð_dev_rx_cb_lock); 5151 /* Add the callbacks in fifo 
order. */ 5152 struct rte_eth_rxtx_callback *tail = 5153 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5154 5155 if (!tail) { 5156 /* Stores to cb->fn and cb->param should complete before 5157 * cb is visible to data plane. 5158 */ 5159 __atomic_store_n( 5160 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5161 cb, __ATOMIC_RELEASE); 5162 5163 } else { 5164 while (tail->next) 5165 tail = tail->next; 5166 /* Stores to cb->fn and cb->param should complete before 5167 * cb is visible to data plane. 5168 */ 5169 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5170 } 5171 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5172 5173 return cb; 5174 } 5175 5176 const struct rte_eth_rxtx_callback * 5177 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5178 rte_rx_callback_fn fn, void *user_param) 5179 { 5180 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5181 rte_errno = ENOTSUP; 5182 return NULL; 5183 #endif 5184 /* check input parameters */ 5185 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5186 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5187 rte_errno = EINVAL; 5188 return NULL; 5189 } 5190 5191 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5192 5193 if (cb == NULL) { 5194 rte_errno = ENOMEM; 5195 return NULL; 5196 } 5197 5198 cb->fn.rx = fn; 5199 cb->param = user_param; 5200 5201 rte_spinlock_lock(ð_dev_rx_cb_lock); 5202 /* Add the callbacks at first position */ 5203 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5204 /* Stores to cb->fn, cb->param and cb->next should complete before 5205 * cb is visible to data plane threads. 5206 */ 5207 __atomic_store_n( 5208 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5209 cb, __ATOMIC_RELEASE); 5210 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5211 5212 return cb; 5213 } 5214 5215 const struct rte_eth_rxtx_callback * 5216 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5217 rte_tx_callback_fn fn, void *user_param) 5218 { 5219 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5220 rte_errno = ENOTSUP; 5221 return NULL; 5222 #endif 5223 struct rte_eth_dev *dev; 5224 5225 /* check input parameters */ 5226 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5227 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5228 rte_errno = EINVAL; 5229 return NULL; 5230 } 5231 5232 dev = &rte_eth_devices[port_id]; 5233 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5234 rte_errno = EINVAL; 5235 return NULL; 5236 } 5237 5238 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5239 5240 if (cb == NULL) { 5241 rte_errno = ENOMEM; 5242 return NULL; 5243 } 5244 5245 cb->fn.tx = fn; 5246 cb->param = user_param; 5247 5248 rte_spinlock_lock(ð_dev_tx_cb_lock); 5249 /* Add the callbacks in fifo order. */ 5250 struct rte_eth_rxtx_callback *tail = 5251 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5252 5253 if (!tail) { 5254 /* Stores to cb->fn and cb->param should complete before 5255 * cb is visible to data plane. 5256 */ 5257 __atomic_store_n( 5258 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5259 cb, __ATOMIC_RELEASE); 5260 5261 } else { 5262 while (tail->next) 5263 tail = tail->next; 5264 /* Stores to cb->fn and cb->param should complete before 5265 * cb is visible to data plane. 
5266 */ 5267 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5268 } 5269 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5270 5271 return cb; 5272 } 5273 5274 int 5275 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5276 const struct rte_eth_rxtx_callback *user_cb) 5277 { 5278 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5279 return -ENOTSUP; 5280 #endif 5281 /* Check input parameters. */ 5282 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5283 if (user_cb == NULL || 5284 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5285 return -EINVAL; 5286 5287 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5288 struct rte_eth_rxtx_callback *cb; 5289 struct rte_eth_rxtx_callback **prev_cb; 5290 int ret = -EINVAL; 5291 5292 rte_spinlock_lock(ð_dev_rx_cb_lock); 5293 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5294 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5295 cb = *prev_cb; 5296 if (cb == user_cb) { 5297 /* Remove the user cb from the callback list. */ 5298 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5299 ret = 0; 5300 break; 5301 } 5302 } 5303 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5304 5305 return ret; 5306 } 5307 5308 int 5309 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5310 const struct rte_eth_rxtx_callback *user_cb) 5311 { 5312 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5313 return -ENOTSUP; 5314 #endif 5315 /* Check input parameters. */ 5316 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5317 if (user_cb == NULL || 5318 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5319 return -EINVAL; 5320 5321 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5322 int ret = -EINVAL; 5323 struct rte_eth_rxtx_callback *cb; 5324 struct rte_eth_rxtx_callback **prev_cb; 5325 5326 rte_spinlock_lock(ð_dev_tx_cb_lock); 5327 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5328 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5329 cb = *prev_cb; 5330 if (cb == user_cb) { 5331 /* Remove the user cb from the callback list. 
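			 * The callback structure itself is not freed here:
			 * data-plane threads may still be referencing it, so
			 * the caller is expected to free it only once it is
			 * known to be out of use.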
*/ 5332 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5333 ret = 0; 5334 break; 5335 } 5336 } 5337 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5338 5339 return ret; 5340 } 5341 5342 int 5343 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5344 struct rte_eth_rxq_info *qinfo) 5345 { 5346 struct rte_eth_dev *dev; 5347 5348 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5349 dev = &rte_eth_devices[port_id]; 5350 5351 if (queue_id >= dev->data->nb_rx_queues) { 5352 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5353 return -EINVAL; 5354 } 5355 5356 if (qinfo == NULL) { 5357 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5358 port_id, queue_id); 5359 return -EINVAL; 5360 } 5361 5362 if (dev->data->rx_queues == NULL || 5363 dev->data->rx_queues[queue_id] == NULL) { 5364 RTE_ETHDEV_LOG(ERR, 5365 "Rx queue %"PRIu16" of device with port_id=%" 5366 PRIu16" has not been setup\n", 5367 queue_id, port_id); 5368 return -EINVAL; 5369 } 5370 5371 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5372 RTE_ETHDEV_LOG(INFO, 5373 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5374 queue_id, port_id); 5375 return -EINVAL; 5376 } 5377 5378 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5379 5380 memset(qinfo, 0, sizeof(*qinfo)); 5381 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5382 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5383 5384 return 0; 5385 } 5386 5387 int 5388 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5389 struct rte_eth_txq_info *qinfo) 5390 { 5391 struct rte_eth_dev *dev; 5392 5393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5394 dev = &rte_eth_devices[port_id]; 5395 5396 if (queue_id >= dev->data->nb_tx_queues) { 5397 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5398 return -EINVAL; 5399 } 5400 5401 if (qinfo == NULL) { 5402 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5403 port_id, queue_id); 5404 return -EINVAL; 5405 } 5406 5407 if (dev->data->tx_queues == NULL || 5408 dev->data->tx_queues[queue_id] == NULL) { 5409 RTE_ETHDEV_LOG(ERR, 5410 "Tx queue %"PRIu16" of device with port_id=%" 5411 PRIu16" has not been setup\n", 5412 queue_id, port_id); 5413 return -EINVAL; 5414 } 5415 5416 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5417 RTE_ETHDEV_LOG(INFO, 5418 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5419 queue_id, port_id); 5420 return -EINVAL; 5421 } 5422 5423 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5424 5425 memset(qinfo, 0, sizeof(*qinfo)); 5426 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5427 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5428 5429 return 0; 5430 } 5431 5432 int 5433 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5434 struct rte_eth_burst_mode *mode) 5435 { 5436 struct rte_eth_dev *dev; 5437 5438 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5439 dev = &rte_eth_devices[port_id]; 5440 5441 if (queue_id >= dev->data->nb_rx_queues) { 5442 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5443 return -EINVAL; 5444 } 5445 5446 if (mode == NULL) { 5447 RTE_ETHDEV_LOG(ERR, 5448 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5449 port_id, queue_id); 5450 return -EINVAL; 5451 } 5452 5453 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5454 memset(mode, 0, sizeof(*mode)); 5455 return eth_err(port_id, 5456 
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5457 } 5458 5459 int 5460 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5461 struct rte_eth_burst_mode *mode) 5462 { 5463 struct rte_eth_dev *dev; 5464 5465 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5466 dev = &rte_eth_devices[port_id]; 5467 5468 if (queue_id >= dev->data->nb_tx_queues) { 5469 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5470 return -EINVAL; 5471 } 5472 5473 if (mode == NULL) { 5474 RTE_ETHDEV_LOG(ERR, 5475 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5476 port_id, queue_id); 5477 return -EINVAL; 5478 } 5479 5480 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5481 memset(mode, 0, sizeof(*mode)); 5482 return eth_err(port_id, 5483 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5484 } 5485 5486 int 5487 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5488 struct rte_power_monitor_cond *pmc) 5489 { 5490 struct rte_eth_dev *dev; 5491 5492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5493 dev = &rte_eth_devices[port_id]; 5494 5495 if (queue_id >= dev->data->nb_rx_queues) { 5496 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5497 return -EINVAL; 5498 } 5499 5500 if (pmc == NULL) { 5501 RTE_ETHDEV_LOG(ERR, 5502 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5503 port_id, queue_id); 5504 return -EINVAL; 5505 } 5506 5507 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5508 return eth_err(port_id, 5509 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5510 } 5511 5512 int 5513 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5514 struct rte_ether_addr *mc_addr_set, 5515 uint32_t nb_mc_addr) 5516 { 5517 struct rte_eth_dev *dev; 5518 5519 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5520 dev = &rte_eth_devices[port_id]; 5521 5522 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5523 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5524 mc_addr_set, nb_mc_addr)); 5525 } 5526 5527 int 5528 rte_eth_timesync_enable(uint16_t port_id) 5529 { 5530 struct rte_eth_dev *dev; 5531 5532 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5533 dev = &rte_eth_devices[port_id]; 5534 5535 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5536 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5537 } 5538 5539 int 5540 rte_eth_timesync_disable(uint16_t port_id) 5541 { 5542 struct rte_eth_dev *dev; 5543 5544 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5545 dev = &rte_eth_devices[port_id]; 5546 5547 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5548 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5549 } 5550 5551 int 5552 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5553 uint32_t flags) 5554 { 5555 struct rte_eth_dev *dev; 5556 5557 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5558 dev = &rte_eth_devices[port_id]; 5559 5560 if (timestamp == NULL) { 5561 RTE_ETHDEV_LOG(ERR, 5562 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5563 port_id); 5564 return -EINVAL; 5565 } 5566 5567 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5568 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5569 (dev, timestamp, flags)); 5570 } 5571 5572 int 5573 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5574 struct timespec *timestamp) 5575 { 5576 struct rte_eth_dev *dev; 5577 5578 
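	/*
	 * Minimal usage sketch, assuming a PMD that implements the timesync
	 * ops and a previously transmitted PTP event frame (port_id and ts
	 * are example names, not part of this file):
	 *
	 *	struct timespec ts;
	 *	rte_eth_timesync_enable(port_id);
	 *	... transmit a PTP event frame ...
	 *	if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
	 *		... ts now holds the transmit timestamp ...
	 */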
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5579 dev = &rte_eth_devices[port_id]; 5580 5581 if (timestamp == NULL) { 5582 RTE_ETHDEV_LOG(ERR, 5583 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5584 port_id); 5585 return -EINVAL; 5586 } 5587 5588 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5589 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5590 (dev, timestamp)); 5591 } 5592 5593 int 5594 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5595 { 5596 struct rte_eth_dev *dev; 5597 5598 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5599 dev = &rte_eth_devices[port_id]; 5600 5601 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5602 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5603 } 5604 5605 int 5606 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5607 { 5608 struct rte_eth_dev *dev; 5609 5610 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5611 dev = &rte_eth_devices[port_id]; 5612 5613 if (timestamp == NULL) { 5614 RTE_ETHDEV_LOG(ERR, 5615 "Cannot read ethdev port %u timesync time to NULL\n", 5616 port_id); 5617 return -EINVAL; 5618 } 5619 5620 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5621 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5622 timestamp)); 5623 } 5624 5625 int 5626 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5627 { 5628 struct rte_eth_dev *dev; 5629 5630 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5631 dev = &rte_eth_devices[port_id]; 5632 5633 if (timestamp == NULL) { 5634 RTE_ETHDEV_LOG(ERR, 5635 "Cannot write ethdev port %u timesync from NULL time\n", 5636 port_id); 5637 return -EINVAL; 5638 } 5639 5640 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5641 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5642 timestamp)); 5643 } 5644 5645 int 5646 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5647 { 5648 struct rte_eth_dev *dev; 5649 5650 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5651 dev = &rte_eth_devices[port_id]; 5652 5653 if (clock == NULL) { 5654 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5655 port_id); 5656 return -EINVAL; 5657 } 5658 5659 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5660 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5661 } 5662 5663 int 5664 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5665 { 5666 struct rte_eth_dev *dev; 5667 5668 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5669 dev = &rte_eth_devices[port_id]; 5670 5671 if (info == NULL) { 5672 RTE_ETHDEV_LOG(ERR, 5673 "Cannot get ethdev port %u register info to NULL\n", 5674 port_id); 5675 return -EINVAL; 5676 } 5677 5678 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5679 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5680 } 5681 5682 int 5683 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5684 { 5685 struct rte_eth_dev *dev; 5686 5687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5688 dev = &rte_eth_devices[port_id]; 5689 5690 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5691 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5692 } 5693 5694 int 5695 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5696 { 5697 struct rte_eth_dev *dev; 5698 5699 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
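	/*
	 * Typical read sequence (sketch; "buf" is a caller-provided buffer
	 * and is an assumption of the example):
	 *
	 *	int len = rte_eth_dev_get_eeprom_length(port_id);
	 *	struct rte_dev_eeprom_info info = {
	 *		.data = buf,
	 *		.offset = 0,
	 *		.length = len,
	 *	};
	 *	rte_eth_dev_get_eeprom(port_id, &info);
	 */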
5700 dev = &rte_eth_devices[port_id]; 5701 5702 if (info == NULL) { 5703 RTE_ETHDEV_LOG(ERR, 5704 "Cannot get ethdev port %u EEPROM info to NULL\n", 5705 port_id); 5706 return -EINVAL; 5707 } 5708 5709 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5710 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5711 } 5712 5713 int 5714 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5715 { 5716 struct rte_eth_dev *dev; 5717 5718 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5719 dev = &rte_eth_devices[port_id]; 5720 5721 if (info == NULL) { 5722 RTE_ETHDEV_LOG(ERR, 5723 "Cannot set ethdev port %u EEPROM from NULL info\n", 5724 port_id); 5725 return -EINVAL; 5726 } 5727 5728 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5729 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5730 } 5731 5732 int 5733 rte_eth_dev_get_module_info(uint16_t port_id, 5734 struct rte_eth_dev_module_info *modinfo) 5735 { 5736 struct rte_eth_dev *dev; 5737 5738 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5739 dev = &rte_eth_devices[port_id]; 5740 5741 if (modinfo == NULL) { 5742 RTE_ETHDEV_LOG(ERR, 5743 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5744 port_id); 5745 return -EINVAL; 5746 } 5747 5748 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5749 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5750 } 5751 5752 int 5753 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5754 struct rte_dev_eeprom_info *info) 5755 { 5756 struct rte_eth_dev *dev; 5757 5758 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5759 dev = &rte_eth_devices[port_id]; 5760 5761 if (info == NULL) { 5762 RTE_ETHDEV_LOG(ERR, 5763 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5764 port_id); 5765 return -EINVAL; 5766 } 5767 5768 if (info->data == NULL) { 5769 RTE_ETHDEV_LOG(ERR, 5770 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5771 port_id); 5772 return -EINVAL; 5773 } 5774 5775 if (info->length == 0) { 5776 RTE_ETHDEV_LOG(ERR, 5777 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5778 port_id); 5779 return -EINVAL; 5780 } 5781 5782 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5783 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5784 } 5785 5786 int 5787 rte_eth_dev_get_dcb_info(uint16_t port_id, 5788 struct rte_eth_dcb_info *dcb_info) 5789 { 5790 struct rte_eth_dev *dev; 5791 5792 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5793 dev = &rte_eth_devices[port_id]; 5794 5795 if (dcb_info == NULL) { 5796 RTE_ETHDEV_LOG(ERR, 5797 "Cannot get ethdev port %u DCB info to NULL\n", 5798 port_id); 5799 return -EINVAL; 5800 } 5801 5802 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5803 5804 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5805 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5806 } 5807 5808 static void 5809 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5810 const struct rte_eth_desc_lim *desc_lim) 5811 { 5812 if (desc_lim->nb_align != 0) 5813 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5814 5815 if (desc_lim->nb_max != 0) 5816 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5817 5818 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5819 } 5820 5821 int 5822 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5823 uint16_t *nb_rx_desc, 5824 uint16_t *nb_tx_desc) 5825 { 5826 struct rte_eth_dev_info dev_info; 5827 int ret; 5828 5829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
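	/*
	 * The helper above aligns each requested count up to the PMD's
	 * nb_align and clamps it into [nb_min, nb_max]. A typical caller
	 * (sketch; nb_rxd, nb_txd and mb_pool are example names) runs this
	 * right before queue setup:
	 *
	 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
	 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
	 *			       NULL, mb_pool);
	 */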
5830 5831 ret = rte_eth_dev_info_get(port_id, &dev_info); 5832 if (ret != 0) 5833 return ret; 5834 5835 if (nb_rx_desc != NULL) 5836 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5837 5838 if (nb_tx_desc != NULL) 5839 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5840 5841 return 0; 5842 } 5843 5844 int 5845 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5846 struct rte_eth_hairpin_cap *cap) 5847 { 5848 struct rte_eth_dev *dev; 5849 5850 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5851 dev = &rte_eth_devices[port_id]; 5852 5853 if (cap == NULL) { 5854 RTE_ETHDEV_LOG(ERR, 5855 "Cannot get ethdev port %u hairpin capability to NULL\n", 5856 port_id); 5857 return -EINVAL; 5858 } 5859 5860 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5861 memset(cap, 0, sizeof(*cap)); 5862 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5863 } 5864 5865 int 5866 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5867 { 5868 if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5869 return 1; 5870 return 0; 5871 } 5872 5873 int 5874 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) 5875 { 5876 if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) 5877 return 1; 5878 return 0; 5879 } 5880 5881 int 5882 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5883 { 5884 struct rte_eth_dev *dev; 5885 5886 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5887 dev = &rte_eth_devices[port_id]; 5888 5889 if (pool == NULL) { 5890 RTE_ETHDEV_LOG(ERR, 5891 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5892 port_id); 5893 return -EINVAL; 5894 } 5895 5896 if (*dev->dev_ops->pool_ops_supported == NULL) 5897 return 1; /* all pools are supported */ 5898 5899 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5900 } 5901 5902 /** 5903 * A set of values to describe the possible states of a switch domain. 5904 */ 5905 enum rte_eth_switch_domain_state { 5906 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, 5907 RTE_ETH_SWITCH_DOMAIN_ALLOCATED 5908 }; 5909 5910 /** 5911 * Array of switch domains available for allocation. Array is sized to 5912 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than 5913 * ethdev ports in a single process. 
5914 */ 5915 static struct rte_eth_dev_switch { 5916 enum rte_eth_switch_domain_state state; 5917 } eth_dev_switch_domains[RTE_MAX_ETHPORTS]; 5918 5919 int 5920 rte_eth_switch_domain_alloc(uint16_t *domain_id) 5921 { 5922 uint16_t i; 5923 5924 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 5925 5926 for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 5927 if (eth_dev_switch_domains[i].state == 5928 RTE_ETH_SWITCH_DOMAIN_UNUSED) { 5929 eth_dev_switch_domains[i].state = 5930 RTE_ETH_SWITCH_DOMAIN_ALLOCATED; 5931 *domain_id = i; 5932 return 0; 5933 } 5934 } 5935 5936 return -ENOSPC; 5937 } 5938 5939 int 5940 rte_eth_switch_domain_free(uint16_t domain_id) 5941 { 5942 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || 5943 domain_id >= RTE_MAX_ETHPORTS) 5944 return -EINVAL; 5945 5946 if (eth_dev_switch_domains[domain_id].state != 5947 RTE_ETH_SWITCH_DOMAIN_ALLOCATED) 5948 return -EINVAL; 5949 5950 eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; 5951 5952 return 0; 5953 } 5954 5955 static int 5956 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) 5957 { 5958 int state; 5959 struct rte_kvargs_pair *pair; 5960 char *letter; 5961 5962 arglist->str = strdup(str_in); 5963 if (arglist->str == NULL) 5964 return -ENOMEM; 5965 5966 letter = arglist->str; 5967 state = 0; 5968 arglist->count = 0; 5969 pair = &arglist->pairs[0]; 5970 while (1) { 5971 switch (state) { 5972 case 0: /* Initial */ 5973 if (*letter == '=') 5974 return -EINVAL; 5975 else if (*letter == '\0') 5976 return 0; 5977 5978 state = 1; 5979 pair->key = letter; 5980 /* fall-thru */ 5981 5982 case 1: /* Parsing key */ 5983 if (*letter == '=') { 5984 *letter = '\0'; 5985 pair->value = letter + 1; 5986 state = 2; 5987 } else if (*letter == ',' || *letter == '\0') 5988 return -EINVAL; 5989 break; 5990 5991 5992 case 2: /* Parsing value */ 5993 if (*letter == '[') 5994 state = 3; 5995 else if (*letter == ',') { 5996 *letter = '\0'; 5997 arglist->count++; 5998 pair = &arglist->pairs[arglist->count]; 5999 state = 0; 6000 } else if (*letter == '\0') { 6001 letter--; 6002 arglist->count++; 6003 pair = &arglist->pairs[arglist->count]; 6004 state = 0; 6005 } 6006 break; 6007 6008 case 3: /* Parsing list */ 6009 if (*letter == ']') 6010 state = 2; 6011 else if (*letter == '\0') 6012 return -EINVAL; 6013 break; 6014 } 6015 letter++; 6016 } 6017 } 6018 6019 int 6020 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) 6021 { 6022 struct rte_kvargs args; 6023 struct rte_kvargs_pair *pair; 6024 unsigned int i; 6025 int result = 0; 6026 6027 memset(eth_da, 0, sizeof(*eth_da)); 6028 6029 result = eth_dev_devargs_tokenise(&args, dargs); 6030 if (result < 0) 6031 goto parse_cleanup; 6032 6033 for (i = 0; i < args.count; i++) { 6034 pair = &args.pairs[i]; 6035 if (strcmp("representor", pair->key) == 0) { 6036 if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { 6037 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", 6038 dargs); 6039 result = -1; 6040 goto parse_cleanup; 6041 } 6042 result = rte_eth_devargs_parse_representor_ports( 6043 pair->value, eth_da); 6044 if (result < 0) 6045 goto parse_cleanup; 6046 } 6047 } 6048 6049 parse_cleanup: 6050 if (args.str) 6051 free(args.str); 6052 6053 return result; 6054 } 6055 6056 int 6057 rte_eth_representor_id_get(uint16_t port_id, 6058 enum rte_eth_representor_type type, 6059 int controller, int pf, int representor_port, 6060 uint16_t *repr_id) 6061 { 6062 int ret, n, count; 6063 uint32_t i; 6064 struct rte_eth_representor_info *info = NULL; 6065 size_t 

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
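
/*
 * Illustrative sketch, not part of the library: a driver that has parsed a
 * "representor=vf2" style devargs might translate the VF number into the
 * representor ID exposed by the backing port as below. backer_port_id and
 * vf_num are hypothetical variables standing in for the caller's own state;
 * passing -1 for controller and pf lets the function default them from the
 * backing port's representor info, as implemented above.
 *
 *	uint16_t repr_id;
 *	int ret;
 *
 *	ret = rte_eth_representor_id_get(backer_port_id,
 *			RTE_ETH_REPRESENTOR_VF, -1, -1, vf_num, &repr_id);
 *	if (ret < 0)
 *		return ret;
 */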

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	/* Skip the per-queue array if allocation fails rather than
	 * dereferencing a NULL container below.
	 */
	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}
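
/*
 * For reference: in the handler above, ADD_DICT_STAT(stats, ipackets)
 * expands to
 *
 *	rte_tel_data_add_dict_u64(d, "ipackets", stats.ipackets);
 *
 * i.e. the stringified member name becomes the dictionary key and the member
 * value the 64-bit entry, so the telemetry keys track the rte_eth_stats
 * field names exactly.
 */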
6299 "full-duplex" : "half-duplex"); 6300 return 0; 6301 } 6302 6303 static int 6304 eth_dev_handle_port_info(const char *cmd __rte_unused, 6305 const char *params, 6306 struct rte_tel_data *d) 6307 { 6308 struct rte_tel_data *rxq_state, *txq_state; 6309 char mac_addr[RTE_ETHER_ADDR_LEN]; 6310 struct rte_eth_dev *eth_dev; 6311 char *end_param; 6312 int port_id, i; 6313 6314 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6315 return -1; 6316 6317 port_id = strtoul(params, &end_param, 0); 6318 if (*end_param != '\0') 6319 RTE_ETHDEV_LOG(NOTICE, 6320 "Extra parameters passed to ethdev telemetry command, ignoring"); 6321 6322 if (!rte_eth_dev_is_valid_port(port_id)) 6323 return -EINVAL; 6324 6325 eth_dev = &rte_eth_devices[port_id]; 6326 if (!eth_dev) 6327 return -EINVAL; 6328 6329 rxq_state = rte_tel_data_alloc(); 6330 if (!rxq_state) 6331 return -ENOMEM; 6332 6333 txq_state = rte_tel_data_alloc(); 6334 if (!txq_state) { 6335 rte_tel_data_free(rxq_state); 6336 return -ENOMEM; 6337 } 6338 6339 rte_tel_data_start_dict(d); 6340 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6341 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6342 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6343 eth_dev->data->nb_rx_queues); 6344 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6345 eth_dev->data->nb_tx_queues); 6346 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6347 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6348 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 6349 eth_dev->data->min_rx_buf_size); 6350 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 6351 eth_dev->data->rx_mbuf_alloc_failed); 6352 snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", 6353 eth_dev->data->mac_addrs->addr_bytes[0], 6354 eth_dev->data->mac_addrs->addr_bytes[1], 6355 eth_dev->data->mac_addrs->addr_bytes[2], 6356 eth_dev->data->mac_addrs->addr_bytes[3], 6357 eth_dev->data->mac_addrs->addr_bytes[4], 6358 eth_dev->data->mac_addrs->addr_bytes[5]); 6359 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6360 rte_tel_data_add_dict_int(d, "promiscuous", 6361 eth_dev->data->promiscuous); 6362 rte_tel_data_add_dict_int(d, "scattered_rx", 6363 eth_dev->data->scattered_rx); 6364 rte_tel_data_add_dict_int(d, "all_multicast", 6365 eth_dev->data->all_multicast); 6366 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6367 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6368 rte_tel_data_add_dict_int(d, "dev_configured", 6369 eth_dev->data->dev_configured); 6370 6371 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6372 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6373 rte_tel_data_add_array_int(rxq_state, 6374 eth_dev->data->rx_queue_state[i]); 6375 6376 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6377 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6378 rte_tel_data_add_array_int(txq_state, 6379 eth_dev->data->tx_queue_state[i]); 6380 6381 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6382 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6383 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6384 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 6385 rte_tel_data_add_dict_int(d, "rx_offloads", 6386 eth_dev->data->dev_conf.rxmode.offloads); 6387 rte_tel_data_add_dict_int(d, "tx_offloads", 6388 eth_dev->data->dev_conf.txmode.offloads); 6389 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6390 

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
		struct rte_hairpin_peer_info *cur_info,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
		struct rte_hairpin_peer_info *peer_info,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
		uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
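
/*
 * Illustrative sketch, not part of the library: an application that wants
 * flow MARK/FLAG metadata delivered in received mbufs would negotiate the
 * features before rte_eth_dev_configure() and then check which bits the PMD
 * left set. port_id is a hypothetical variable, and the RTE_ETH_RX_METADATA_*
 * flags are assumed from rte_ethdev.h.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	int ret;
 *
 *	ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *	if (ret == 0 && (features & RTE_ETH_RX_METADATA_USER_MARK) == 0)
 *		RTE_LOG(INFO, EAL, "MARK delivery not granted by the PMD\n");
 */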
Parameters: int port_id"); 6501 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 6502 "Returns the device info for a port. Parameters: int port_id"); 6503 } 6504