/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
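/*
 * Illustrative sketch (not part of this file): the name/offset tables
 * above let the basic stats be exported generically, e.g. as xstats.
 * Assuming a filled 'struct rte_eth_stats stats', the counter matching
 * table entry 'i' can be read through its recorded offset:
 *
 *	uint64_t value = *(uint64_t *)(((char *)&stats) +
 *			eth_dev_stats_strings[i].offset);
 */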
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
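/*
 * Usage sketch (illustrative only): decoding an offload bitmask into
 * names via the tables above, e.g. to log the Rx capability mask
 * reported by rte_eth_dev_info_get(). rte_eth_dev_rx_offload_name()
 * is defined later in this file.
 *
 *	uint64_t mask = dev_info.rx_offload_capa;
 *	while (mask != 0) {
 *		uint64_t bit = RTE_BIT64(__builtin_ctzll(mask));
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		mask &= ~bit;
 *	}
 */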
int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device matches the bus part; need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
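/*
 * Usage sketch (illustrative only, assuming a devargs string such as
 * "class=eth,mac=00:11:22:33:44:55"): a typical application loop over
 * the iterator API above. The RTE_ETH_FOREACH_MATCHING_DEV() helper in
 * rte_ethdev.h wraps the same init/next calls. handle_port() is a
 * hypothetical application function.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, devargs_str) == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			handle_port(port_id);
 *	}
 */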
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}
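/*
 * Usage sketch (illustrative only): the ownership API prevents two
 * entities from managing the same port. A typical claim sequence,
 * using the public wrappers defined below ("my_app" is a hypothetical
 * owner name):
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... port is now exclusively managed ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */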
int
rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}
	return socket_id;
}
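/*
 * Usage sketch (illustrative only): since SOCKET_ID_ANY is both a valid
 * result and the error value of rte_eth_dev_socket_id(), callers need
 * rte_errno to tell the two cases apart:
 *
 *	int socket = rte_eth_dev_socket_id(port_id);
 *
 *	if (socket == SOCKET_ID_ANY) {
 *		if (rte_errno == EINVAL)
 *			... invalid port_id ...
 *		else
 *			... valid port, NUMA node simply unknown ...
 *	}
 */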
void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD.
	 */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
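/*
 * Usage sketch (illustrative only): the per-queue start/stop functions
 * above pair with the 'rx_deferred_start'/'tx_deferred_start' flags in
 * rte_eth_rxconf/rte_eth_txconf (driver support required). A queue set
 * up with deferred start is left stopped by rte_eth_dev_start() and is
 * started explicitly:
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id, &rxconf, mp);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */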
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
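/*
 * Usage sketch (illustrative only): rte_eth_speed_bitflag() converts a
 * numeric speed into the RTE_ETH_LINK_SPEED_* flag used in
 * rte_eth_conf.link_speeds, e.g. to request a fixed 10G full-duplex
 * link:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */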
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
						dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
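/*
 * Worked example (illustrative only) of the check above: a standard
 * Ethernet device reporting max_rx_pktlen = 1518 and max_mtu = 1500
 * yields an overhead of 1518 - 1500 = 18 bytes (14-byte header plus
 * 4-byte CRC), so an MTU of 1500 gives frame_size = 1500 + 18 = 1518,
 * which passes both the RTE_ETHER_MIN_LEN (64) lower bound and the
 * max_rx_pktlen upper bound.
 */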
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any non-anticipated behaviour.
	 * It is set to 1 only when dev_configure() executes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/*
	 * If the number of queues specified by the application for both Rx
	 * and Tx is zero, use the driver's preferred values. This cannot be
	 * done individually, as it is valid for either Tx or Rx (but not
	 * both) to be zero. If the driver does not provide any preferred
	 * values, fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
				   dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
							dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	    dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	    dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}
	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}
static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * Use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value check.
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * Use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value check.
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
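/*
 * Usage sketch (illustrative only): the canonical port bring-up
 * sequence built from rte_eth_dev_configure()/rte_eth_dev_start()
 * above and the queue setup functions defined later in this file:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		... handle error ...
 *	if (rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool) != 0)
 *		... handle error ...
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL) != 0)
 *		... handle error ...
 *	if (rte_eth_dev_start(port_id) != 0)
 *		... handle error ...
 */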
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources, but it should not be obliged to wait
	 * for the device to stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
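/*
 * Usage sketch (illustrative only): rte_eth_dev_reset() above already
 * stops the port itself, so an application typically reacts to the
 * RTE_ETH_EVENT_INTR_RESET event simply with:
 *
 *	if (rte_eth_dev_reset(port_id) == 0) {
 *		... reconfigure with rte_eth_dev_configure(),
 *		    set up the queues again and restart the port ...
 *	}
 */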
static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer; this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
			       mp->name, data_room_size,
			       offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;

		ret = rte_eth_check_rx_mempool(mpl, offset, length);
		if (ret != 0)
			return ret;
	}
	return 0;
}
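/*
 * Configuration sketch (illustrative only): a two-segment buffer split
 * of the kind the checks above validate, placing the first 128 bytes
 * in one pool and the rest in another (hdr_pool/pay_pool are
 * hypothetical mempools; length 0 means "use the pool's buffer size"):
 *
 *	union rte_eth_rxseg seg[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	seg[0].split = (struct rte_eth_rxseg_split){ .mp = hdr_pool, .length = 128 };
 *	seg[1].split = (struct rte_eth_rxseg_split){ .mp = pay_pool };
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = seg;
 *	rxconf.rx_nseg = 2;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id, &rxconf, NULL);
 */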
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Ambiguous segment configuration\n");
			return -EINVAL;
		}

		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info.min_rx_bufsize);
		if (ret != 0)
			return ret;

		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->rx_queue_state[rx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offload has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to the underlying PMD only carries
	 * those offloads which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * Offloads newly added for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be of a per-queue type.
	 * A pure per-port offload can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offload can't
	 * be newly enabled for any queue if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	    local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (local_conf.share_group > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
			port_id, rx_queue_id, local_conf.share_group);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	/* Get the real Ethernet overhead length */
	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t overhead_len;
		uint32_t max_rx_pktlen;
		int ret;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
							dev_info.max_mtu);
		max_rx_pktlen = dev->data->mtu + overhead_len;
		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			return ret;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
		rx_conf, ret);
	return eth_err(port_id, ret);
}
int
rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
			       uint16_t nb_rx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
		return -ENOTSUP;
	/* if nb_rx_desc is zero, use the max number of descriptors from the driver */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Rx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Rx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
			cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}
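/*
 * Configuration sketch (illustrative only): binding an Rx hairpin queue
 * to a Tx hairpin queue on the same port via struct rte_eth_hairpin_conf
 * (peer_tx_queue is a hypothetical Tx hairpin queue index):
 *
 *	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *
 *	conf.peers[0].port = port_id;
 *	conf.peers[0].queue = peer_tx_queue;
 *	rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
 */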
*/ 1959 if (nb_rx_desc == 0) 1960 nb_rx_desc = cap.max_nb_desc; 1961 if (nb_rx_desc > cap.max_nb_desc) { 1962 RTE_ETHDEV_LOG(ERR, 1963 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 1964 nb_rx_desc, cap.max_nb_desc); 1965 return -EINVAL; 1966 } 1967 if (conf->peer_count > cap.max_rx_2_tx) { 1968 RTE_ETHDEV_LOG(ERR, 1969 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 1970 conf->peer_count, cap.max_rx_2_tx); 1971 return -EINVAL; 1972 } 1973 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { 1974 RTE_ETHDEV_LOG(ERR, 1975 "Attempt to use locked device memory for Rx queue, which is not supported"); 1976 return -EINVAL; 1977 } 1978 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { 1979 RTE_ETHDEV_LOG(ERR, 1980 "Attempt to use DPDK memory for Rx queue, which is not supported"); 1981 return -EINVAL; 1982 } 1983 if (conf->use_locked_device_memory && conf->use_rte_memory) { 1984 RTE_ETHDEV_LOG(ERR, 1985 "Attempt to use mutually exclusive memory settings for Rx queue"); 1986 return -EINVAL; 1987 } 1988 if (conf->force_memory && 1989 !conf->use_locked_device_memory && 1990 !conf->use_rte_memory) { 1991 RTE_ETHDEV_LOG(ERR, 1992 "Attempt to force Rx queue memory settings, but none is set"); 1993 return -EINVAL; 1994 } 1995 if (conf->peer_count == 0) { 1996 RTE_ETHDEV_LOG(ERR, 1997 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 1998 conf->peer_count); 1999 return -EINVAL; 2000 } 2001 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2002 cap.max_nb_queues != UINT16_MAX; i++) { 2003 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2004 count++; 2005 } 2006 if (count > cap.max_nb_queues) { 2007 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2008 cap.max_nb_queues); 2009 return -EINVAL; 2010 } 2011 if (dev->data->dev_started) 2012 return -EBUSY; 2013 eth_dev_rxq_release(dev, rx_queue_id); 2014 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2015 nb_rx_desc, conf); 2016 if (ret == 0) 2017 dev->data->rx_queue_state[rx_queue_id] = 2018 RTE_ETH_QUEUE_STATE_HAIRPIN; 2019 return eth_err(port_id, ret); 2020 } 2021 2022 int 2023 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2024 uint16_t nb_tx_desc, unsigned int socket_id, 2025 const struct rte_eth_txconf *tx_conf) 2026 { 2027 struct rte_eth_dev *dev; 2028 struct rte_eth_dev_info dev_info; 2029 struct rte_eth_txconf local_conf; 2030 int ret; 2031 2032 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2033 dev = &rte_eth_devices[port_id]; 2034 2035 if (tx_queue_id >= dev->data->nb_tx_queues) { 2036 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2037 return -EINVAL; 2038 } 2039 2040 if (*dev->dev_ops->tx_queue_setup == NULL) 2041 return -ENOTSUP; 2042 2043 ret = rte_eth_dev_info_get(port_id, &dev_info); 2044 if (ret != 0) 2045 return ret; 2046 2047 /* Use default specified by driver, if nb_tx_desc is zero */ 2048 if (nb_tx_desc == 0) { 2049 nb_tx_desc = dev_info.default_txportconf.ring_size; 2050 /* If driver default is zero, fall back on EAL default */ 2051 if (nb_tx_desc == 0) 2052 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2053 } 2054 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2055 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2056 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2057 RTE_ETHDEV_LOG(ERR, 2058 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n", 2059 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2060 dev_info.tx_desc_lim.nb_min, 2061
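/* nb_align is the PMD-reported alignment; nb_tx_desc must be a multiple of it */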
dev_info.tx_desc_lim.nb_align); 2062 return -EINVAL; 2063 } 2064 2065 if (dev->data->dev_started && 2066 !(dev_info.dev_capa & 2067 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2068 return -EBUSY; 2069 2070 if (dev->data->dev_started && 2071 (dev->data->tx_queue_state[tx_queue_id] != 2072 RTE_ETH_QUEUE_STATE_STOPPED)) 2073 return -EBUSY; 2074 2075 eth_dev_txq_release(dev, tx_queue_id); 2076 2077 if (tx_conf == NULL) 2078 tx_conf = &dev_info.default_txconf; 2079 2080 local_conf = *tx_conf; 2081 2082 /* 2083 * If an offload has already been enabled in 2084 * rte_eth_dev_configure(), it has been enabled on all queues, 2085 * so there is no need to enable it in this queue again. 2086 * The local_conf.offloads input to the underlying PMD only carries 2087 * those offloads which are enabled on this queue and 2088 * not on all queues. 2089 */ 2090 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2091 2092 /* 2093 * Newly added offloads for this queue are those not enabled in 2094 * rte_eth_dev_configure(), and they must be of a per-queue type. 2095 * A pure per-port offload can't be enabled on a queue while 2096 * disabled on another queue. Nor can a pure per-port offload 2097 * be newly enabled on a queue if it hasn't been 2098 * enabled in rte_eth_dev_configure(). 2099 */ 2100 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2101 local_conf.offloads) { 2102 RTE_ETHDEV_LOG(ERR, 2103 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2104 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2105 port_id, tx_queue_id, local_conf.offloads, 2106 dev_info.tx_queue_offload_capa, 2107 __func__); 2108 return -EINVAL; 2109 } 2110 2111 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2112 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2113 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2114 } 2115 2116 int 2117 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2118 uint16_t nb_tx_desc, 2119 const struct rte_eth_hairpin_conf *conf) 2120 { 2121 struct rte_eth_dev *dev; 2122 struct rte_eth_hairpin_cap cap; 2123 int i; 2124 int count; 2125 int ret; 2126 2127 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2128 dev = &rte_eth_devices[port_id]; 2129 2130 if (tx_queue_id >= dev->data->nb_tx_queues) { 2131 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2132 return -EINVAL; 2133 } 2134 2135 if (conf == NULL) { 2136 RTE_ETHDEV_LOG(ERR, 2137 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2138 port_id); 2139 return -EINVAL; 2140 } 2141 2142 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2143 if (ret != 0) 2144 return ret; 2145 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2146 return -ENOTSUP; 2147 /* if nb_tx_desc is zero use max number of desc from the driver.
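 * (Same convention as the Rx side: a zero nb_tx_desc defaults to
 * cap.max_nb_desc from rte_eth_dev_hairpin_capability_get().)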
*/ 2148 if (nb_tx_desc == 0) 2149 nb_tx_desc = cap.max_nb_desc; 2150 if (nb_tx_desc > cap.max_nb_desc) { 2151 RTE_ETHDEV_LOG(ERR, 2152 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2153 nb_tx_desc, cap.max_nb_desc); 2154 return -EINVAL; 2155 } 2156 if (conf->peer_count > cap.max_tx_2_rx) { 2157 RTE_ETHDEV_LOG(ERR, 2158 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2159 conf->peer_count, cap.max_tx_2_rx); 2160 return -EINVAL; 2161 } 2162 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { 2163 RTE_ETHDEV_LOG(ERR, 2164 "Attempt to use locked device memory for Tx queue, which is not supported"); 2165 return -EINVAL; 2166 } 2167 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { 2168 RTE_ETHDEV_LOG(ERR, 2169 "Attempt to use DPDK memory for Tx queue, which is not supported"); 2170 return -EINVAL; 2171 } 2172 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2173 RTE_ETHDEV_LOG(ERR, 2174 "Attempt to use mutually exclusive memory settings for Tx queue"); 2175 return -EINVAL; 2176 } 2177 if (conf->force_memory && 2178 !conf->use_locked_device_memory && 2179 !conf->use_rte_memory) { 2180 RTE_ETHDEV_LOG(ERR, 2181 "Attempt to force Tx queue memory settings, but none is set"); 2182 return -EINVAL; 2183 } 2184 if (conf->peer_count == 0) { 2185 RTE_ETHDEV_LOG(ERR, 2186 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2187 conf->peer_count); 2188 return -EINVAL; 2189 } 2190 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2191 cap.max_nb_queues != UINT16_MAX; i++) { 2192 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2193 count++; 2194 } 2195 if (count > cap.max_nb_queues) { 2196 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2197 cap.max_nb_queues); 2198 return -EINVAL; 2199 } 2200 if (dev->data->dev_started) 2201 return -EBUSY; 2202 eth_dev_txq_release(dev, tx_queue_id); 2203 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2204 (dev, tx_queue_id, nb_tx_desc, conf); 2205 if (ret == 0) 2206 dev->data->tx_queue_state[tx_queue_id] = 2207 RTE_ETH_QUEUE_STATE_HAIRPIN; 2208 return eth_err(port_id, ret); 2209 } 2210 2211 int 2212 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2213 { 2214 struct rte_eth_dev *dev; 2215 int ret; 2216 2217 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2218 dev = &rte_eth_devices[tx_port]; 2219 2220 if (dev->data->dev_started == 0) { 2221 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2222 return -EBUSY; 2223 } 2224 2225 if (*dev->dev_ops->hairpin_bind == NULL) 2226 return -ENOTSUP; 2227 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2228 if (ret != 0) 2229 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2230 " to Rx %d (%d - all ports)\n", 2231 tx_port, rx_port, RTE_MAX_ETHPORTS); 2232 2233 return ret; 2234 } 2235 2236 int 2237 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2238 { 2239 struct rte_eth_dev *dev; 2240 int ret; 2241 2242 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2243 dev = &rte_eth_devices[tx_port]; 2244 2245 if (dev->data->dev_started == 0) { 2246 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2247 return -EBUSY; 2248 } 2249 2250 if (*dev->dev_ops->hairpin_unbind == NULL) 2251 return -ENOTSUP; 2252 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2253 if (ret != 0) 2254 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2255 " from Rx %d (%d - all ports)\n", 2256 tx_port, rx_port, RTE_MAX_ETHPORTS); 2257 2258 return ret; 2259 } 2260 2261 int 2262
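/*
 * Hedged usage sketch for the bind/unbind functions above, assuming two
 * started ports whose hairpin queues were created in manual-bind mode.
 * An application typically does
 *
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *
 * and later, before stopping the ports,
 *
 *	ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 */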
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2263 size_t len, uint32_t direction) 2264 { 2265 struct rte_eth_dev *dev; 2266 int ret; 2267 2268 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2269 dev = &rte_eth_devices[port_id]; 2270 2271 if (peer_ports == NULL) { 2272 RTE_ETHDEV_LOG(ERR, 2273 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2274 port_id); 2275 return -EINVAL; 2276 } 2277 2278 if (len == 0) { 2279 RTE_ETHDEV_LOG(ERR, 2280 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2281 port_id); 2282 return -EINVAL; 2283 } 2284 2285 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2286 return -ENOTSUP; 2287 2288 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2289 len, direction); 2290 if (ret < 0) 2291 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2292 port_id, direction ? "Rx" : "Tx"); 2293 2294 return ret; 2295 } 2296 2297 void 2298 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2299 void *userdata __rte_unused) 2300 { 2301 rte_pktmbuf_free_bulk(pkts, unsent); 2302 } 2303 2304 void 2305 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2306 void *userdata) 2307 { 2308 uint64_t *count = userdata; 2309 2310 rte_pktmbuf_free_bulk(pkts, unsent); 2311 *count += unsent; 2312 } 2313 2314 int 2315 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2316 buffer_tx_error_fn cbfn, void *userdata) 2317 { 2318 if (buffer == NULL) { 2319 RTE_ETHDEV_LOG(ERR, 2320 "Cannot set Tx buffer error callback to NULL buffer\n"); 2321 return -EINVAL; 2322 } 2323 2324 buffer->error_callback = cbfn; 2325 buffer->error_userdata = userdata; 2326 return 0; 2327 } 2328 2329 int 2330 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2331 { 2332 int ret = 0; 2333 2334 if (buffer == NULL) { 2335 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2336 return -EINVAL; 2337 } 2338 2339 buffer->size = size; 2340 if (buffer->error_callback == NULL) { 2341 ret = rte_eth_tx_buffer_set_err_callback( 2342 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2343 } 2344 2345 return ret; 2346 } 2347 2348 int 2349 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2350 { 2351 struct rte_eth_dev *dev; 2352 int ret; 2353 2354 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2355 dev = &rte_eth_devices[port_id]; 2356 2357 if (*dev->dev_ops->tx_done_cleanup == NULL) 2358 return -ENOTSUP; 2359 2360 /* Call driver to free pending mbufs. */ 2361 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2362 free_cnt); 2363 return eth_err(port_id, ret); 2364 } 2365 2366 int 2367 rte_eth_promiscuous_enable(uint16_t port_id) 2368 { 2369 struct rte_eth_dev *dev; 2370 int diag = 0; 2371 2372 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2373 dev = &rte_eth_devices[port_id]; 2374 2375 if (dev->data->promiscuous == 1) 2376 return 0; 2377 2378 if (*dev->dev_ops->promiscuous_enable == NULL) 2379 return -ENOTSUP; 2380 2381 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2382 dev->data->promiscuous = (diag == 0) ? 
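/* the software flag mirrors the driver result: 1 on success, 0 on failure */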
1 : 0; 2383 2384 return eth_err(port_id, diag); 2385 } 2386 2387 int 2388 rte_eth_promiscuous_disable(uint16_t port_id) 2389 { 2390 struct rte_eth_dev *dev; 2391 int diag = 0; 2392 2393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2394 dev = &rte_eth_devices[port_id]; 2395 2396 if (dev->data->promiscuous == 0) 2397 return 0; 2398 2399 if (*dev->dev_ops->promiscuous_disable == NULL) 2400 return -ENOTSUP; 2401 2402 dev->data->promiscuous = 0; 2403 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2404 if (diag != 0) 2405 dev->data->promiscuous = 1; 2406 2407 return eth_err(port_id, diag); 2408 } 2409 2410 int 2411 rte_eth_promiscuous_get(uint16_t port_id) 2412 { 2413 struct rte_eth_dev *dev; 2414 2415 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2416 dev = &rte_eth_devices[port_id]; 2417 2418 return dev->data->promiscuous; 2419 } 2420 2421 int 2422 rte_eth_allmulticast_enable(uint16_t port_id) 2423 { 2424 struct rte_eth_dev *dev; 2425 int diag; 2426 2427 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2428 dev = &rte_eth_devices[port_id]; 2429 2430 if (dev->data->all_multicast == 1) 2431 return 0; 2432 2433 if (*dev->dev_ops->allmulticast_enable == NULL) 2434 return -ENOTSUP; 2435 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2436 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2437 2438 return eth_err(port_id, diag); 2439 } 2440 2441 int 2442 rte_eth_allmulticast_disable(uint16_t port_id) 2443 { 2444 struct rte_eth_dev *dev; 2445 int diag; 2446 2447 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2448 dev = &rte_eth_devices[port_id]; 2449 2450 if (dev->data->all_multicast == 0) 2451 return 0; 2452 2453 if (*dev->dev_ops->allmulticast_disable == NULL) 2454 return -ENOTSUP; 2455 dev->data->all_multicast = 0; 2456 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2457 if (diag != 0) 2458 dev->data->all_multicast = 1; 2459 2460 return eth_err(port_id, diag); 2461 } 2462 2463 int 2464 rte_eth_allmulticast_get(uint16_t port_id) 2465 { 2466 struct rte_eth_dev *dev; 2467 2468 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2469 dev = &rte_eth_devices[port_id]; 2470 2471 return dev->data->all_multicast; 2472 } 2473 2474 int 2475 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2476 { 2477 struct rte_eth_dev *dev; 2478 2479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2480 dev = &rte_eth_devices[port_id]; 2481 2482 if (eth_link == NULL) { 2483 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2484 port_id); 2485 return -EINVAL; 2486 } 2487 2488 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2489 rte_eth_linkstatus_get(dev, eth_link); 2490 else { 2491 if (*dev->dev_ops->link_update == NULL) 2492 return -ENOTSUP; 2493 (*dev->dev_ops->link_update)(dev, 1); 2494 *eth_link = dev->data->dev_link; 2495 } 2496 2497 return 0; 2498 } 2499 2500 int 2501 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2502 { 2503 struct rte_eth_dev *dev; 2504 2505 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2506 dev = &rte_eth_devices[port_id]; 2507 2508 if (eth_link == NULL) { 2509 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2510 port_id); 2511 return -EINVAL; 2512 } 2513 2514 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2515 rte_eth_linkstatus_get(dev, eth_link); 2516 else { 2517 if (*dev->dev_ops->link_update == NULL) 2518 return -ENOTSUP; 2519 (*dev->dev_ops->link_update)(dev, 0); 2520 *eth_link = dev->data->dev_link; 2521 } 2522 2523 return 0; 2524 } 2525 2526 const char * 
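/*
 * Hedged sketch of typical link-status reporting built on the helpers
 * below and rte_eth_link_get_nowait() above (RTE_ETH_LINK_MAX_STR_LEN
 * comes from rte_ethdev.h):
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(text, sizeof(text), &link);
 *		printf("Port %u: %s\n", port_id, text);
 *	}
 */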
2527 rte_eth_link_speed_to_str(uint32_t link_speed) 2528 { 2529 switch (link_speed) { 2530 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2531 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2532 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2533 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2534 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2535 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2536 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2537 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2538 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2539 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2540 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2541 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2542 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2543 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2544 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2545 default: return "Invalid"; 2546 } 2547 } 2548 2549 int 2550 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2551 { 2552 if (str == NULL) { 2553 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2554 return -EINVAL; 2555 } 2556 2557 if (len == 0) { 2558 RTE_ETHDEV_LOG(ERR, 2559 "Cannot convert link to string with zero size\n"); 2560 return -EINVAL; 2561 } 2562 2563 if (eth_link == NULL) { 2564 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2565 return -EINVAL; 2566 } 2567 2568 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2569 return snprintf(str, len, "Link down"); 2570 else 2571 return snprintf(str, len, "Link up at %s %s %s", 2572 rte_eth_link_speed_to_str(eth_link->link_speed), 2573 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2574 "FDX" : "HDX", 2575 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2576 "Autoneg" : "Fixed"); 2577 } 2578 2579 int 2580 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2581 { 2582 struct rte_eth_dev *dev; 2583 2584 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2585 dev = &rte_eth_devices[port_id]; 2586 2587 if (stats == NULL) { 2588 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2589 port_id); 2590 return -EINVAL; 2591 } 2592 2593 memset(stats, 0, sizeof(*stats)); 2594 2595 if (*dev->dev_ops->stats_get == NULL) 2596 return -ENOTSUP; 2597 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2598 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2599 } 2600 2601 int 2602 rte_eth_stats_reset(uint16_t port_id) 2603 { 2604 struct rte_eth_dev *dev; 2605 int ret; 2606 2607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2608 dev = &rte_eth_devices[port_id]; 2609 2610 if (*dev->dev_ops->stats_reset == NULL) 2611 return -ENOTSUP; 2612 ret = (*dev->dev_ops->stats_reset)(dev); 2613 if (ret != 0) 2614 return eth_err(port_id, ret); 2615 2616 dev->data->rx_mbuf_alloc_failed = 0; 2617 2618 return 0; 2619 } 2620 2621 static inline int 2622 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2623 { 2624 uint16_t nb_rxqs, nb_txqs; 2625 int count; 2626 2627 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2628 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2629 2630 count = RTE_NB_STATS; 2631 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2632 count += nb_rxqs * RTE_NB_RXQ_STATS; 2633 count += nb_txqs * RTE_NB_TXQ_STATS; 2634 } 2635 2636 return count; 2637 } 2638 2639 static int 2640 eth_dev_get_xstats_count(uint16_t port_id) 2641 { 2642 struct rte_eth_dev *dev; 2643 int count; 2644 2645 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 
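/* an invalid port_id yields -ENODEV before any driver state is read */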
-ENODEV); 2646 dev = &rte_eth_devices[port_id]; 2647 if (dev->dev_ops->xstats_get_names != NULL) { 2648 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2649 if (count < 0) 2650 return eth_err(port_id, count); 2651 } else 2652 count = 0; 2653 2654 2655 count += eth_dev_get_xstats_basic_count(dev); 2656 2657 return count; 2658 } 2659 2660 int 2661 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2662 uint64_t *id) 2663 { 2664 int cnt_xstats, idx_xstat; 2665 2666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2667 2668 if (xstat_name == NULL) { 2669 RTE_ETHDEV_LOG(ERR, 2670 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2671 port_id); 2672 return -ENOMEM; 2673 } 2674 2675 if (id == NULL) { 2676 RTE_ETHDEV_LOG(ERR, 2677 "Cannot get ethdev port %u xstats ID to NULL\n", 2678 port_id); 2679 return -ENOMEM; 2680 } 2681 2682 /* Get count */ 2683 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2684 if (cnt_xstats < 0) { 2685 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2686 return -ENODEV; 2687 } 2688 2689 /* Get id-name lookup table */ 2690 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2691 2692 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2693 port_id, xstats_names, cnt_xstats, NULL)) { 2694 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2695 return -1; 2696 } 2697 2698 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2699 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2700 *id = idx_xstat; 2701 return 0; 2702 }; 2703 } 2704 2705 return -EINVAL; 2706 } 2707 2708 /* retrieve basic stats names */ 2709 static int 2710 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2711 struct rte_eth_xstat_name *xstats_names) 2712 { 2713 int cnt_used_entries = 0; 2714 uint32_t idx, id_queue; 2715 uint16_t num_q; 2716 2717 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2718 strlcpy(xstats_names[cnt_used_entries].name, 2719 eth_dev_stats_strings[idx].name, 2720 sizeof(xstats_names[0].name)); 2721 cnt_used_entries++; 2722 } 2723 2724 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2725 return cnt_used_entries; 2726 2727 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2728 for (id_queue = 0; id_queue < num_q; id_queue++) { 2729 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2730 snprintf(xstats_names[cnt_used_entries].name, 2731 sizeof(xstats_names[0].name), 2732 "rx_q%u_%s", 2733 id_queue, eth_dev_rxq_stats_strings[idx].name); 2734 cnt_used_entries++; 2735 } 2736 2737 } 2738 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2739 for (id_queue = 0; id_queue < num_q; id_queue++) { 2740 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2741 snprintf(xstats_names[cnt_used_entries].name, 2742 sizeof(xstats_names[0].name), 2743 "tx_q%u_%s", 2744 id_queue, eth_dev_txq_stats_strings[idx].name); 2745 cnt_used_entries++; 2746 } 2747 } 2748 return cnt_used_entries; 2749 } 2750 2751 /* retrieve ethdev extended statistics names */ 2752 int 2753 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2754 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2755 uint64_t *ids) 2756 { 2757 struct rte_eth_xstat_name *xstats_names_copy; 2758 unsigned int no_basic_stat_requested = 1; 2759 unsigned int no_ext_stat_requested = 1; 2760 unsigned int expected_entries; 2761 unsigned int basic_count; 2762 struct rte_eth_dev *dev; 2763 unsigned int i; 2764 int ret; 2765 2766 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2767 dev = &rte_eth_devices[port_id]; 2768 2769 
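/*
 * Hedged reminder of the common two-pass pattern for the plain
 * rte_eth_xstats_get_names() API (the names "n" and "names" below are
 * illustrative):
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat_name *names =
 *			calloc(n, sizeof(*names));
 *		if (names != NULL)
 *			rte_eth_xstats_get_names(port_id, names, n);
 *	}
 */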
basic_count = eth_dev_get_xstats_basic_count(dev); 2770 ret = eth_dev_get_xstats_count(port_id); 2771 if (ret < 0) 2772 return ret; 2773 expected_entries = (unsigned int)ret; 2774 2775 /* Return max number of stats if no ids given */ 2776 if (!ids) { 2777 if (!xstats_names) 2778 return expected_entries; 2779 else if (xstats_names && size < expected_entries) 2780 return expected_entries; 2781 } 2782 2783 if (ids && !xstats_names) 2784 return -EINVAL; 2785 2786 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2787 uint64_t ids_copy[size]; 2788 2789 for (i = 0; i < size; i++) { 2790 if (ids[i] < basic_count) { 2791 no_basic_stat_requested = 0; 2792 break; 2793 } 2794 2795 /* 2796 * Convert ids to xstats ids that PMD knows. 2797 * ids known by user are basic + extended stats. 2798 */ 2799 ids_copy[i] = ids[i] - basic_count; 2800 } 2801 2802 if (no_basic_stat_requested) 2803 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2804 ids_copy, xstats_names, size); 2805 } 2806 2807 /* Retrieve all stats */ 2808 if (!ids) { 2809 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2810 expected_entries); 2811 if (num_stats < 0 || num_stats > (int)expected_entries) 2812 return num_stats; 2813 else 2814 return expected_entries; 2815 } 2816 2817 xstats_names_copy = calloc(expected_entries, 2818 sizeof(struct rte_eth_xstat_name)); 2819 2820 if (!xstats_names_copy) { 2821 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2822 return -ENOMEM; 2823 } 2824 2825 if (ids) { 2826 for (i = 0; i < size; i++) { 2827 if (ids[i] >= basic_count) { 2828 no_ext_stat_requested = 0; 2829 break; 2830 } 2831 } 2832 } 2833 2834 /* Fill xstats_names_copy structure */ 2835 if (ids && no_ext_stat_requested) { 2836 eth_basic_stats_get_names(dev, xstats_names_copy); 2837 } else { 2838 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2839 expected_entries); 2840 if (ret < 0) { 2841 free(xstats_names_copy); 2842 return ret; 2843 } 2844 } 2845 2846 /* Filter stats */ 2847 for (i = 0; i < size; i++) { 2848 if (ids[i] >= expected_entries) { 2849 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2850 free(xstats_names_copy); 2851 return -1; 2852 } 2853 xstats_names[i] = xstats_names_copy[ids[i]]; 2854 } 2855 2856 free(xstats_names_copy); 2857 return size; 2858 } 2859 2860 int 2861 rte_eth_xstats_get_names(uint16_t port_id, 2862 struct rte_eth_xstat_name *xstats_names, 2863 unsigned int size) 2864 { 2865 struct rte_eth_dev *dev; 2866 int cnt_used_entries; 2867 int cnt_expected_entries; 2868 int cnt_driver_entries; 2869 2870 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 2871 if (xstats_names == NULL || cnt_expected_entries < 0 || 2872 (int)size < cnt_expected_entries) 2873 return cnt_expected_entries; 2874 2875 /* port_id checked in eth_dev_get_xstats_count() */ 2876 dev = &rte_eth_devices[port_id]; 2877 2878 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 2879 2880 if (dev->dev_ops->xstats_get_names != NULL) { 2881 /* If there are any driver-specific xstats, append them 2882 * to end of list. 
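 * (the ids the application sees therefore run 0..basic_count-1 for
 * the generic stats, with driver entries starting at basic_count)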
2883 */ 2884 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 2885 dev, 2886 xstats_names + cnt_used_entries, 2887 size - cnt_used_entries); 2888 if (cnt_driver_entries < 0) 2889 return eth_err(port_id, cnt_driver_entries); 2890 cnt_used_entries += cnt_driver_entries; 2891 } 2892 2893 return cnt_used_entries; 2894 } 2895 2896 2897 static int 2898 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 2899 { 2900 struct rte_eth_dev *dev; 2901 struct rte_eth_stats eth_stats; 2902 unsigned int count = 0, i, q; 2903 uint64_t val, *stats_ptr; 2904 uint16_t nb_rxqs, nb_txqs; 2905 int ret; 2906 2907 ret = rte_eth_stats_get(port_id, &eth_stats); 2908 if (ret < 0) 2909 return ret; 2910 2911 dev = &rte_eth_devices[port_id]; 2912 2913 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2914 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2915 2916 /* global stats */ 2917 for (i = 0; i < RTE_NB_STATS; i++) { 2918 stats_ptr = RTE_PTR_ADD(&eth_stats, 2919 eth_dev_stats_strings[i].offset); 2920 val = *stats_ptr; 2921 xstats[count++].value = val; 2922 } 2923 2924 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2925 return count; 2926 2927 /* per-rxq stats */ 2928 for (q = 0; q < nb_rxqs; q++) { 2929 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 2930 stats_ptr = RTE_PTR_ADD(&eth_stats, 2931 eth_dev_rxq_stats_strings[i].offset + 2932 q * sizeof(uint64_t)); 2933 val = *stats_ptr; 2934 xstats[count++].value = val; 2935 } 2936 } 2937 2938 /* per-txq stats */ 2939 for (q = 0; q < nb_txqs; q++) { 2940 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 2941 stats_ptr = RTE_PTR_ADD(&eth_stats, 2942 eth_dev_txq_stats_strings[i].offset + 2943 q * sizeof(uint64_t)); 2944 val = *stats_ptr; 2945 xstats[count++].value = val; 2946 } 2947 } 2948 return count; 2949 } 2950 2951 /* retrieve ethdev extended statistics */ 2952 int 2953 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 2954 uint64_t *values, unsigned int size) 2955 { 2956 unsigned int no_basic_stat_requested = 1; 2957 unsigned int no_ext_stat_requested = 1; 2958 unsigned int num_xstats_filled; 2959 unsigned int basic_count; 2960 uint16_t expected_entries; 2961 struct rte_eth_dev *dev; 2962 unsigned int i; 2963 int ret; 2964 2965 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2966 dev = &rte_eth_devices[port_id]; 2967 2968 ret = eth_dev_get_xstats_count(port_id); 2969 if (ret < 0) 2970 return ret; 2971 expected_entries = (uint16_t)ret; 2972 struct rte_eth_xstat xstats[expected_entries]; 2973 basic_count = eth_dev_get_xstats_basic_count(dev); 2974 2975 /* Return max number of stats if no ids given */ 2976 if (!ids) { 2977 if (!values) 2978 return expected_entries; 2979 else if (values && size < expected_entries) 2980 return expected_entries; 2981 } 2982 2983 if (ids && !values) 2984 return -EINVAL; 2985 2986 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 2987 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 2988 uint64_t ids_copy[size]; 2989 2990 for (i = 0; i < size; i++) { 2991 if (ids[i] < basic_count) { 2992 no_basic_stat_requested = 0; 2993 break; 2994 } 2995 2996 /* 2997 * Convert ids to xstats ids that PMD knows. 2998 * ids known by user are basic + extended stats.
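 * For example, with basic_count == 10, a user-visible id of 12 is
 * passed to the PMD as driver id 2.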
2999 */ 3000 ids_copy[i] = ids[i] - basic_count; 3001 } 3002 3003 if (no_basic_stat_requested) 3004 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3005 values, size); 3006 } 3007 3008 if (ids) { 3009 for (i = 0; i < size; i++) { 3010 if (ids[i] >= basic_count) { 3011 no_ext_stat_requested = 0; 3012 break; 3013 } 3014 } 3015 } 3016 3017 /* Fill the xstats structure */ 3018 if (ids && no_ext_stat_requested) 3019 ret = eth_basic_stats_get(port_id, xstats); 3020 else 3021 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3022 3023 if (ret < 0) 3024 return ret; 3025 num_xstats_filled = (unsigned int)ret; 3026 3027 /* Return all stats */ 3028 if (!ids) { 3029 for (i = 0; i < num_xstats_filled; i++) 3030 values[i] = xstats[i].value; 3031 return expected_entries; 3032 } 3033 3034 /* Filter stats */ 3035 for (i = 0; i < size; i++) { 3036 if (ids[i] >= expected_entries) { 3037 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3038 return -1; 3039 } 3040 values[i] = xstats[ids[i]].value; 3041 } 3042 return size; 3043 } 3044 3045 int 3046 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3047 unsigned int n) 3048 { 3049 struct rte_eth_dev *dev; 3050 unsigned int count, i; 3051 signed int xcount = 0; 3052 int ret; 3053 3054 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3055 if (xstats == NULL && n > 0) 3056 return -EINVAL; 3057 dev = &rte_eth_devices[port_id]; 3058 3059 count = eth_dev_get_xstats_basic_count(dev); 3060 3061 /* implemented by the driver */ 3062 if (dev->dev_ops->xstats_get != NULL) { 3063 /* Retrieve the xstats from the driver at the end of the 3064 * xstats struct. 3065 */ 3066 xcount = (*dev->dev_ops->xstats_get)(dev, 3067 (n > count) ? xstats + count : NULL, 3068 (n > count) ? n - count : 0); 3069 3070 if (xcount < 0) 3071 return eth_err(port_id, xcount); 3072 } 3073 3074 if (n < count + xcount || xstats == NULL) 3075 return count + xcount; 3076 3077 /* now fill the xstats structure */ 3078 ret = eth_basic_stats_get(port_id, xstats); 3079 if (ret < 0) 3080 return ret; 3081 count = ret; 3082 3083 for (i = 0; i < count; i++) 3084 xstats[i].id = i; 3085 /* add an offset to driver-specific stats */ 3086 for ( ; i < count + xcount; i++) 3087 xstats[i].id += count; 3088 3089 return count + xcount; 3090 } 3091 3092 /* reset ethdev extended statistics */ 3093 int 3094 rte_eth_xstats_reset(uint16_t port_id) 3095 { 3096 struct rte_eth_dev *dev; 3097 3098 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3099 dev = &rte_eth_devices[port_id]; 3100 3101 /* implemented by the driver */ 3102 if (dev->dev_ops->xstats_reset != NULL) 3103 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3104 3105 /* fallback to default */ 3106 return rte_eth_stats_reset(port_id); 3107 } 3108 3109 static int 3110 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3111 uint8_t stat_idx, uint8_t is_rx) 3112 { 3113 struct rte_eth_dev *dev; 3114 3115 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3116 dev = &rte_eth_devices[port_id]; 3117 3118 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3119 return -EINVAL; 3120 3121 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3122 return -EINVAL; 3123 3124 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3125 return -EINVAL; 3126 3127 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3128 return -ENOTSUP; 3129 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3130 } 3131 3132 int 3133 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3134 
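/* stat_idx selects one of the RTE_ETHDEV_QUEUE_STAT_CNTRS counter slots */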
uint8_t stat_idx) 3135 { 3136 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3137 tx_queue_id, 3138 stat_idx, STAT_QMAP_TX)); 3139 } 3140 3141 int 3142 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3143 uint8_t stat_idx) 3144 { 3145 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3146 rx_queue_id, 3147 stat_idx, STAT_QMAP_RX)); 3148 } 3149 3150 int 3151 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3152 { 3153 struct rte_eth_dev *dev; 3154 3155 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3156 dev = &rte_eth_devices[port_id]; 3157 3158 if (fw_version == NULL && fw_size > 0) { 3159 RTE_ETHDEV_LOG(ERR, 3160 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3161 port_id); 3162 return -EINVAL; 3163 } 3164 3165 if (*dev->dev_ops->fw_version_get == NULL) 3166 return -ENOTSUP; 3167 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3168 fw_version, fw_size)); 3169 } 3170 3171 int 3172 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3173 { 3174 struct rte_eth_dev *dev; 3175 const struct rte_eth_desc_lim lim = { 3176 .nb_max = UINT16_MAX, 3177 .nb_min = 0, 3178 .nb_align = 1, 3179 .nb_seg_max = UINT16_MAX, 3180 .nb_mtu_seg_max = UINT16_MAX, 3181 }; 3182 int diag; 3183 3184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3185 dev = &rte_eth_devices[port_id]; 3186 3187 if (dev_info == NULL) { 3188 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3189 port_id); 3190 return -EINVAL; 3191 } 3192 3193 /* 3194 * Init dev_info before port_id check since caller does not have 3195 * return status and does not know if get is successful or not. 3196 */ 3197 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3198 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3199 3200 dev_info->rx_desc_lim = lim; 3201 dev_info->tx_desc_lim = lim; 3202 dev_info->device = dev->device; 3203 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3204 RTE_ETHER_CRC_LEN; 3205 dev_info->max_mtu = UINT16_MAX; 3206 3207 if (*dev->dev_ops->dev_infos_get == NULL) 3208 return -ENOTSUP; 3209 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3210 if (diag != 0) { 3211 /* Cleanup already filled in device information */ 3212 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3213 return eth_err(port_id, diag); 3214 } 3215 3216 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3217 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3218 RTE_MAX_QUEUES_PER_PORT); 3219 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3220 RTE_MAX_QUEUES_PER_PORT); 3221 3222 dev_info->driver_name = dev->device->driver->name; 3223 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3224 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3225 3226 dev_info->dev_flags = &dev->data->dev_flags; 3227 3228 return 0; 3229 } 3230 3231 int 3232 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3233 { 3234 struct rte_eth_dev *dev; 3235 3236 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3237 dev = &rte_eth_devices[port_id]; 3238 3239 if (dev_conf == NULL) { 3240 RTE_ETHDEV_LOG(ERR, 3241 "Cannot get ethdev port %u configuration to NULL\n", 3242 port_id); 3243 return -EINVAL; 3244 } 3245 3246 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3247 3248 return 0; 3249 } 3250 3251 int 3252 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3253 uint32_t 
*ptypes, int num) 3254 { 3255 int i, j; 3256 struct rte_eth_dev *dev; 3257 const uint32_t *all_ptypes; 3258 3259 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3260 dev = &rte_eth_devices[port_id]; 3261 3262 if (ptypes == NULL && num > 0) { 3263 RTE_ETHDEV_LOG(ERR, 3264 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3265 port_id); 3266 return -EINVAL; 3267 } 3268 3269 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3270 return 0; 3271 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3272 3273 if (!all_ptypes) 3274 return 0; 3275 3276 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3277 if (all_ptypes[i] & ptype_mask) { 3278 if (j < num) 3279 ptypes[j] = all_ptypes[i]; 3280 j++; 3281 } 3282 3283 return j; 3284 } 3285 3286 int 3287 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3288 uint32_t *set_ptypes, unsigned int num) 3289 { 3290 const uint32_t valid_ptype_masks[] = { 3291 RTE_PTYPE_L2_MASK, 3292 RTE_PTYPE_L3_MASK, 3293 RTE_PTYPE_L4_MASK, 3294 RTE_PTYPE_TUNNEL_MASK, 3295 RTE_PTYPE_INNER_L2_MASK, 3296 RTE_PTYPE_INNER_L3_MASK, 3297 RTE_PTYPE_INNER_L4_MASK, 3298 }; 3299 const uint32_t *all_ptypes; 3300 struct rte_eth_dev *dev; 3301 uint32_t unused_mask; 3302 unsigned int i, j; 3303 int ret; 3304 3305 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3306 dev = &rte_eth_devices[port_id]; 3307 3308 if (num > 0 && set_ptypes == NULL) { 3309 RTE_ETHDEV_LOG(ERR, 3310 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3311 port_id); 3312 return -EINVAL; 3313 } 3314 3315 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3316 *dev->dev_ops->dev_ptypes_set == NULL) { 3317 ret = 0; 3318 goto ptype_unknown; 3319 } 3320 3321 if (ptype_mask == 0) { 3322 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3323 ptype_mask); 3324 goto ptype_unknown; 3325 } 3326 3327 unused_mask = ptype_mask; 3328 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3329 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3330 if (mask && mask != valid_ptype_masks[i]) { 3331 ret = -EINVAL; 3332 goto ptype_unknown; 3333 } 3334 unused_mask &= ~valid_ptype_masks[i]; 3335 } 3336 3337 if (unused_mask) { 3338 ret = -EINVAL; 3339 goto ptype_unknown; 3340 } 3341 3342 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3343 if (all_ptypes == NULL) { 3344 ret = 0; 3345 goto ptype_unknown; 3346 } 3347 3348 /* 3349 * Accommodate as many set_ptypes as possible. If the supplied 3350 * set_ptypes array is insufficient fill it partially. 
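 * (e.g. with num == 4 at most three ptypes are stored and the last
 * slot is left for the RTE_PTYPE_UNKNOWN terminator)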
3351 */ 3352 for (i = 0, j = 0; set_ptypes != NULL && 3353 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3354 if (ptype_mask & all_ptypes[i]) { 3355 if (j < num - 1) { 3356 set_ptypes[j] = all_ptypes[i]; 3357 j++; 3358 continue; 3359 } 3360 break; 3361 } 3362 } 3363 3364 if (set_ptypes != NULL && j < num) 3365 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3366 3367 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3368 3369 ptype_unknown: 3370 if (num > 0) 3371 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3372 3373 return ret; 3374 } 3375 3376 int 3377 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3378 unsigned int num) 3379 { 3380 int32_t ret; 3381 struct rte_eth_dev *dev; 3382 struct rte_eth_dev_info dev_info; 3383 3384 if (ma == NULL) { 3385 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3386 return -EINVAL; 3387 } 3388 3389 /* will check for us that port_id is a valid one */ 3390 ret = rte_eth_dev_info_get(port_id, &dev_info); 3391 if (ret != 0) 3392 return ret; 3393 3394 dev = &rte_eth_devices[port_id]; 3395 num = RTE_MIN(dev_info.max_mac_addrs, num); 3396 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3397 3398 return num; 3399 } 3400 3401 int 3402 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3403 { 3404 struct rte_eth_dev *dev; 3405 3406 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3407 dev = &rte_eth_devices[port_id]; 3408 3409 if (mac_addr == NULL) { 3410 RTE_ETHDEV_LOG(ERR, 3411 "Cannot get ethdev port %u MAC address to NULL\n", 3412 port_id); 3413 return -EINVAL; 3414 } 3415 3416 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3417 3418 return 0; 3419 } 3420 3421 int 3422 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3423 { 3424 struct rte_eth_dev *dev; 3425 3426 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3427 dev = &rte_eth_devices[port_id]; 3428 3429 if (mtu == NULL) { 3430 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3431 port_id); 3432 return -EINVAL; 3433 } 3434 3435 *mtu = dev->data->mtu; 3436 return 0; 3437 } 3438 3439 int 3440 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3441 { 3442 int ret; 3443 struct rte_eth_dev_info dev_info; 3444 struct rte_eth_dev *dev; 3445 3446 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3447 dev = &rte_eth_devices[port_id]; 3448 if (*dev->dev_ops->mtu_set == NULL) 3449 return -ENOTSUP; 3450 3451 /* 3452 * Check if the device supports dev_infos_get, if it does not 3453 * skip min_mtu/max_mtu validation here as this requires values 3454 * that are populated within the call to rte_eth_dev_info_get() 3455 * which relies on dev->dev_ops->dev_infos_get. 
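 * (Illustration: rte_eth_dev_set_mtu(port_id, 9000) is expected to
 * fail with -EINVAL when 9000 lies outside the PMD-reported
 * [min_mtu, max_mtu] range.)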
3456 */ 3457 if (*dev->dev_ops->dev_infos_get != NULL) { 3458 ret = rte_eth_dev_info_get(port_id, &dev_info); 3459 if (ret != 0) 3460 return ret; 3461 3462 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3463 if (ret != 0) 3464 return ret; 3465 } 3466 3467 if (dev->data->dev_configured == 0) { 3468 RTE_ETHDEV_LOG(ERR, 3469 "Port %u must be configured before MTU set\n", 3470 port_id); 3471 return -EINVAL; 3472 } 3473 3474 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3475 if (ret == 0) 3476 dev->data->mtu = mtu; 3477 3478 return eth_err(port_id, ret); 3479 } 3480 3481 int 3482 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3483 { 3484 struct rte_eth_dev *dev; 3485 int ret; 3486 3487 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3488 dev = &rte_eth_devices[port_id]; 3489 3490 if (!(dev->data->dev_conf.rxmode.offloads & 3491 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3492 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3493 port_id); 3494 return -ENOSYS; 3495 } 3496 3497 if (vlan_id > 4095) { 3498 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3499 port_id, vlan_id); 3500 return -EINVAL; 3501 } 3502 if (*dev->dev_ops->vlan_filter_set == NULL) 3503 return -ENOTSUP; 3504 3505 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3506 if (ret == 0) { 3507 struct rte_vlan_filter_conf *vfc; 3508 int vidx; 3509 int vbit; 3510 3511 vfc = &dev->data->vlan_filter_conf; 3512 vidx = vlan_id / 64; 3513 vbit = vlan_id % 64; 3514 3515 if (on) 3516 vfc->ids[vidx] |= RTE_BIT64(vbit); 3517 else 3518 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3519 } 3520 3521 return eth_err(port_id, ret); 3522 } 3523 3524 int 3525 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3526 int on) 3527 { 3528 struct rte_eth_dev *dev; 3529 3530 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3531 dev = &rte_eth_devices[port_id]; 3532 3533 if (rx_queue_id >= dev->data->nb_rx_queues) { 3534 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3535 return -EINVAL; 3536 } 3537 3538 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 3539 return -ENOTSUP; 3540 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3541 3542 return 0; 3543 } 3544 3545 int 3546 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3547 enum rte_vlan_type vlan_type, 3548 uint16_t tpid) 3549 { 3550 struct rte_eth_dev *dev; 3551 3552 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3553 dev = &rte_eth_devices[port_id]; 3554 3555 if (*dev->dev_ops->vlan_tpid_set == NULL) 3556 return -ENOTSUP; 3557 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3558 tpid)); 3559 } 3560 3561 int 3562 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3563 { 3564 struct rte_eth_dev_info dev_info; 3565 struct rte_eth_dev *dev; 3566 int ret = 0; 3567 int mask = 0; 3568 int cur, org = 0; 3569 uint64_t orig_offloads; 3570 uint64_t dev_offloads; 3571 uint64_t new_offloads; 3572 3573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3574 dev = &rte_eth_devices[port_id]; 3575 3576 /* save original values in case of failure */ 3577 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3578 dev_offloads = orig_offloads; 3579 3580 /* check which option changed by application */ 3581 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3582 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3583 if (cur != org) { 3584 if (cur) 3585 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3586 else 3587 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3588 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3589 } 3590 3591 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3592 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3593 if (cur != org) { 3594 if (cur) 3595 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3596 else 3597 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3598 mask |= RTE_ETH_VLAN_FILTER_MASK; 3599 } 3600 3601 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3602 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3603 if (cur != org) { 3604 if (cur) 3605 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3606 else 3607 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3608 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3609 } 3610 3611 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3612 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3613 if (cur != org) { 3614 if (cur) 3615 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3616 else 3617 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3618 mask |= RTE_ETH_QINQ_STRIP_MASK; 3619 } 3620 3621 /*no change*/ 3622 if (mask == 0) 3623 return ret; 3624 3625 ret = rte_eth_dev_info_get(port_id, &dev_info); 3626 if (ret != 0) 3627 return ret; 3628 3629 /* Rx VLAN offloading must be within its device capabilities */ 3630 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3631 new_offloads = dev_offloads & ~orig_offloads; 3632 RTE_ETHDEV_LOG(ERR, 3633 "Ethdev port_id=%u requested new added VLAN offloads " 3634 "0x%" PRIx64 " must be within Rx offloads capabilities " 3635 "0x%" PRIx64 " in %s()\n", 3636 port_id, new_offloads, dev_info.rx_offload_capa, 3637 __func__); 3638 return -EINVAL; 3639 } 3640 3641 if (*dev->dev_ops->vlan_offload_set == NULL) 3642 return -ENOTSUP; 3643 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3644 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3645 if (ret) { 3646 /* hit an error restore original values */ 3647 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3648 } 3649 3650 return eth_err(port_id, ret); 3651 } 3652 3653 int 3654 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3655 { 3656 struct rte_eth_dev *dev; 3657 uint64_t *dev_offloads; 3658 int ret = 0; 3659 3660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3661 dev = &rte_eth_devices[port_id]; 3662 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3663 3664 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3665 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3666 3667 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3668 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3669 3670 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3671 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3672 3673 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3674 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3675 3676 return ret; 3677 } 3678 3679 int 3680 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3681 { 3682 struct rte_eth_dev *dev; 3683 3684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3685 dev = &rte_eth_devices[port_id]; 3686 3687 if (*dev->dev_ops->vlan_pvid_set == NULL) 3688 return -ENOTSUP; 3689 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3690 } 3691 3692 int 3693 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3694 { 3695 struct rte_eth_dev *dev; 3696 3697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3698 dev = &rte_eth_devices[port_id]; 3699 3700 if (fc_conf == NULL) { 3701 RTE_ETHDEV_LOG(ERR, 3702 "Cannot get ethdev port %u flow control config to NULL\n", 3703 port_id); 3704 return -EINVAL; 3705 } 3706 3707 if 
(*dev->dev_ops->flow_ctrl_get == NULL) 3708 return -ENOTSUP; 3709 memset(fc_conf, 0, sizeof(*fc_conf)); 3710 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3711 } 3712 3713 int 3714 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3715 { 3716 struct rte_eth_dev *dev; 3717 3718 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3719 dev = &rte_eth_devices[port_id]; 3720 3721 if (fc_conf == NULL) { 3722 RTE_ETHDEV_LOG(ERR, 3723 "Cannot set ethdev port %u flow control from NULL config\n", 3724 port_id); 3725 return -EINVAL; 3726 } 3727 3728 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3729 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3730 return -EINVAL; 3731 } 3732 3733 if (*dev->dev_ops->flow_ctrl_set == NULL) 3734 return -ENOTSUP; 3735 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3736 } 3737 3738 int 3739 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3740 struct rte_eth_pfc_conf *pfc_conf) 3741 { 3742 struct rte_eth_dev *dev; 3743 3744 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3745 dev = &rte_eth_devices[port_id]; 3746 3747 if (pfc_conf == NULL) { 3748 RTE_ETHDEV_LOG(ERR, 3749 "Cannot set ethdev port %u priority flow control from NULL config\n", 3750 port_id); 3751 return -EINVAL; 3752 } 3753 3754 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3755 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3756 return -EINVAL; 3757 } 3758 3759 /* High water, low water validation are device specific */ 3760 if (*dev->dev_ops->priority_flow_ctrl_set) 3761 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3762 (dev, pfc_conf)); 3763 return -ENOTSUP; 3764 } 3765 3766 static int 3767 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3768 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3769 { 3770 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 3771 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3772 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 3773 RTE_ETHDEV_LOG(ERR, 3774 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 3775 pfc_queue_conf->rx_pause.tx_qid, 3776 dev_info->nb_tx_queues); 3777 return -EINVAL; 3778 } 3779 3780 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 3781 RTE_ETHDEV_LOG(ERR, 3782 "PFC TC not in range for Rx pause requested:%d max:%d\n", 3783 pfc_queue_conf->rx_pause.tc, tc_max); 3784 return -EINVAL; 3785 } 3786 } 3787 3788 return 0; 3789 } 3790 3791 static int 3792 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3793 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3794 { 3795 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 3796 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3797 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 3798 RTE_ETHDEV_LOG(ERR, 3799 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 3800 pfc_queue_conf->tx_pause.rx_qid, 3801 dev_info->nb_rx_queues); 3802 return -EINVAL; 3803 } 3804 3805 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 3806 RTE_ETHDEV_LOG(ERR, 3807 "PFC TC not in range for Tx pause requested:%d max:%d\n", 3808 pfc_queue_conf->tx_pause.tc, tc_max); 3809 return -EINVAL; 3810 } 3811 } 3812 3813 return 0; 3814 } 3815 3816 int 3817 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 3818 struct rte_eth_pfc_queue_info *pfc_queue_info) 3819 { 3820 struct rte_eth_dev *dev; 3821 3822 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3823 dev = 
&rte_eth_devices[port_id]; 3824 3825 if (pfc_queue_info == NULL) { 3826 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 3827 port_id); 3828 return -EINVAL; 3829 } 3830 3831 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3832 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3833 (dev, pfc_queue_info)); 3834 return -ENOTSUP; 3835 } 3836 3837 int 3838 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 3839 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3840 { 3841 struct rte_eth_pfc_queue_info pfc_info; 3842 struct rte_eth_dev_info dev_info; 3843 struct rte_eth_dev *dev; 3844 int ret; 3845 3846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3847 dev = &rte_eth_devices[port_id]; 3848 3849 if (pfc_queue_conf == NULL) { 3850 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 3851 port_id); 3852 return -EINVAL; 3853 } 3854 3855 ret = rte_eth_dev_info_get(port_id, &dev_info); 3856 if (ret != 0) 3857 return ret; 3858 3859 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 3860 if (ret != 0) 3861 return ret; 3862 3863 if (pfc_info.tc_max == 0) { 3864 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 3865 port_id); 3866 return -ENOTSUP; 3867 } 3868 3869 /* Check requested mode supported or not */ 3870 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 3871 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 3872 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 3873 port_id); 3874 return -EINVAL; 3875 } 3876 3877 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 3878 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 3879 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 3880 port_id); 3881 return -EINVAL; 3882 } 3883 3884 /* Validate Rx pause parameters */ 3885 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3886 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 3887 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 3888 pfc_queue_conf); 3889 if (ret != 0) 3890 return ret; 3891 } 3892 3893 /* Validate Tx pause parameters */ 3894 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3895 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 3896 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 3897 pfc_queue_conf); 3898 if (ret != 0) 3899 return ret; 3900 } 3901 3902 if (*dev->dev_ops->priority_flow_ctrl_queue_config) 3903 return eth_err(port_id, 3904 (*dev->dev_ops->priority_flow_ctrl_queue_config)( 3905 dev, pfc_queue_conf)); 3906 return -ENOTSUP; 3907 } 3908 3909 static int 3910 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3911 uint16_t reta_size) 3912 { 3913 uint16_t i, num; 3914 3915 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 3916 for (i = 0; i < num; i++) { 3917 if (reta_conf[i].mask) 3918 return 0; 3919 } 3920 3921 return -EINVAL; 3922 } 3923 3924 static int 3925 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3926 uint16_t reta_size, 3927 uint16_t max_rxq) 3928 { 3929 uint16_t i, idx, shift; 3930 3931 if (max_rxq == 0) { 3932 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3933 return -EINVAL; 3934 } 3935 3936 for (i = 0; i < reta_size; i++) { 3937 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3938 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3939 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 3940 (reta_conf[idx].reta[shift] >= max_rxq)) { 3941 RTE_ETHDEV_LOG(ERR, 3942 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3943 idx, shift, 3944 reta_conf[idx].reta[shift], max_rxq); 3945 return 
-EINVAL; 3946 } 3947 } 3948 3949 return 0; 3950 } 3951 3952 int 3953 rte_eth_dev_rss_reta_update(uint16_t port_id, 3954 struct rte_eth_rss_reta_entry64 *reta_conf, 3955 uint16_t reta_size) 3956 { 3957 enum rte_eth_rx_mq_mode mq_mode; 3958 struct rte_eth_dev *dev; 3959 int ret; 3960 3961 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3962 dev = &rte_eth_devices[port_id]; 3963 3964 if (reta_conf == NULL) { 3965 RTE_ETHDEV_LOG(ERR, 3966 "Cannot update ethdev port %u RSS RETA to NULL\n", 3967 port_id); 3968 return -EINVAL; 3969 } 3970 3971 if (reta_size == 0) { 3972 RTE_ETHDEV_LOG(ERR, 3973 "Cannot update ethdev port %u RSS RETA with zero size\n", 3974 port_id); 3975 return -EINVAL; 3976 } 3977 3978 /* Check mask bits */ 3979 ret = eth_check_reta_mask(reta_conf, reta_size); 3980 if (ret < 0) 3981 return ret; 3982 3983 /* Check entry value */ 3984 ret = eth_check_reta_entry(reta_conf, reta_size, 3985 dev->data->nb_rx_queues); 3986 if (ret < 0) 3987 return ret; 3988 3989 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3990 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3991 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3992 return -ENOTSUP; 3993 } 3994 3995 if (*dev->dev_ops->reta_update == NULL) 3996 return -ENOTSUP; 3997 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3998 reta_size)); 3999 } 4000 4001 int 4002 rte_eth_dev_rss_reta_query(uint16_t port_id, 4003 struct rte_eth_rss_reta_entry64 *reta_conf, 4004 uint16_t reta_size) 4005 { 4006 struct rte_eth_dev *dev; 4007 int ret; 4008 4009 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4010 dev = &rte_eth_devices[port_id]; 4011 4012 if (reta_conf == NULL) { 4013 RTE_ETHDEV_LOG(ERR, 4014 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4015 port_id); 4016 return -EINVAL; 4017 } 4018 4019 /* Check mask bits */ 4020 ret = eth_check_reta_mask(reta_conf, reta_size); 4021 if (ret < 0) 4022 return ret; 4023 4024 if (*dev->dev_ops->reta_query == NULL) 4025 return -ENOTSUP; 4026 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4027 reta_size)); 4028 } 4029 4030 int 4031 rte_eth_dev_rss_hash_update(uint16_t port_id, 4032 struct rte_eth_rss_conf *rss_conf) 4033 { 4034 struct rte_eth_dev *dev; 4035 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4036 enum rte_eth_rx_mq_mode mq_mode; 4037 int ret; 4038 4039 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4040 dev = &rte_eth_devices[port_id]; 4041 4042 if (rss_conf == NULL) { 4043 RTE_ETHDEV_LOG(ERR, 4044 "Cannot update ethdev port %u RSS hash from NULL config\n", 4045 port_id); 4046 return -EINVAL; 4047 } 4048 4049 ret = rte_eth_dev_info_get(port_id, &dev_info); 4050 if (ret != 0) 4051 return ret; 4052 4053 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4054 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4055 dev_info.flow_type_rss_offloads) { 4056 RTE_ETHDEV_LOG(ERR, 4057 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4058 port_id, rss_conf->rss_hf, 4059 dev_info.flow_type_rss_offloads); 4060 return -EINVAL; 4061 } 4062 4063 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4064 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4065 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4066 return -ENOTSUP; 4067 } 4068 4069 if (*dev->dev_ops->rss_hash_update == NULL) 4070 return -ENOTSUP; 4071 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4072 rss_conf)); 4073 } 4074 4075 int 4076 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4077 struct 
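/*
 * Hedged sketch of a runtime RSS hash update via the function above,
 * assuming the port was configured with an RTE_ETH_MQ_RX_RSS mq_mode
 * and the PMD supports hashing on IP fields (a NULL rss_key leaves the
 * key unchanged):
 *
 *	struct rte_eth_rss_conf conf = { .rss_hf = RTE_ETH_RSS_IP };
 *
 *	ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */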
rte_eth_rss_conf *rss_conf) 4078 { 4079 struct rte_eth_dev *dev; 4080 4081 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4082 dev = &rte_eth_devices[port_id]; 4083 4084 if (rss_conf == NULL) { 4085 RTE_ETHDEV_LOG(ERR, 4086 "Cannot get ethdev port %u RSS hash config to NULL\n", 4087 port_id); 4088 return -EINVAL; 4089 } 4090 4091 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4092 return -ENOTSUP; 4093 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4094 rss_conf)); 4095 } 4096 4097 int 4098 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4099 struct rte_eth_udp_tunnel *udp_tunnel) 4100 { 4101 struct rte_eth_dev *dev; 4102 4103 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4104 dev = &rte_eth_devices[port_id]; 4105 4106 if (udp_tunnel == NULL) { 4107 RTE_ETHDEV_LOG(ERR, 4108 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4109 port_id); 4110 return -EINVAL; 4111 } 4112 4113 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4114 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4115 return -EINVAL; 4116 } 4117 4118 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4119 return -ENOTSUP; 4120 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4121 udp_tunnel)); 4122 } 4123 4124 int 4125 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4126 struct rte_eth_udp_tunnel *udp_tunnel) 4127 { 4128 struct rte_eth_dev *dev; 4129 4130 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4131 dev = &rte_eth_devices[port_id]; 4132 4133 if (udp_tunnel == NULL) { 4134 RTE_ETHDEV_LOG(ERR, 4135 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4136 port_id); 4137 return -EINVAL; 4138 } 4139 4140 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4141 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4142 return -EINVAL; 4143 } 4144 4145 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4146 return -ENOTSUP; 4147 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4148 udp_tunnel)); 4149 } 4150 4151 int 4152 rte_eth_led_on(uint16_t port_id) 4153 { 4154 struct rte_eth_dev *dev; 4155 4156 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4157 dev = &rte_eth_devices[port_id]; 4158 4159 if (*dev->dev_ops->dev_led_on == NULL) 4160 return -ENOTSUP; 4161 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4162 } 4163 4164 int 4165 rte_eth_led_off(uint16_t port_id) 4166 { 4167 struct rte_eth_dev *dev; 4168 4169 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4170 dev = &rte_eth_devices[port_id]; 4171 4172 if (*dev->dev_ops->dev_led_off == NULL) 4173 return -ENOTSUP; 4174 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4175 } 4176 4177 int 4178 rte_eth_fec_get_capability(uint16_t port_id, 4179 struct rte_eth_fec_capa *speed_fec_capa, 4180 unsigned int num) 4181 { 4182 struct rte_eth_dev *dev; 4183 int ret; 4184 4185 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4186 dev = &rte_eth_devices[port_id]; 4187 4188 if (speed_fec_capa == NULL && num > 0) { 4189 RTE_ETHDEV_LOG(ERR, 4190 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4191 port_id); 4192 return -EINVAL; 4193 } 4194 4195 if (*dev->dev_ops->fec_get_capability == NULL) 4196 return -ENOTSUP; 4197 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4198 4199 return ret; 4200 } 4201 4202 int 4203 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4204 { 4205 struct rte_eth_dev *dev; 4206 4207 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4208 dev = &rte_eth_devices[port_id]; 4209 4210 if 
(fec_capa == NULL) { 4211 RTE_ETHDEV_LOG(ERR, 4212 "Cannot get ethdev port %u current FEC mode to NULL\n", 4213 port_id); 4214 return -EINVAL; 4215 } 4216 4217 if (*dev->dev_ops->fec_get == NULL) 4218 return -ENOTSUP; 4219 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4220 } 4221 4222 int 4223 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4224 { 4225 struct rte_eth_dev *dev; 4226 4227 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4228 dev = &rte_eth_devices[port_id]; 4229 4230 if (*dev->dev_ops->fec_set == NULL) 4231 return -ENOTSUP; 4232 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4233 } 4234 4235 /* 4236 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4237 * an empty spot. 4238 */ 4239 static int 4240 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4241 { 4242 struct rte_eth_dev_info dev_info; 4243 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4244 unsigned i; 4245 int ret; 4246 4247 ret = rte_eth_dev_info_get(port_id, &dev_info); 4248 if (ret != 0) 4249 return -1; 4250 4251 for (i = 0; i < dev_info.max_mac_addrs; i++) 4252 if (memcmp(addr, &dev->data->mac_addrs[i], 4253 RTE_ETHER_ADDR_LEN) == 0) 4254 return i; 4255 4256 return -1; 4257 } 4258 4259 static const struct rte_ether_addr null_mac_addr; 4260 4261 int 4262 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4263 uint32_t pool) 4264 { 4265 struct rte_eth_dev *dev; 4266 int index; 4267 uint64_t pool_mask; 4268 int ret; 4269 4270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4271 dev = &rte_eth_devices[port_id]; 4272 4273 if (addr == NULL) { 4274 RTE_ETHDEV_LOG(ERR, 4275 "Cannot add ethdev port %u MAC address from NULL address\n", 4276 port_id); 4277 return -EINVAL; 4278 } 4279 4280 if (*dev->dev_ops->mac_addr_add == NULL) 4281 return -ENOTSUP; 4282 4283 if (rte_is_zero_ether_addr(addr)) { 4284 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4285 port_id); 4286 return -EINVAL; 4287 } 4288 if (pool >= RTE_ETH_64_POOLS) { 4289 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4290 return -EINVAL; 4291 } 4292 4293 index = eth_dev_get_mac_addr_index(port_id, addr); 4294 if (index < 0) { 4295 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4296 if (index < 0) { 4297 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4298 port_id); 4299 return -ENOSPC; 4300 } 4301 } else { 4302 pool_mask = dev->data->mac_pool_sel[index]; 4303 4304 /* Check if both MAC address and pool is already there, and do nothing */ 4305 if (pool_mask & RTE_BIT64(pool)) 4306 return 0; 4307 } 4308 4309 /* Update NIC */ 4310 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4311 4312 if (ret == 0) { 4313 /* Update address in NIC data structure */ 4314 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4315 4316 /* Update pool bitmap in NIC data structure */ 4317 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4318 } 4319 4320 return eth_err(port_id, ret); 4321 } 4322 4323 int 4324 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4325 { 4326 struct rte_eth_dev *dev; 4327 int index; 4328 4329 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4330 dev = &rte_eth_devices[port_id]; 4331 4332 if (addr == NULL) { 4333 RTE_ETHDEV_LOG(ERR, 4334 "Cannot remove ethdev port %u MAC address from NULL address\n", 4335 port_id); 4336 return -EINVAL; 4337 } 4338 4339 if (*dev->dev_ops->mac_addr_remove == NULL) 4340 return -ENOTSUP; 4341 4342 
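	/*
	 * Look the address up first: slot 0 always holds the port's default
	 * MAC address, which can only be replaced via
	 * rte_eth_dev_default_mac_addr_set(), never removed.
	 */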
index = eth_dev_get_mac_addr_index(port_id, addr); 4343 if (index == 0) { 4344 RTE_ETHDEV_LOG(ERR, 4345 "Port %u: Cannot remove default MAC address\n", 4346 port_id); 4347 return -EADDRINUSE; 4348 } else if (index < 0) 4349 return 0; /* Do nothing if address wasn't found */ 4350 4351 /* Update NIC */ 4352 (*dev->dev_ops->mac_addr_remove)(dev, index); 4353 4354 /* Update address in NIC data structure */ 4355 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4356 4357 /* reset pool bitmap */ 4358 dev->data->mac_pool_sel[index] = 0; 4359 4360 return 0; 4361 } 4362 4363 int 4364 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4365 { 4366 struct rte_eth_dev *dev; 4367 int ret; 4368 4369 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4370 dev = &rte_eth_devices[port_id]; 4371 4372 if (addr == NULL) { 4373 RTE_ETHDEV_LOG(ERR, 4374 "Cannot set ethdev port %u default MAC address from NULL address\n", 4375 port_id); 4376 return -EINVAL; 4377 } 4378 4379 if (!rte_is_valid_assigned_ether_addr(addr)) 4380 return -EINVAL; 4381 4382 if (*dev->dev_ops->mac_addr_set == NULL) 4383 return -ENOTSUP; 4384 4385 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4386 if (ret < 0) 4387 return ret; 4388 4389 /* Update default address in NIC data structure */ 4390 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4391 4392 return 0; 4393 } 4394 4395 4396 /* 4397 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4398 * an empty spot. 4399 */ 4400 static int 4401 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4402 const struct rte_ether_addr *addr) 4403 { 4404 struct rte_eth_dev_info dev_info; 4405 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4406 unsigned i; 4407 int ret; 4408 4409 ret = rte_eth_dev_info_get(port_id, &dev_info); 4410 if (ret != 0) 4411 return -1; 4412 4413 if (!dev->data->hash_mac_addrs) 4414 return -1; 4415 4416 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4417 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4418 RTE_ETHER_ADDR_LEN) == 0) 4419 return i; 4420 4421 return -1; 4422 } 4423 4424 int 4425 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4426 uint8_t on) 4427 { 4428 int index; 4429 int ret; 4430 struct rte_eth_dev *dev; 4431 4432 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4433 dev = &rte_eth_devices[port_id]; 4434 4435 if (addr == NULL) { 4436 RTE_ETHDEV_LOG(ERR, 4437 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4438 port_id); 4439 return -EINVAL; 4440 } 4441 4442 if (rte_is_zero_ether_addr(addr)) { 4443 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4444 port_id); 4445 return -EINVAL; 4446 } 4447 4448 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4449 /* Check if it's already there, and do nothing */ 4450 if ((index >= 0) && on) 4451 return 0; 4452 4453 if (index < 0) { 4454 if (!on) { 4455 RTE_ETHDEV_LOG(ERR, 4456 "Port %u: the MAC address was not set in UTA\n", 4457 port_id); 4458 return -EINVAL; 4459 } 4460 4461 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4462 if (index < 0) { 4463 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4464 port_id); 4465 return -ENOSPC; 4466 } 4467 } 4468 4469 if (*dev->dev_ops->uc_hash_table_set == NULL) 4470 return -ENOTSUP; 4471 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4472 if (ret == 0) { 4473 /* Update address in NIC data structure */ 4474 if (on) 4475 rte_ether_addr_copy(addr, 4476 &dev->data->hash_mac_addrs[index]); 4477 else 
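			/* on == 0: clear the slot so it can be reused */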
4478 			rte_ether_addr_copy(&null_mac_addr,
4479 					&dev->data->hash_mac_addrs[index]);
4480 	}
4481 
4482 	return eth_err(port_id, ret);
4483 }
4484 
4485 int
4486 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4487 {
4488 	struct rte_eth_dev *dev;
4489 
4490 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4491 	dev = &rte_eth_devices[port_id];
4492 
4493 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
4494 		return -ENOTSUP;
4495 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4496 								on));
4497 }
4498 
4499 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4500 					uint32_t tx_rate)
4501 {
4502 	struct rte_eth_dev *dev;
4503 	struct rte_eth_dev_info dev_info;
4504 	struct rte_eth_link link;
4505 	int ret;
4506 
4507 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4508 	dev = &rte_eth_devices[port_id];
4509 
4510 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4511 	if (ret != 0)
4512 		return ret;
4513 
4514 	link = dev->data->dev_link;
4515 
4516 	if (queue_idx >= dev_info.max_tx_queues) {
4517 		RTE_ETHDEV_LOG(ERR,
4518 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
4519 			port_id, queue_idx);
4520 		return -EINVAL;
4521 	}
4522 
4523 	if (tx_rate > link.link_speed) {
4524 		RTE_ETHDEV_LOG(ERR,
4525 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4526 			tx_rate, link.link_speed);
4527 		return -EINVAL;
4528 	}
4529 
4530 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
4531 		return -ENOTSUP;
4532 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4533 							queue_idx, tx_rate));
4534 }
4535 
4536 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4537 			       uint8_t avail_thresh)
4538 {
4539 	struct rte_eth_dev *dev;
4540 
4541 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4542 	dev = &rte_eth_devices[port_id];
4543 
4544 	if (queue_id >= dev->data->nb_rx_queues) {
4545 		RTE_ETHDEV_LOG(ERR,
4546 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
4547 			port_id, queue_id);
4548 		return -EINVAL;
4549 	}
4550 
4551 	if (avail_thresh > 99) {
4552 		RTE_ETHDEV_LOG(ERR,
4553 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
4554 			port_id);
4555 		return -EINVAL;
4556 	}
4557 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
4558 		return -ENOTSUP;
4559 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4560 							queue_id, avail_thresh));
4561 }
4562 
4563 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4564 				 uint8_t *avail_thresh)
4565 {
4566 	struct rte_eth_dev *dev;
4567 
4568 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4569 	dev = &rte_eth_devices[port_id];
4570 
4571 	if (queue_id == NULL)
4572 		return -EINVAL;
4573 	if (*queue_id >= dev->data->nb_rx_queues)
4574 		*queue_id = 0;
4575 
4576 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
4577 		return -ENOTSUP;
4578 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4579 							queue_id, avail_thresh));
4580 }
4581 
4582 RTE_INIT(eth_dev_init_fp_ops)
4583 {
4584 	uint32_t i;
4585 
4586 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4587 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4588 }
4589 
4590 RTE_INIT(eth_dev_init_cb_lists)
4591 {
4592 	uint16_t i;
4593 
4594 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4595 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4596 }
4597 
4598 int
4599 rte_eth_dev_callback_register(uint16_t port_id,
4600 			enum rte_eth_event_type event,
4601 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4602 {
4603 	struct rte_eth_dev *dev;
4604 	struct rte_eth_dev_callback *user_cb;
4605 	uint16_t next_port;
4606 	uint16_t last_port;
4607 
4608 	if (cb_fn == NULL) {
4609 		RTE_ETHDEV_LOG(ERR,
4610 			"Cannot register ethdev port %u callback from NULL\n",
4611 			port_id);
4612 		return -EINVAL;
4613 	}
4614 
4615 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4616 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4617 		return -EINVAL;
4618 	}
4619 
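	/* A single port, or every possible port when RTE_ETH_ALL is given. */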
4620 	if (port_id == RTE_ETH_ALL) {
4621 		next_port = 0;
4622 		last_port = RTE_MAX_ETHPORTS - 1;
4623 	} else {
4624 		next_port = last_port = port_id;
4625 	}
4626 
4627 	rte_spinlock_lock(&eth_dev_cb_lock);
4628 
4629 	do {
4630 		dev = &rte_eth_devices[next_port];
4631 
4632 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4633 			if (user_cb->cb_fn == cb_fn &&
4634 				user_cb->cb_arg == cb_arg &&
4635 				user_cb->event == event) {
4636 				break;
4637 			}
4638 		}
4639 
4640 		/* create a new callback. */
4641 		if (user_cb == NULL) {
4642 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4643 				sizeof(struct rte_eth_dev_callback), 0);
4644 			if (user_cb != NULL) {
4645 				user_cb->cb_fn = cb_fn;
4646 				user_cb->cb_arg = cb_arg;
4647 				user_cb->event = event;
4648 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4649 						user_cb, next);
4650 			} else {
4651 				rte_spinlock_unlock(&eth_dev_cb_lock);
4652 				rte_eth_dev_callback_unregister(port_id, event,
4653 						cb_fn, cb_arg);
4654 				return -ENOMEM;
4655 			}
4656 
4657 		}
4658 	} while (++next_port <= last_port);
4659 
4660 	rte_spinlock_unlock(&eth_dev_cb_lock);
4661 	return 0;
4662 }
4663 
4664 int
4665 rte_eth_dev_callback_unregister(uint16_t port_id,
4666 			enum rte_eth_event_type event,
4667 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4668 {
4669 	int ret;
4670 	struct rte_eth_dev *dev;
4671 	struct rte_eth_dev_callback *cb, *next;
4672 	uint16_t next_port;
4673 	uint16_t last_port;
4674 
4675 	if (cb_fn == NULL) {
4676 		RTE_ETHDEV_LOG(ERR,
4677 			"Cannot unregister ethdev port %u callback from NULL\n",
4678 			port_id);
4679 		return -EINVAL;
4680 	}
4681 
4682 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4683 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4684 		return -EINVAL;
4685 	}
4686 
4687 	if (port_id == RTE_ETH_ALL) {
4688 		next_port = 0;
4689 		last_port = RTE_MAX_ETHPORTS - 1;
4690 	} else {
4691 		next_port = last_port = port_id;
4692 	}
4693 
4694 	rte_spinlock_lock(&eth_dev_cb_lock);
4695 
4696 	do {
4697 		dev = &rte_eth_devices[next_port];
4698 		ret = 0;
4699 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4700 		     cb = next) {
4701 
4702 			next = TAILQ_NEXT(cb, next);
4703 
4704 			if (cb->cb_fn != cb_fn || cb->event != event ||
4705 					(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4706 				continue;
4707 
4708 			/*
4709 			 * if this callback is not executing right now,
4710 			 * then remove it.
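			 * Otherwise leave it in place and report -EAGAIN so
			 * the caller can retry once the callback finishes.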
4711 			 */
4712 			if (cb->active == 0) {
4713 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4714 				rte_free(cb);
4715 			} else {
4716 				ret = -EAGAIN;
4717 			}
4718 		}
4719 	} while (++next_port <= last_port);
4720 
4721 	rte_spinlock_unlock(&eth_dev_cb_lock);
4722 	return ret;
4723 }
4724 
4725 int
4726 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4727 {
4728 	uint32_t vec;
4729 	struct rte_eth_dev *dev;
4730 	struct rte_intr_handle *intr_handle;
4731 	uint16_t qid;
4732 	int rc;
4733 
4734 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4735 	dev = &rte_eth_devices[port_id];
4736 
4737 	if (!dev->intr_handle) {
4738 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4739 		return -ENOTSUP;
4740 	}
4741 
4742 	intr_handle = dev->intr_handle;
4743 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4744 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4745 		return -EPERM;
4746 	}
4747 
4748 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4749 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4750 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4751 		if (rc && rc != -EEXIST) {
4752 			RTE_ETHDEV_LOG(ERR,
4753 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4754 				port_id, qid, op, epfd, vec);
4755 		}
4756 	}
4757 
4758 	return 0;
4759 }
4760 
4761 int
4762 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4763 {
4764 	struct rte_intr_handle *intr_handle;
4765 	struct rte_eth_dev *dev;
4766 	unsigned int efd_idx;
4767 	uint32_t vec;
4768 	int fd;
4769 
4770 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4771 	dev = &rte_eth_devices[port_id];
4772 
4773 	if (queue_id >= dev->data->nb_rx_queues) {
4774 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4775 		return -1;
4776 	}
4777 
4778 	if (!dev->intr_handle) {
4779 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4780 		return -1;
4781 	}
4782 
4783 	intr_handle = dev->intr_handle;
4784 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4785 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4786 		return -1;
4787 	}
4788 
4789 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4790 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4791 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4792 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4793 
4794 	return fd;
4795 }
4796 
4797 int
4798 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4799 			  int epfd, int op, void *data)
4800 {
4801 	uint32_t vec;
4802 	struct rte_eth_dev *dev;
4803 	struct rte_intr_handle *intr_handle;
4804 	int rc;
4805 
4806 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4807 	dev = &rte_eth_devices[port_id];
4808 
4809 	if (queue_id >= dev->data->nb_rx_queues) {
4810 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4811 		return -EINVAL;
4812 	}
4813 
4814 	if (!dev->intr_handle) {
4815 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4816 		return -ENOTSUP;
4817 	}
4818 
4819 	intr_handle = dev->intr_handle;
4820 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4821 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4822 		return -EPERM;
4823 	}
4824 
4825 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4826 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4827 	if (rc && rc != -EEXIST) {
4828 		RTE_ETHDEV_LOG(ERR,
4829 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4830 			port_id, queue_id, op, epfd, vec);
4831 		return rc;
4832 	}
4833 
4834 	return 0;
4835 }
4836 
4837 int
4838 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4839 			   uint16_t queue_id)
4840 {
4841 	struct rte_eth_dev *dev;
4842 	int ret;
4843 
4844 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4845 	dev = &rte_eth_devices[port_id];
4846 
4847 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4848 	if (ret != 0)
4849 		return ret;
4850 
4851 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
4852 		return -ENOTSUP;
4853 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4854 }
4855 
4856 int
4857 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4858 			    uint16_t queue_id)
4859 {
4860 	struct rte_eth_dev *dev;
4861 	int ret;
4862 
4863 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4864 	dev = &rte_eth_devices[port_id];
4865 
4866 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4867 	if (ret != 0)
4868 		return ret;
4869 
4870 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
4871 		return -ENOTSUP;
4872 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4873 }
4874 
4875 
4876 const struct rte_eth_rxtx_callback *
4877 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4878 		rte_rx_callback_fn fn, void *user_param)
4879 {
4880 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4881 	rte_errno = ENOTSUP;
4882 	return NULL;
4883 #endif
4884 	struct rte_eth_dev *dev;
4885 
4886 	/* check input parameters */
4887 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4888 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4889 		rte_errno = EINVAL;
4890 		return NULL;
4891 	}
4892 	dev = &rte_eth_devices[port_id];
4893 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4894 		rte_errno = EINVAL;
4895 		return NULL;
4896 	}
4897 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4898 
4899 	if (cb == NULL) {
4900 		rte_errno = ENOMEM;
4901 		return NULL;
4902 	}
4903 
4904 	cb->fn.rx = fn;
4905 	cb->param = user_param;
4906 
4907 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4908 	/* Add the callbacks in fifo order. */
4909 	struct rte_eth_rxtx_callback *tail =
4910 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4911 
4912 	if (!tail) {
4913 		/* Stores to cb->fn and cb->param should complete before
4914 		 * cb is visible to data plane.
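		 * The data plane loads the head pointer and dereferences it,
		 * so the release store below publishes cb's fields with it.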
4915 		 */
4916 		__atomic_store_n(
4917 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4918 			cb, __ATOMIC_RELEASE);
4919 
4920 	} else {
4921 		while (tail->next)
4922 			tail = tail->next;
4923 		/* Stores to cb->fn and cb->param should complete before
4924 		 * cb is visible to data plane.
4925 		 */
4926 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4927 	}
4928 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4929 
4930 	return cb;
4931 }
4932 
4933 const struct rte_eth_rxtx_callback *
4934 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4935 		rte_rx_callback_fn fn, void *user_param)
4936 {
4937 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4938 	rte_errno = ENOTSUP;
4939 	return NULL;
4940 #endif
4941 	/* check input parameters */
4942 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4943 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4944 		rte_errno = EINVAL;
4945 		return NULL;
4946 	}
4947 
4948 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4949 
4950 	if (cb == NULL) {
4951 		rte_errno = ENOMEM;
4952 		return NULL;
4953 	}
4954 
4955 	cb->fn.rx = fn;
4956 	cb->param = user_param;
4957 
4958 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4959 	/* Add the callbacks at first position */
4960 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4961 	/* Stores to cb->fn, cb->param and cb->next should complete before
4962 	 * cb is visible to data plane threads.
4963 	 */
4964 	__atomic_store_n(
4965 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4966 		cb, __ATOMIC_RELEASE);
4967 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4968 
4969 	return cb;
4970 }
4971 
4972 const struct rte_eth_rxtx_callback *
4973 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4974 		rte_tx_callback_fn fn, void *user_param)
4975 {
4976 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4977 	rte_errno = ENOTSUP;
4978 	return NULL;
4979 #endif
4980 	struct rte_eth_dev *dev;
4981 
4982 	/* check input parameters */
4983 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4984 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4985 		rte_errno = EINVAL;
4986 		return NULL;
4987 	}
4988 
4989 	dev = &rte_eth_devices[port_id];
4990 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4991 		rte_errno = EINVAL;
4992 		return NULL;
4993 	}
4994 
4995 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4996 
4997 	if (cb == NULL) {
4998 		rte_errno = ENOMEM;
4999 		return NULL;
5000 	}
5001 
5002 	cb->fn.tx = fn;
5003 	cb->param = user_param;
5004 
5005 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5006 	/* Add the callbacks in fifo order. */
5007 	struct rte_eth_rxtx_callback *tail =
5008 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5009 
5010 	if (!tail) {
5011 		/* Stores to cb->fn and cb->param should complete before
5012 		 * cb is visible to data plane.
5013 		 */
5014 		__atomic_store_n(
5015 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5016 			cb, __ATOMIC_RELEASE);
5017 
5018 	} else {
5019 		while (tail->next)
5020 			tail = tail->next;
5021 		/* Stores to cb->fn and cb->param should complete before
5022 		 * cb is visible to data plane.
5023 		 */
5024 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5025 	}
5026 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5027 
5028 	return cb;
5029 }
5030 
5031 int
5032 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5033 		const struct rte_eth_rxtx_callback *user_cb)
5034 {
5035 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5036 	return -ENOTSUP;
5037 #endif
5038 	/* Check input parameters. */
5039 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5040 	if (user_cb == NULL ||
5041 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5042 		return -EINVAL;
5043 
5044 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5045 	struct rte_eth_rxtx_callback *cb;
5046 	struct rte_eth_rxtx_callback **prev_cb;
5047 	int ret = -EINVAL;
5048 
5049 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5050 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5051 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5052 		cb = *prev_cb;
5053 		if (cb == user_cb) {
5054 			/* Remove the user cb from the callback list. */
5055 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5056 			ret = 0;
5057 			break;
5058 		}
5059 	}
5060 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5061 
5062 	return ret;
5063 }
5064 
5065 int
5066 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5067 		const struct rte_eth_rxtx_callback *user_cb)
5068 {
5069 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5070 	return -ENOTSUP;
5071 #endif
5072 	/* Check input parameters. */
5073 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5074 	if (user_cb == NULL ||
5075 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5076 		return -EINVAL;
5077 
5078 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5079 	int ret = -EINVAL;
5080 	struct rte_eth_rxtx_callback *cb;
5081 	struct rte_eth_rxtx_callback **prev_cb;
5082 
5083 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5084 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5085 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5086 		cb = *prev_cb;
5087 		if (cb == user_cb) {
5088 			/* Remove the user cb from the callback list. */
5089 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5090 			ret = 0;
5091 			break;
5092 		}
5093 	}
5094 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5095 
5096 	return ret;
5097 }
5098 
5099 int
5100 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5101 	struct rte_eth_rxq_info *qinfo)
5102 {
5103 	struct rte_eth_dev *dev;
5104 
5105 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5106 	dev = &rte_eth_devices[port_id];
5107 
5108 	if (queue_id >= dev->data->nb_rx_queues) {
5109 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5110 		return -EINVAL;
5111 	}
5112 
5113 	if (qinfo == NULL) {
5114 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5115 			port_id, queue_id);
5116 		return -EINVAL;
5117 	}
5118 
5119 	if (dev->data->rx_queues == NULL ||
5120 			dev->data->rx_queues[queue_id] == NULL) {
5121 		RTE_ETHDEV_LOG(ERR,
5122 			"Rx queue %"PRIu16" of device with port_id=%"
5123 			PRIu16" has not been setup\n",
5124 			queue_id, port_id);
5125 		return -EINVAL;
5126 	}
5127 
5128 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5129 		RTE_ETHDEV_LOG(INFO,
5130 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5131 			queue_id, port_id);
5132 		return -EINVAL;
5133 	}
5134 
5135 	if (*dev->dev_ops->rxq_info_get == NULL)
5136 		return -ENOTSUP;
5137 
5138 	memset(qinfo, 0, sizeof(*qinfo));
5139 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5140 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5141 
5142 	return 0;
5143 }
5144 
5145 int
5146 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5147 	struct rte_eth_txq_info *qinfo)
5148 {
5149 	struct rte_eth_dev *dev;
5150 
5151 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5152 	dev = &rte_eth_devices[port_id];
5153 
5154 	if (queue_id >= dev->data->nb_tx_queues) {
5155 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5156 		return -EINVAL;
5157 	}
5158 
5159 	if (qinfo == NULL) {
5160 		RTE_ETHDEV_LOG(ERR, "Cannot get 
ethdev port %u Tx queue %u info to NULL\n", 5161 port_id, queue_id); 5162 return -EINVAL; 5163 } 5164 5165 if (dev->data->tx_queues == NULL || 5166 dev->data->tx_queues[queue_id] == NULL) { 5167 RTE_ETHDEV_LOG(ERR, 5168 "Tx queue %"PRIu16" of device with port_id=%" 5169 PRIu16" has not been setup\n", 5170 queue_id, port_id); 5171 return -EINVAL; 5172 } 5173 5174 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5175 RTE_ETHDEV_LOG(INFO, 5176 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5177 queue_id, port_id); 5178 return -EINVAL; 5179 } 5180 5181 if (*dev->dev_ops->txq_info_get == NULL) 5182 return -ENOTSUP; 5183 5184 memset(qinfo, 0, sizeof(*qinfo)); 5185 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5186 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5187 5188 return 0; 5189 } 5190 5191 int 5192 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5193 struct rte_eth_burst_mode *mode) 5194 { 5195 struct rte_eth_dev *dev; 5196 5197 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5198 dev = &rte_eth_devices[port_id]; 5199 5200 if (queue_id >= dev->data->nb_rx_queues) { 5201 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5202 return -EINVAL; 5203 } 5204 5205 if (mode == NULL) { 5206 RTE_ETHDEV_LOG(ERR, 5207 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5208 port_id, queue_id); 5209 return -EINVAL; 5210 } 5211 5212 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5213 return -ENOTSUP; 5214 memset(mode, 0, sizeof(*mode)); 5215 return eth_err(port_id, 5216 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5217 } 5218 5219 int 5220 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5221 struct rte_eth_burst_mode *mode) 5222 { 5223 struct rte_eth_dev *dev; 5224 5225 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5226 dev = &rte_eth_devices[port_id]; 5227 5228 if (queue_id >= dev->data->nb_tx_queues) { 5229 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5230 return -EINVAL; 5231 } 5232 5233 if (mode == NULL) { 5234 RTE_ETHDEV_LOG(ERR, 5235 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5236 port_id, queue_id); 5237 return -EINVAL; 5238 } 5239 5240 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5241 return -ENOTSUP; 5242 memset(mode, 0, sizeof(*mode)); 5243 return eth_err(port_id, 5244 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5245 } 5246 5247 int 5248 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5249 struct rte_power_monitor_cond *pmc) 5250 { 5251 struct rte_eth_dev *dev; 5252 5253 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5254 dev = &rte_eth_devices[port_id]; 5255 5256 if (queue_id >= dev->data->nb_rx_queues) { 5257 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5258 return -EINVAL; 5259 } 5260 5261 if (pmc == NULL) { 5262 RTE_ETHDEV_LOG(ERR, 5263 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5264 port_id, queue_id); 5265 return -EINVAL; 5266 } 5267 5268 if (*dev->dev_ops->get_monitor_addr == NULL) 5269 return -ENOTSUP; 5270 return eth_err(port_id, 5271 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5272 } 5273 5274 int 5275 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5276 struct rte_ether_addr *mc_addr_set, 5277 uint32_t nb_mc_addr) 5278 { 5279 struct rte_eth_dev *dev; 5280 5281 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5282 dev = &rte_eth_devices[port_id]; 5283 5284 if (*dev->dev_ops->set_mc_addr_list == NULL) 5285 return -ENOTSUP; 5286 return 
eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5287 mc_addr_set, nb_mc_addr)); 5288 } 5289 5290 int 5291 rte_eth_timesync_enable(uint16_t port_id) 5292 { 5293 struct rte_eth_dev *dev; 5294 5295 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5296 dev = &rte_eth_devices[port_id]; 5297 5298 if (*dev->dev_ops->timesync_enable == NULL) 5299 return -ENOTSUP; 5300 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5301 } 5302 5303 int 5304 rte_eth_timesync_disable(uint16_t port_id) 5305 { 5306 struct rte_eth_dev *dev; 5307 5308 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5309 dev = &rte_eth_devices[port_id]; 5310 5311 if (*dev->dev_ops->timesync_disable == NULL) 5312 return -ENOTSUP; 5313 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5314 } 5315 5316 int 5317 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5318 uint32_t flags) 5319 { 5320 struct rte_eth_dev *dev; 5321 5322 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5323 dev = &rte_eth_devices[port_id]; 5324 5325 if (timestamp == NULL) { 5326 RTE_ETHDEV_LOG(ERR, 5327 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5328 port_id); 5329 return -EINVAL; 5330 } 5331 5332 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 5333 return -ENOTSUP; 5334 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5335 (dev, timestamp, flags)); 5336 } 5337 5338 int 5339 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5340 struct timespec *timestamp) 5341 { 5342 struct rte_eth_dev *dev; 5343 5344 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5345 dev = &rte_eth_devices[port_id]; 5346 5347 if (timestamp == NULL) { 5348 RTE_ETHDEV_LOG(ERR, 5349 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5350 port_id); 5351 return -EINVAL; 5352 } 5353 5354 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 5355 return -ENOTSUP; 5356 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5357 (dev, timestamp)); 5358 } 5359 5360 int 5361 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5362 { 5363 struct rte_eth_dev *dev; 5364 5365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5366 dev = &rte_eth_devices[port_id]; 5367 5368 if (*dev->dev_ops->timesync_adjust_time == NULL) 5369 return -ENOTSUP; 5370 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5371 } 5372 5373 int 5374 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5375 { 5376 struct rte_eth_dev *dev; 5377 5378 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5379 dev = &rte_eth_devices[port_id]; 5380 5381 if (timestamp == NULL) { 5382 RTE_ETHDEV_LOG(ERR, 5383 "Cannot read ethdev port %u timesync time to NULL\n", 5384 port_id); 5385 return -EINVAL; 5386 } 5387 5388 if (*dev->dev_ops->timesync_read_time == NULL) 5389 return -ENOTSUP; 5390 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5391 timestamp)); 5392 } 5393 5394 int 5395 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5396 { 5397 struct rte_eth_dev *dev; 5398 5399 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5400 dev = &rte_eth_devices[port_id]; 5401 5402 if (timestamp == NULL) { 5403 RTE_ETHDEV_LOG(ERR, 5404 "Cannot write ethdev port %u timesync from NULL time\n", 5405 port_id); 5406 return -EINVAL; 5407 } 5408 5409 if (*dev->dev_ops->timesync_write_time == NULL) 5410 return -ENOTSUP; 5411 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5412 timestamp)); 5413 } 5414 5415 int 5416 
rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5417 { 5418 struct rte_eth_dev *dev; 5419 5420 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5421 dev = &rte_eth_devices[port_id]; 5422 5423 if (clock == NULL) { 5424 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5425 port_id); 5426 return -EINVAL; 5427 } 5428 5429 if (*dev->dev_ops->read_clock == NULL) 5430 return -ENOTSUP; 5431 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5432 } 5433 5434 int 5435 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5436 { 5437 struct rte_eth_dev *dev; 5438 5439 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5440 dev = &rte_eth_devices[port_id]; 5441 5442 if (info == NULL) { 5443 RTE_ETHDEV_LOG(ERR, 5444 "Cannot get ethdev port %u register info to NULL\n", 5445 port_id); 5446 return -EINVAL; 5447 } 5448 5449 if (*dev->dev_ops->get_reg == NULL) 5450 return -ENOTSUP; 5451 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5452 } 5453 5454 int 5455 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5456 { 5457 struct rte_eth_dev *dev; 5458 5459 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5460 dev = &rte_eth_devices[port_id]; 5461 5462 if (*dev->dev_ops->get_eeprom_length == NULL) 5463 return -ENOTSUP; 5464 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5465 } 5466 5467 int 5468 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5469 { 5470 struct rte_eth_dev *dev; 5471 5472 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5473 dev = &rte_eth_devices[port_id]; 5474 5475 if (info == NULL) { 5476 RTE_ETHDEV_LOG(ERR, 5477 "Cannot get ethdev port %u EEPROM info to NULL\n", 5478 port_id); 5479 return -EINVAL; 5480 } 5481 5482 if (*dev->dev_ops->get_eeprom == NULL) 5483 return -ENOTSUP; 5484 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5485 } 5486 5487 int 5488 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5489 { 5490 struct rte_eth_dev *dev; 5491 5492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5493 dev = &rte_eth_devices[port_id]; 5494 5495 if (info == NULL) { 5496 RTE_ETHDEV_LOG(ERR, 5497 "Cannot set ethdev port %u EEPROM from NULL info\n", 5498 port_id); 5499 return -EINVAL; 5500 } 5501 5502 if (*dev->dev_ops->set_eeprom == NULL) 5503 return -ENOTSUP; 5504 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5505 } 5506 5507 int 5508 rte_eth_dev_get_module_info(uint16_t port_id, 5509 struct rte_eth_dev_module_info *modinfo) 5510 { 5511 struct rte_eth_dev *dev; 5512 5513 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5514 dev = &rte_eth_devices[port_id]; 5515 5516 if (modinfo == NULL) { 5517 RTE_ETHDEV_LOG(ERR, 5518 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5519 port_id); 5520 return -EINVAL; 5521 } 5522 5523 if (*dev->dev_ops->get_module_info == NULL) 5524 return -ENOTSUP; 5525 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5526 } 5527 5528 int 5529 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5530 struct rte_dev_eeprom_info *info) 5531 { 5532 struct rte_eth_dev *dev; 5533 5534 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5535 dev = &rte_eth_devices[port_id]; 5536 5537 if (info == NULL) { 5538 RTE_ETHDEV_LOG(ERR, 5539 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5540 port_id); 5541 return -EINVAL; 5542 } 5543 5544 if (info->data == NULL) { 5545 RTE_ETHDEV_LOG(ERR, 5546 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5547 port_id); 5548 return -EINVAL; 
5549 } 5550 5551 if (info->length == 0) { 5552 RTE_ETHDEV_LOG(ERR, 5553 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5554 port_id); 5555 return -EINVAL; 5556 } 5557 5558 if (*dev->dev_ops->get_module_eeprom == NULL) 5559 return -ENOTSUP; 5560 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5561 } 5562 5563 int 5564 rte_eth_dev_get_dcb_info(uint16_t port_id, 5565 struct rte_eth_dcb_info *dcb_info) 5566 { 5567 struct rte_eth_dev *dev; 5568 5569 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5570 dev = &rte_eth_devices[port_id]; 5571 5572 if (dcb_info == NULL) { 5573 RTE_ETHDEV_LOG(ERR, 5574 "Cannot get ethdev port %u DCB info to NULL\n", 5575 port_id); 5576 return -EINVAL; 5577 } 5578 5579 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5580 5581 if (*dev->dev_ops->get_dcb_info == NULL) 5582 return -ENOTSUP; 5583 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5584 } 5585 5586 static void 5587 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5588 const struct rte_eth_desc_lim *desc_lim) 5589 { 5590 if (desc_lim->nb_align != 0) 5591 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5592 5593 if (desc_lim->nb_max != 0) 5594 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5595 5596 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5597 } 5598 5599 int 5600 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5601 uint16_t *nb_rx_desc, 5602 uint16_t *nb_tx_desc) 5603 { 5604 struct rte_eth_dev_info dev_info; 5605 int ret; 5606 5607 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5608 5609 ret = rte_eth_dev_info_get(port_id, &dev_info); 5610 if (ret != 0) 5611 return ret; 5612 5613 if (nb_rx_desc != NULL) 5614 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5615 5616 if (nb_tx_desc != NULL) 5617 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5618 5619 return 0; 5620 } 5621 5622 int 5623 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5624 struct rte_eth_hairpin_cap *cap) 5625 { 5626 struct rte_eth_dev *dev; 5627 5628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5629 dev = &rte_eth_devices[port_id]; 5630 5631 if (cap == NULL) { 5632 RTE_ETHDEV_LOG(ERR, 5633 "Cannot get ethdev port %u hairpin capability to NULL\n", 5634 port_id); 5635 return -EINVAL; 5636 } 5637 5638 if (*dev->dev_ops->hairpin_cap_get == NULL) 5639 return -ENOTSUP; 5640 memset(cap, 0, sizeof(*cap)); 5641 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5642 } 5643 5644 int 5645 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5646 { 5647 struct rte_eth_dev *dev; 5648 5649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5650 dev = &rte_eth_devices[port_id]; 5651 5652 if (pool == NULL) { 5653 RTE_ETHDEV_LOG(ERR, 5654 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5655 port_id); 5656 return -EINVAL; 5657 } 5658 5659 if (*dev->dev_ops->pool_ops_supported == NULL) 5660 return 1; /* all pools are supported */ 5661 5662 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5663 } 5664 5665 static int 5666 eth_dev_handle_port_list(const char *cmd __rte_unused, 5667 const char *params __rte_unused, 5668 struct rte_tel_data *d) 5669 { 5670 int port_id; 5671 5672 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5673 RTE_ETH_FOREACH_DEV(port_id) 5674 rte_tel_data_add_array_int(d, port_id); 5675 return 0; 5676 } 5677 5678 static void 5679 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5680 const char *stat_name) 5681 { 5682 int q; 5683 struct rte_tel_data *q_data = 
rte_tel_data_alloc(); 5684 if (q_data == NULL) 5685 return; 5686 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 5687 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 5688 rte_tel_data_add_array_u64(q_data, q_stats[q]); 5689 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 5690 } 5691 5692 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 5693 5694 static int 5695 eth_dev_handle_port_stats(const char *cmd __rte_unused, 5696 const char *params, 5697 struct rte_tel_data *d) 5698 { 5699 struct rte_eth_stats stats; 5700 int port_id, ret; 5701 5702 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5703 return -1; 5704 5705 port_id = atoi(params); 5706 if (!rte_eth_dev_is_valid_port(port_id)) 5707 return -1; 5708 5709 ret = rte_eth_stats_get(port_id, &stats); 5710 if (ret < 0) 5711 return -1; 5712 5713 rte_tel_data_start_dict(d); 5714 ADD_DICT_STAT(stats, ipackets); 5715 ADD_DICT_STAT(stats, opackets); 5716 ADD_DICT_STAT(stats, ibytes); 5717 ADD_DICT_STAT(stats, obytes); 5718 ADD_DICT_STAT(stats, imissed); 5719 ADD_DICT_STAT(stats, ierrors); 5720 ADD_DICT_STAT(stats, oerrors); 5721 ADD_DICT_STAT(stats, rx_nombuf); 5722 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 5723 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 5724 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 5725 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 5726 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 5727 5728 return 0; 5729 } 5730 5731 static int 5732 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 5733 const char *params, 5734 struct rte_tel_data *d) 5735 { 5736 struct rte_eth_xstat *eth_xstats; 5737 struct rte_eth_xstat_name *xstat_names; 5738 int port_id, num_xstats; 5739 int i, ret; 5740 char *end_param; 5741 5742 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5743 return -1; 5744 5745 port_id = strtoul(params, &end_param, 0); 5746 if (*end_param != '\0') 5747 RTE_ETHDEV_LOG(NOTICE, 5748 "Extra parameters passed to ethdev telemetry command, ignoring"); 5749 if (!rte_eth_dev_is_valid_port(port_id)) 5750 return -1; 5751 5752 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 5753 if (num_xstats < 0) 5754 return -1; 5755 5756 /* use one malloc for both names and stats */ 5757 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 5758 sizeof(struct rte_eth_xstat_name)) * num_xstats); 5759 if (eth_xstats == NULL) 5760 return -1; 5761 xstat_names = (void *)ð_xstats[num_xstats]; 5762 5763 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 5764 if (ret < 0 || ret > num_xstats) { 5765 free(eth_xstats); 5766 return -1; 5767 } 5768 5769 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 5770 if (ret < 0 || ret > num_xstats) { 5771 free(eth_xstats); 5772 return -1; 5773 } 5774 5775 rte_tel_data_start_dict(d); 5776 for (i = 0; i < num_xstats; i++) 5777 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 5778 eth_xstats[i].value); 5779 free(eth_xstats); 5780 return 0; 5781 } 5782 5783 #ifndef RTE_EXEC_ENV_WINDOWS 5784 static int 5785 eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, 5786 const char *params, 5787 struct rte_tel_data *d) 5788 { 5789 char *buf, *end_param; 5790 int port_id, ret; 5791 FILE *f; 5792 5793 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5794 return -EINVAL; 5795 5796 port_id = strtoul(params, &end_param, 0); 5797 if (*end_param != '\0') 5798 RTE_ETHDEV_LOG(NOTICE, 5799 "Extra parameters passed to ethdev telemetry 
command, ignoring"); 5800 if (!rte_eth_dev_is_valid_port(port_id)) 5801 return -EINVAL; 5802 5803 buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); 5804 if (buf == NULL) 5805 return -ENOMEM; 5806 5807 f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+"); 5808 if (f == NULL) { 5809 free(buf); 5810 return -EINVAL; 5811 } 5812 5813 ret = rte_eth_dev_priv_dump(port_id, f); 5814 fclose(f); 5815 if (ret == 0) { 5816 rte_tel_data_start_dict(d); 5817 rte_tel_data_string(d, buf); 5818 } 5819 5820 free(buf); 5821 return 0; 5822 } 5823 #endif /* !RTE_EXEC_ENV_WINDOWS */ 5824 5825 static int 5826 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 5827 const char *params, 5828 struct rte_tel_data *d) 5829 { 5830 static const char *status_str = "status"; 5831 int ret, port_id; 5832 struct rte_eth_link link; 5833 char *end_param; 5834 5835 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5836 return -1; 5837 5838 port_id = strtoul(params, &end_param, 0); 5839 if (*end_param != '\0') 5840 RTE_ETHDEV_LOG(NOTICE, 5841 "Extra parameters passed to ethdev telemetry command, ignoring"); 5842 if (!rte_eth_dev_is_valid_port(port_id)) 5843 return -1; 5844 5845 ret = rte_eth_link_get_nowait(port_id, &link); 5846 if (ret < 0) 5847 return -1; 5848 5849 rte_tel_data_start_dict(d); 5850 if (!link.link_status) { 5851 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 5852 return 0; 5853 } 5854 rte_tel_data_add_dict_string(d, status_str, "UP"); 5855 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 5856 rte_tel_data_add_dict_string(d, "duplex", 5857 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 5858 "full-duplex" : "half-duplex"); 5859 return 0; 5860 } 5861 5862 static int 5863 eth_dev_handle_port_info(const char *cmd __rte_unused, 5864 const char *params, 5865 struct rte_tel_data *d) 5866 { 5867 struct rte_tel_data *rxq_state, *txq_state; 5868 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 5869 struct rte_eth_dev *eth_dev; 5870 char *end_param; 5871 int port_id, i; 5872 5873 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5874 return -1; 5875 5876 port_id = strtoul(params, &end_param, 0); 5877 if (*end_param != '\0') 5878 RTE_ETHDEV_LOG(NOTICE, 5879 "Extra parameters passed to ethdev telemetry command, ignoring"); 5880 5881 if (!rte_eth_dev_is_valid_port(port_id)) 5882 return -EINVAL; 5883 5884 eth_dev = &rte_eth_devices[port_id]; 5885 5886 rxq_state = rte_tel_data_alloc(); 5887 if (!rxq_state) 5888 return -ENOMEM; 5889 5890 txq_state = rte_tel_data_alloc(); 5891 if (!txq_state) { 5892 rte_tel_data_free(rxq_state); 5893 return -ENOMEM; 5894 } 5895 5896 rte_tel_data_start_dict(d); 5897 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 5898 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 5899 rte_tel_data_add_dict_int(d, "nb_rx_queues", 5900 eth_dev->data->nb_rx_queues); 5901 rte_tel_data_add_dict_int(d, "nb_tx_queues", 5902 eth_dev->data->nb_tx_queues); 5903 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 5904 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 5905 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 5906 eth_dev->data->min_rx_buf_size); 5907 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 5908 eth_dev->data->rx_mbuf_alloc_failed); 5909 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 5910 eth_dev->data->mac_addrs); 5911 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 5912 rte_tel_data_add_dict_int(d, "promiscuous", 5913 eth_dev->data->promiscuous); 5914 rte_tel_data_add_dict_int(d, 
"scattered_rx", 5915 eth_dev->data->scattered_rx); 5916 rte_tel_data_add_dict_int(d, "all_multicast", 5917 eth_dev->data->all_multicast); 5918 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 5919 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 5920 rte_tel_data_add_dict_int(d, "dev_configured", 5921 eth_dev->data->dev_configured); 5922 5923 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 5924 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 5925 rte_tel_data_add_array_int(rxq_state, 5926 eth_dev->data->rx_queue_state[i]); 5927 5928 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 5929 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 5930 rte_tel_data_add_array_int(txq_state, 5931 eth_dev->data->tx_queue_state[i]); 5932 5933 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 5934 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 5935 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 5936 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 5937 rte_tel_data_add_dict_int(d, "rx_offloads", 5938 eth_dev->data->dev_conf.rxmode.offloads); 5939 rte_tel_data_add_dict_int(d, "tx_offloads", 5940 eth_dev->data->dev_conf.txmode.offloads); 5941 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 5942 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 5943 5944 return 0; 5945 } 5946 5947 int 5948 rte_eth_representor_info_get(uint16_t port_id, 5949 struct rte_eth_representor_info *info) 5950 { 5951 struct rte_eth_dev *dev; 5952 5953 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5954 dev = &rte_eth_devices[port_id]; 5955 5956 if (*dev->dev_ops->representor_info_get == NULL) 5957 return -ENOTSUP; 5958 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 5959 } 5960 5961 int 5962 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 5963 { 5964 struct rte_eth_dev *dev; 5965 5966 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5967 dev = &rte_eth_devices[port_id]; 5968 5969 if (dev->data->dev_configured != 0) { 5970 RTE_ETHDEV_LOG(ERR, 5971 "The port (ID=%"PRIu16") is already configured\n", 5972 port_id); 5973 return -EBUSY; 5974 } 5975 5976 if (features == NULL) { 5977 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 5978 return -EINVAL; 5979 } 5980 5981 if (*dev->dev_ops->rx_metadata_negotiate == NULL) 5982 return -ENOTSUP; 5983 return eth_err(port_id, 5984 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 5985 } 5986 5987 int 5988 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 5989 struct rte_eth_ip_reassembly_params *reassembly_capa) 5990 { 5991 struct rte_eth_dev *dev; 5992 5993 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5994 dev = &rte_eth_devices[port_id]; 5995 5996 if (dev->data->dev_configured == 0) { 5997 RTE_ETHDEV_LOG(ERR, 5998 "Device with port_id=%u is not configured.\n" 5999 "Cannot get IP reassembly capability\n", 6000 port_id); 6001 return -EINVAL; 6002 } 6003 6004 if (reassembly_capa == NULL) { 6005 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 6006 return -EINVAL; 6007 } 6008 6009 if (*dev->dev_ops->ip_reassembly_capability_get == NULL) 6010 return -ENOTSUP; 6011 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6012 6013 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 6014 (dev, reassembly_capa)); 6015 } 6016 6017 int 6018 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 6019 struct rte_eth_ip_reassembly_params *conf) 6020 { 6021 struct rte_eth_dev *dev; 
6022 6023 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6024 dev = &rte_eth_devices[port_id]; 6025 6026 if (dev->data->dev_configured == 0) { 6027 RTE_ETHDEV_LOG(ERR, 6028 "Device with port_id=%u is not configured.\n" 6029 "Cannot get IP reassembly configuration\n", 6030 port_id); 6031 return -EINVAL; 6032 } 6033 6034 if (conf == NULL) { 6035 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 6036 return -EINVAL; 6037 } 6038 6039 if (*dev->dev_ops->ip_reassembly_conf_get == NULL) 6040 return -ENOTSUP; 6041 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6042 return eth_err(port_id, 6043 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 6044 } 6045 6046 int 6047 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 6048 const struct rte_eth_ip_reassembly_params *conf) 6049 { 6050 struct rte_eth_dev *dev; 6051 6052 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6053 dev = &rte_eth_devices[port_id]; 6054 6055 if (dev->data->dev_configured == 0) { 6056 RTE_ETHDEV_LOG(ERR, 6057 "Device with port_id=%u is not configured.\n" 6058 "Cannot set IP reassembly configuration", 6059 port_id); 6060 return -EINVAL; 6061 } 6062 6063 if (dev->data->dev_started != 0) { 6064 RTE_ETHDEV_LOG(ERR, 6065 "Device with port_id=%u started,\n" 6066 "cannot configure IP reassembly params.\n", 6067 port_id); 6068 return -EINVAL; 6069 } 6070 6071 if (conf == NULL) { 6072 RTE_ETHDEV_LOG(ERR, 6073 "Invalid IP reassembly configuration (NULL)\n"); 6074 return -EINVAL; 6075 } 6076 6077 if (*dev->dev_ops->ip_reassembly_conf_set == NULL) 6078 return -ENOTSUP; 6079 return eth_err(port_id, 6080 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 6081 } 6082 6083 int 6084 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 6085 { 6086 struct rte_eth_dev *dev; 6087 6088 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6089 dev = &rte_eth_devices[port_id]; 6090 6091 if (file == NULL) { 6092 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6093 return -EINVAL; 6094 } 6095 6096 if (*dev->dev_ops->eth_dev_priv_dump == NULL) 6097 return -ENOTSUP; 6098 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 6099 } 6100 6101 int 6102 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6103 uint16_t offset, uint16_t num, FILE *file) 6104 { 6105 struct rte_eth_dev *dev; 6106 6107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6108 dev = &rte_eth_devices[port_id]; 6109 6110 if (queue_id >= dev->data->nb_rx_queues) { 6111 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6112 return -EINVAL; 6113 } 6114 6115 if (file == NULL) { 6116 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6117 return -EINVAL; 6118 } 6119 6120 if (*dev->dev_ops->eth_rx_descriptor_dump == NULL) 6121 return -ENOTSUP; 6122 6123 return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev, 6124 queue_id, offset, num, file)); 6125 } 6126 6127 int 6128 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6129 uint16_t offset, uint16_t num, FILE *file) 6130 { 6131 struct rte_eth_dev *dev; 6132 6133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6134 dev = &rte_eth_devices[port_id]; 6135 6136 if (queue_id >= dev->data->nb_tx_queues) { 6137 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6138 return -EINVAL; 6139 } 6140 6141 if (file == NULL) { 6142 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6143 return -EINVAL; 6144 } 6145 6146 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL) 6147 return -ENOTSUP; 6148 6149 return eth_err(port_id, 
(*dev->dev_ops->eth_tx_descriptor_dump)(dev,
6150 			queue_id, offset, num, file));
6151 }
6152 
6153 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6154 
6155 RTE_INIT(ethdev_init_telemetry)
6156 {
6157 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6158 			"Returns list of available ethdev ports. Takes no parameters");
6159 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6160 			"Returns the common stats for a port. Parameters: int port_id");
6161 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6162 			"Returns the extended stats for a port. Parameters: int port_id");
6163 #ifndef RTE_EXEC_ENV_WINDOWS
6164 	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
6165 			"Returns a dump of private information for a port. Parameters: int port_id");
6166 #endif
6167 	rte_telemetry_register_cmd("/ethdev/link_status",
6168 			eth_dev_handle_port_link_status,
6169 			"Returns the link status for a port. Parameters: int port_id");
6170 	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
6171 			"Returns the device info for a port. Parameters: int port_id");
6172 	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
6173 			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
6174 }
6175
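/*
 * Illustrative usage sketch (comment only, not part of the library build):
 * one way an application might consume the event-callback API implemented
 * above. Port 0, the handler name and the printf reporting are assumptions
 * made for this example.
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		struct rte_eth_link link;
 *
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		if (rte_eth_link_get_nowait(port_id, &link) == 0)
 *			printf("port %u link %s\n", port_id,
 *				link.link_status ? "up" : "down");
 *		return 0;
 *	}
 *
 *	// after rte_eal_init(), before starting the port:
 *	if (rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *			link_event_cb, NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot register LSC callback\n");
 */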