/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * coming from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at the ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to the new syntax for use with the new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* A device matches the bus part, need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try the next rte_device */

	/* No more ethdev ports to iterate over. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do cleanup from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in a pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
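
/*
 * Usage sketch (illustrative only, not part of this file): how an
 * application might walk all ethdev ports matching a devargs string.
 * The "class=eth" string below is a placeholder filter; a completed
 * iteration cleans up internally, so rte_eth_iterator_cleanup() is only
 * mandatory when breaking out of the loop early.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *	}
 */
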
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}
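
/*
 * Usage sketch (illustrative only): applications normally iterate with the
 * public RTE_ETH_FOREACH_DEV() macro, which additionally skips owned ports,
 * while RTE_ETH_FOREACH_DEV_OWNED_BY() walks the ports of a single owner;
 * my_owner_id is a placeholder obtained from rte_eth_dev_owner_new().
 *
 *	uint16_t pid;
 *
 *	RTE_ETH_FOREACH_DEV(pid)
 *		printf("usable port %u\n", pid);
 *
 *	RTE_ETH_FOREACH_DEV_OWNED_BY(pid, my_owner_id)
 *		printf("port %u belongs to my_owner_id\n", pid);
 */
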
int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
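
/*
 * Usage sketch (illustrative only): the typical ownership lifecycle built
 * from the functions above; error handling is elided and port_id is a
 * placeholder.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint64_t owner_id;
 *
 *	rte_eth_dev_owner_new(&owner_id);
 *	owner.id = owner_id;
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	... use the port exclusively; RTE_ETH_FOREACH_DEV skips it elsewhere ...
 *	rte_eth_dev_owner_unset(port_id, owner_id);
 */
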
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}
	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}
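
/*
 * Usage sketch (illustrative only): starting an Rx queue at runtime, e.g.
 * one set up with rx_conf.rx_deferred_start = 1. As checked above, the
 * port itself must already be started and the queue must not be a
 * hairpin queue.
 *
 *	int ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	if (ret == -ENOTSUP)
 *		... the PMD cannot start individual queues ...
 */
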
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}
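
/*
 * Usage sketch (illustrative only): decoding an offload bitmask into names
 * bit by bit, the same way eth_dev_validate_offloads() below walks a mask;
 * dev_info is assumed to come from rte_eth_dev_info_get().
 *
 *	uint64_t mask = dev_info.rx_offload_capa;
 *
 *	while (mask != 0) {
 *		uint64_t bit = RTE_BIT64(__builtin_ctzll(mask));
 *
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		mask &= ~bit;
 *	}
 */
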
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		uint64_t set_offloads, const char *offload_type,
		const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
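
/*
 * Worked example (illustrative only) of the arithmetic above: a device
 * reporting max_rx_pktlen = 9618 and max_mtu = 9600 yields
 * overhead_len = 9618 - 9600 = 18 bytes (Ethernet header + CRC), so an
 * MTU of 1500 is validated as a frame size of 1500 + 18 = 1518 bytes,
 * which must lie within [RTE_ETHER_MIN_LEN, max_rx_pktlen].
 */
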
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure() in order to avoid any non-anticipated
	 * behaviour, and set it to 1 only when dev_configure() succeeds.
	 */
	dev->data->dev_configured = 0;

	/* Store the original config, as a rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
			sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero. If the driver does not provide any preferred values,
	 * fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than the max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than the max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Set up the new number of Rx/Tx queues and reconfigure the device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
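
/*
 * Usage sketch (illustrative only): the minimal application-side call into
 * rte_eth_dev_configure() above; all values are placeholders. On any error
 * the original configuration and MTU have already been rolled back.
 *
 *	struct rte_eth_conf conf = {0};
 *	int ret;
 *
 *	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
 *	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	if (ret < 0)
 *		... fix conf against rte_eth_dev_info_get() and retry ...
 */
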
static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * would like to bypass the same-value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * would like to bypass the same-value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
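
/*
 * Usage sketch (illustrative only): the pairing around rte_eth_dev_start()
 * above and rte_eth_dev_stop() below; the port must have been configured
 * and its queues set up beforehand.
 *
 *	if (rte_eth_dev_start(port_id) < 0)
 *		... handle error ...
 *	... datapath runs via rte_eth_rx_burst()/rte_eth_tx_burst() ...
 *	if (rte_eth_dev_stop(port_id) != 0)
 *		... the PMD failed to stop cleanly ...
 */
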
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources. But a secondary process should not be
	 * obliged to wait for a device stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
		uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer; this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %u < %u (%u + %u)\n",
			mp->name, data_room_size,
			offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}
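
/*
 * Sizing sketch (illustrative only): a pool that passes the check above for
 * a device whose min_rx_bufsize is 1024 bytes must provide at least
 * RTE_PKTMBUF_HEADROOM + 1024 bytes of data room per mbuf; the element
 * count, cache size and pool name are placeholders.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *			256, 0, RTE_PKTMBUF_HEADROOM + 1024, rte_socket_id());
 */
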
static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
		uint16_t n_seg, uint32_t *mbp_buf_size,
		const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;

		ret = rte_eth_check_rx_mempool(mpl, offset, length);
		if (ret != 0)
			return ret;
	}
	return 0;
}
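
/*
 * Configuration sketch (illustrative only): a two-segment split that the
 * checks above would accept on a PMD with BUFFER_SPLIT and multi-pool
 * support, placing headers and payload in different pools; hdr_mp and
 * pay_mp are placeholder mempools, and a zero length means "the rest of
 * the buffer".
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_mp, .length = 128, .offset = 0 } },
 *		{ .split = { .mp = pay_mp, .length = 0, .offset = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = RTE_DIM(segs);
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *			&rxconf, NULL);
 */
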
static int
rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
		uint16_t n_mempools, uint32_t *min_buf_size,
		const struct rte_eth_dev_info *dev_info)
{
	uint16_t pool_idx;
	int ret;

	if (n_mempools > dev_info->max_rx_mempools) {
		RTE_ETHDEV_LOG(ERR,
			"Too many Rx mempools %u vs maximum %u\n",
			n_mempools, dev_info->max_rx_mempools);
		return -EINVAL;
	}

	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
		struct rte_mempool *mp = rx_mempools[pool_idx];

		if (mp == NULL) {
			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
			return -EINVAL;
		}

		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
				dev_info->min_rx_bufsize);
		if (ret != 0)
			return ret;

		*min_buf_size = RTE_MIN(*min_buf_size,
				rte_pktmbuf_data_room_size(mp));
	}

	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	int ret;
	uint64_t rx_offloads;
	uint32_t mbp_buf_size = UINT32_MAX;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (rx_conf != NULL)
		rx_offloads |= rx_conf->offloads;

	/* Ensure that we have one and only one source of Rx buffers */
	if ((mp != NULL) +
	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
		RTE_ETHDEV_LOG(ERR,
			"Ambiguous Rx mempools configuration\n");
		return -EINVAL;
	}

	if (mp != NULL) {
		/* Single pool configuration check. */
		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
				dev_info.min_rx_bufsize);
		if (ret != 0)
			return ret;

		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf->rx_seg == NULL) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no multi-segment configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
					&mbp_buf_size,
					&dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
		/* Extended multi-pool configuration check. */
		if (rx_conf->rx_mempools == NULL) {
			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
			return -EINVAL;
		}

		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
				rx_conf->rx_nmempool,
				&mbp_buf_size,
				&dev_info);
		if (ret != 0)
			return ret;
	} else {
		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
		return -EINVAL;
	}

	/* Use the default specified by the driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->rx_queue_state[rx_queue_id] !=
		RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offload has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to the underlying PMD only carries
	 * those offloads which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * Offloads newly added for this queue are those not enabled in
	 * rte_eth_dev_configure(), and they must be per-queue offloads.
	 * A pure per-port offload can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offload also can't
	 * be newly added for a queue if it hasn't been enabled in
	 * rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (local_conf.share_group > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
			port_id, rx_queue_id, local_conf.share_group);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
1953 */ 1954 /* Get the real Ethernet overhead length */ 1955 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1956 uint32_t overhead_len; 1957 uint32_t max_rx_pktlen; 1958 int ret; 1959 1960 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1961 dev_info.max_mtu); 1962 max_rx_pktlen = dev->data->mtu + overhead_len; 1963 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 1964 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1965 ret = eth_dev_check_lro_pkt_size(port_id, 1966 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1967 max_rx_pktlen, 1968 dev_info.max_lro_pkt_size); 1969 if (ret != 0) 1970 return ret; 1971 } 1972 1973 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 1974 socket_id, &local_conf, mp); 1975 if (!ret) { 1976 if (!dev->data->min_rx_buf_size || 1977 dev->data->min_rx_buf_size > mbp_buf_size) 1978 dev->data->min_rx_buf_size = mbp_buf_size; 1979 } 1980 1981 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 1982 rx_conf, ret); 1983 return eth_err(port_id, ret); 1984 } 1985 1986 int 1987 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1988 uint16_t nb_rx_desc, 1989 const struct rte_eth_hairpin_conf *conf) 1990 { 1991 int ret; 1992 struct rte_eth_dev *dev; 1993 struct rte_eth_hairpin_cap cap; 1994 int i; 1995 int count; 1996 1997 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1998 dev = &rte_eth_devices[port_id]; 1999 2000 if (rx_queue_id >= dev->data->nb_rx_queues) { 2001 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2002 return -EINVAL; 2003 } 2004 2005 if (conf == NULL) { 2006 RTE_ETHDEV_LOG(ERR, 2007 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2008 port_id); 2009 return -EINVAL; 2010 } 2011 2012 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2013 if (ret != 0) 2014 return ret; 2015 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2016 return -ENOTSUP; 2017 /* if nb_rx_desc is zero use max number of desc from the driver. 
int
rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
			       uint16_t nb_rx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
		return -ENOTSUP;
	/* If nb_rx_desc is zero, use the maximum descriptor count from the driver. */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Rx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Rx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->tx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offload has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it on this queue again.
	 * The local_conf.offloads passed to the underlying PMD therefore
	 * carries only the offloads that are enabled on this queue alone
	 * and not on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * Offloads newly requested for this queue are those not enabled in
	 * rte_eth_dev_configure(), and they must be per-queue capable:
	 * a pure per-port offload cannot be enabled on one queue while
	 * disabled on another, so it cannot be newly added here if it was
	 * not enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
		return -ENOTSUP;
	/* If nb_tx_desc is zero, use the maximum descriptor count from the driver. */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Tx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Tx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_bind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR,
			"Failed to bind hairpin Tx %d to Rx %d (%d - all ports)\n",
			tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_unbind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR,
			"Failed to unbind hairpin Tx %d from Rx %d (%d - all ports)\n",
			tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

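/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): binding both directions of a two-port hairpin pair with the
 * functions above, once both ports are started with manual binding
 * configured. Port numbers are hypothetical caller inputs.
 */
static __rte_unused int
example_hairpin_bind_pair(uint16_t tx_port, uint16_t rx_port)
{
	int ret;

	/* One bind call per Tx side of the pair. */
	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;

	ret = rte_eth_hairpin_bind(rx_port, tx_port);
	if (ret != 0)
		/* Roll back the first direction on failure. */
		rte_eth_hairpin_unbind(tx_port, rx_port);

	return ret;
}
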
int
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
			       size_t len, uint32_t direction)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (peer_ports == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (len == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
		return -ENOTSUP;

	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
						      len, direction);
	if (ret < 0)
		RTE_ETHDEV_LOG(ERR,
			"Failed to get port %d hairpin peer %s ports\n",
			port_id, direction ? "Rx" : "Tx");

	return ret;
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	rte_pktmbuf_free_bulk(pkts, unsent);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;

	rte_pktmbuf_free_bulk(pkts, unsent);
	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	if (buffer == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set Tx buffer error callback to NULL buffer\n");
		return -EINVAL;
	}

	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
		return -EINVAL;
	}

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}

int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->tx_done_cleanup == NULL)
		return -ENOTSUP;

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}

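/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): allocating a Tx buffer and attaching the counting callback
 * above, so packets dropped on flush are accounted in the caller-owned
 * `drops' counter. The buffer size of 32 is an arbitrary choice.
 */
static __rte_unused struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint64_t *drops)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("example_tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(32), 0, SOCKET_ID_ANY);
	if (buffer == NULL)
		return NULL;

	if (rte_eth_tx_buffer_init(buffer, 32) != 0 ||
	    rte_eth_tx_buffer_set_err_callback(buffer,
			rte_eth_tx_buffer_count_callback, drops) != 0) {
		rte_free(buffer);
		return NULL;
	}

	return buffer;
}
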
int
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->promiscuous == 1)
		return 0;

	if (*dev->dev_ops->promiscuous_enable == NULL)
		return -ENOTSUP;

	diag = (*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = (diag == 0) ? 1 : 0;

	return eth_err(port_id, diag);
}

int
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->promiscuous == 0)
		return 0;

	if (*dev->dev_ops->promiscuous_disable == NULL)
		return -ENOTSUP;

	dev->data->promiscuous = 0;
	diag = (*dev->dev_ops->promiscuous_disable)(dev);
	if (diag != 0)
		dev->data->promiscuous = 1;

	return eth_err(port_id, diag);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return dev->data->promiscuous;
}

int
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->all_multicast == 1)
		return 0;

	if (*dev->dev_ops->allmulticast_enable == NULL)
		return -ENOTSUP;
	diag = (*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = (diag == 0) ? 1 : 0;

	return eth_err(port_id, diag);
}

int
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->all_multicast == 0)
		return 0;

	if (*dev->dev_ops->allmulticast_disable == NULL)
		return -ENOTSUP;
	dev->data->all_multicast = 0;
	diag = (*dev->dev_ops->allmulticast_disable)(dev);
	if (diag != 0)
		dev->data->all_multicast = 1;

	return eth_err(port_id, diag);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return dev->data->all_multicast;
}

int
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (eth_link == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}

	return 0;
}

int
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (eth_link == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}

	return 0;
}

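/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): a non-blocking link poll with rte_eth_link_get_nowait() above,
 * rendered through rte_eth_link_to_str() defined just below.
 */
static __rte_unused void
example_log_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (rte_eth_link_to_str(text, sizeof(text), &link) >= 0)
		RTE_ETHDEV_LOG(INFO, "Port %u: %s\n", port_id, text);
}
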
const char *
rte_eth_link_speed_to_str(uint32_t link_speed)
{
	switch (link_speed) {
	case RTE_ETH_SPEED_NUM_NONE: return "None";
	case RTE_ETH_SPEED_NUM_10M: return "10 Mbps";
	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
	case RTE_ETH_SPEED_NUM_1G: return "1 Gbps";
	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
	case RTE_ETH_SPEED_NUM_5G: return "5 Gbps";
	case RTE_ETH_SPEED_NUM_10G: return "10 Gbps";
	case RTE_ETH_SPEED_NUM_20G: return "20 Gbps";
	case RTE_ETH_SPEED_NUM_25G: return "25 Gbps";
	case RTE_ETH_SPEED_NUM_40G: return "40 Gbps";
	case RTE_ETH_SPEED_NUM_50G: return "50 Gbps";
	case RTE_ETH_SPEED_NUM_56G: return "56 Gbps";
	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
	default: return "Invalid";
	}
}

int
rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
{
	if (str == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
		return -EINVAL;
	}

	if (len == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot convert link to string with zero size\n");
		return -EINVAL;
	}

	if (eth_link == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
		return -EINVAL;
	}

	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
		return snprintf(str, len, "Link down");
	else
		return snprintf(str, len, "Link up at %s %s %s",
			rte_eth_link_speed_to_str(eth_link->link_speed),
			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
			"FDX" : "HDX",
			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
			"Autoneg" : "Fixed");
}

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (stats == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}

int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->stats_reset)(dev);
	if (ret != 0)
		return eth_err(port_id, ret);

	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}

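/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): deriving a per-interval Rx packet delta from two
 * rte_eth_stats_get() snapshots; persisting `prev' between calls is the
 * caller's responsibility.
 */
static __rte_unused uint64_t
example_rx_packets_delta(uint16_t port_id, struct rte_eth_stats *prev)
{
	struct rte_eth_stats now;
	uint64_t delta;

	if (rte_eth_stats_get(port_id, &now) != 0)
		return 0;

	delta = now.ipackets - prev->ipackets;
	*prev = now;
	return delta;
}
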
static inline int
eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
		count += nb_rxqs * RTE_NB_RXQ_STATS;
		count += nb_txqs * RTE_NB_TXQ_STATS;
	}

	return count;
}

static int
eth_dev_get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += eth_dev_get_xstats_basic_count(dev);

	return count;
}

int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (xstat_name == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
			port_id);
		return -ENOMEM;
	}

	if (id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u xstats ID to NULL\n",
			port_id);
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}

/* retrieve basic stats names */
static int
eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		strlcpy(xstats_names[cnt_used_entries].name,
			eth_dev_stats_strings[idx].name,
			sizeof(xstats_names[0].name));
		cnt_used_entries++;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return cnt_used_entries;

	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u_%s",
				id_queue, eth_dev_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}

	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u_%s",
				id_queue, eth_dev_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}

/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = eth_dev_get_xstats_basic_count(dev);
	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					ids_copy, xstats_names, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
				expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}

int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in eth_dev_get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to the end of the list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}

int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count, i;
	signed int xcount = 0;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (xstats == NULL && n > 0)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];

	count = eth_dev_get_xstats_basic_count(dev);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				(n > count) ? xstats + count : NULL,
				(n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}

/* reset ethdev extended statistics */
int
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL)
		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));

	/* fallback to default */
	return rte_eth_stats_reset(port_id);
}

static int
eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
		uint8_t stat_idx, uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
		return -EINVAL;

	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
		return -EINVAL;

	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
		return -EINVAL;

	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
}

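/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): the usual two-call pattern for the xstats API above -- query
 * the count first, then fetch names and values into caller-sized arrays.
 */
static __rte_unused int
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *xstats;
	int n, i;

	/* A NULL array with size 0 returns the number of available xstats. */
	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return n;

	names = calloc(n, sizeof(*names));
	xstats = calloc(n, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, xstats, n) == n) {
		for (i = 0; i < n; i++)
			RTE_ETHDEV_LOG(INFO, "%s: %"PRIu64"\n",
				names[xstats[i].id].name, xstats[i].value);
	}
out:
	free(names);
	free(xstats);
	return 0;
}
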
int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
						tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
						rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}

int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (fw_version == NULL && fw_size > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->fw_version_get == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}

int
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	};
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_info == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
			port_id);
		return -EINVAL;
	}

	/*
	 * Init dev_info before port_id check since caller does not have
	 * return status and does not know if get is successful or not.
	 */
	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;
	dev_info->device = dev->device;
	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;
	dev_info->max_mtu = UINT16_MAX;

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
	if (diag != 0) {
		/* Cleanup already filled in device information */
		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
		return eth_err(port_id, diag);
	}

	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
			RTE_MAX_QUEUES_PER_PORT);
	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
			RTE_MAX_QUEUES_PER_PORT);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->dev_flags = &dev->data->dev_flags;

	return 0;
}

int
rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u configuration to NULL\n",
			port_id);
		return -EINVAL;
	}

	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));

	return 0;
}

int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
		return 0;
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

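/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): listing the L4 packet types a port can recognize with
 * rte_eth_dev_get_supported_ptypes() above; the array size of 16 is an
 * arbitrary choice, and the return value is the total match count even
 * when the array is too small.
 */
static __rte_unused int
example_list_l4_ptypes(uint16_t port_id)
{
	uint32_t ptypes[16];
	int n, i;

	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
			ptypes, RTE_DIM(ptypes));
	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
		RTE_ETHDEV_LOG(DEBUG, "ptype 0x%08"PRIx32" supported\n",
			ptypes[i]);

	return n;
}
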
int
rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *set_ptypes, unsigned int num)
{
	const uint32_t valid_ptype_masks[] = {
		RTE_PTYPE_L2_MASK,
		RTE_PTYPE_L3_MASK,
		RTE_PTYPE_L4_MASK,
		RTE_PTYPE_TUNNEL_MASK,
		RTE_PTYPE_INNER_L2_MASK,
		RTE_PTYPE_INNER_L3_MASK,
		RTE_PTYPE_INNER_L4_MASK,
	};
	const uint32_t *all_ptypes;
	struct rte_eth_dev *dev;
	uint32_t unused_mask;
	unsigned int i, j;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (num > 0 && set_ptypes == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
			*dev->dev_ops->dev_ptypes_set == NULL) {
		ret = 0;
		goto ptype_unknown;
	}

	if (ptype_mask == 0) {
		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
				ptype_mask);
		goto ptype_unknown;
	}

	unused_mask = ptype_mask;
	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
		uint32_t mask = ptype_mask & valid_ptype_masks[i];
		if (mask && mask != valid_ptype_masks[i]) {
			ret = -EINVAL;
			goto ptype_unknown;
		}
		unused_mask &= ~valid_ptype_masks[i];
	}

	if (unused_mask) {
		ret = -EINVAL;
		goto ptype_unknown;
	}

	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
	if (all_ptypes == NULL) {
		ret = 0;
		goto ptype_unknown;
	}

	/*
	 * Accommodate as many set_ptypes as possible. If the supplied
	 * set_ptypes array is too small, fill it partially.
	 */
	for (i = 0, j = 0; set_ptypes != NULL &&
				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
		if (ptype_mask & all_ptypes[i]) {
			if (j + 1 < num) {
				set_ptypes[j] = all_ptypes[i];
				j++;
				continue;
			}
			break;
		}
	}

	if (set_ptypes != NULL && j < num)
		set_ptypes[j] = RTE_PTYPE_UNKNOWN;

	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);

ptype_unknown:
	if (num > 0)
		set_ptypes[0] = RTE_PTYPE_UNKNOWN;

	return ret;
}

int
rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
	unsigned int num)
{
	int32_t ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	if (ma == NULL) {
		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
		return -EINVAL;
	}

	/* will check for us that port_id is a valid one */
	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	num = RTE_MIN(dev_info.max_mac_addrs, num);
	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));

	return num;
}

int
rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (mac_addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u MAC address to NULL\n",
			port_id);
		return -EINVAL;
	}

	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);

	return 0;
}

int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (mtu == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
			port_id);
		return -EINVAL;
	}

	*mtu = dev->data->mtu;
	return 0;
}

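/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): raising the MTU only after checking the min_mtu/max_mtu range
 * reported by rte_eth_dev_info_get(), mirroring the validation that
 * rte_eth_dev_set_mtu() performs below; 9000 is an arbitrary jumbo value.
 */
static __rte_unused int
example_try_jumbo_mtu(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint16_t mtu = 9000;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
		return -EINVAL;

	return rte_eth_dev_set_mtu(port_id, mtu);
}
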
int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (*dev->dev_ops->mtu_set == NULL)
		return -ENOTSUP;

	/*
	 * If the device does not support dev_infos_get, skip the
	 * min_mtu/max_mtu validation here: it needs values that are only
	 * populated by rte_eth_dev_info_get(), which itself relies on
	 * dev->dev_ops->dev_infos_get.
	 */
	if (*dev->dev_ops->dev_infos_get != NULL) {
		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
		if (ret != 0)
			return ret;
	}

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before MTU set\n",
			port_id);
		return -EINVAL;
	}

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (ret == 0)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}

int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!(dev->data->dev_conf.rxmode.offloads &
	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
			port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
			port_id, vlan_id);
		return -EINVAL;
	}
	if (*dev->dev_ops->vlan_filter_set == NULL)
		return -ENOTSUP;

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= RTE_BIT64(vbit);
		else
			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->vlan_tpid_set == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}

int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;
	uint64_t dev_offloads;
	uint64_t new_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;
	dev_offloads = orig_offloads;

	/* check which option changed by application */
	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
		else
			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
		mask |= RTE_ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
		else
			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
		mask |= RTE_ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
		mask |= RTE_ETH_VLAN_EXTEND_MASK;
	}

	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
	if (cur != org) {
		if (cur)
			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
		else
			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
		mask |= RTE_ETH_QINQ_STRIP_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Rx VLAN offloading must be within its device capabilities */
	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
		new_offloads = dev_offloads & ~orig_offloads;
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested newly added VLAN offloads "
			"0x%" PRIx64 " must be within Rx offloads capabilities "
			"0x%" PRIx64 " in %s()\n",
			port_id, new_offloads, dev_info.rx_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (*dev->dev_ops->vlan_offload_set == NULL)
		return -ENOTSUP;
	dev->data->dev_conf.rxmode.offloads = dev_offloads;
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* hit an error, restore original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	uint64_t *dev_offloads;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	dev_offloads = &dev->data->dev_conf.rxmode.offloads;

	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;

	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;

	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;

	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;

	return ret;
}

int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->vlan_pvid_set == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}

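/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): enabling VLAN stripping through the read-modify-write pattern
 * that rte_eth_dev_get_vlan_offload()/rte_eth_dev_set_vlan_offload() above
 * expect, so the other offload bits are preserved.
 */
static __rte_unused int
example_enable_vlan_strip(uint16_t port_id)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	/* Keep the current bits, add stripping. */
	return rte_eth_dev_set_vlan_offload(port_id,
			mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
}
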
int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (fc_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u flow control config to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->flow_ctrl_get == NULL)
		return -ENOTSUP;
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}

int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (fc_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u flow control from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->flow_ctrl_set == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}

int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u priority flow control from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	/* High water, low water validation are device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
			       (dev, pfc_conf));
	return -ENOTSUP;
}

static int
validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
			RTE_ETHDEV_LOG(ERR,
				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
				pfc_queue_conf->rx_pause.tx_qid,
				dev_info->nb_tx_queues);
			return -EINVAL;
		}

		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
			RTE_ETHDEV_LOG(ERR,
				"PFC TC not in range for Rx pause requested:%d max:%d\n",
				pfc_queue_conf->rx_pause.tc, tc_max);
			return -EINVAL;
		}
	}

	return 0;
}

static int
validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
			RTE_ETHDEV_LOG(ERR,
				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
				pfc_queue_conf->tx_pause.rx_qid,
				dev_info->nb_rx_queues);
			return -EINVAL;
		}

		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
			RTE_ETHDEV_LOG(ERR,
				"PFC TC not in range for Tx pause requested:%d max:%d\n",
				pfc_queue_conf->tx_pause.tc, tc_max);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
		struct rte_eth_pfc_queue_info *pfc_queue_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_queue_info == NULL) {
		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
			       (dev, pfc_queue_info));
	return -ENOTSUP;
}

int
rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	struct rte_eth_pfc_queue_info pfc_info;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_queue_conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
	if (ret != 0)
		return ret;

	if (pfc_info.tc_max == 0) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
			port_id);
		return -ENOTSUP;
	}

	/* Check whether the requested mode is supported */
	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
			port_id);
		return -EINVAL;
	}

	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
			port_id);
		return -EINVAL;
	}

	/* Validate Rx pause parameters */
	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
				pfc_queue_conf);
		if (ret != 0)
			return ret;
	}

	/* Validate Tx pause parameters */
	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
				pfc_queue_conf);
		if (ret != 0)
			return ret;
	}

	if (*dev->dev_ops->priority_flow_ctrl_queue_config)
		return eth_err(port_id,
			       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
					dev, pfc_queue_conf));
	return -ENOTSUP;
}

static int
eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size,
			uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (max_rxq == 0) {
		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
				(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_ETHDEV_LOG(ERR,
				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
				idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	enum rte_eth_rx_mq_mode mq_mode;
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (reta_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS RETA to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (reta_size == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS RETA with zero size\n",
			port_id);
		return -EINVAL;
	}

	/* Check mask bits */
	ret = eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	/* Check entry value */
	ret = eth_check_reta_entry(reta_conf, reta_size,
				   dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled\n");
		return -ENOTSUP;
	}

	if (*dev->dev_ops->reta_update == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}

int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (reta_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot query ethdev port %u RSS RETA from NULL config\n",
			port_id);
		return -EINVAL;
	}

	/* Check mask bits */
	ret = eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	if (*dev->dev_ops->reta_query == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
	enum rte_eth_rx_mq_mode mq_mode;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rss_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS hash from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
			dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, rss_conf->rss_hf,
			dev_info.flow_type_rss_offloads);
		return -EINVAL;
	}

	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled\n");
		return -ENOTSUP;
	}

	if (*dev->dev_ops->rss_hash_update == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}

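/*
 * Illustrative sketch (an addition for exposition, not part of the original
 * sources): spreading a redirection table of `reta_size' entries round-robin
 * over `nb_queues' Rx queues, in the group/mask layout that
 * rte_eth_dev_rss_reta_update() above validates. The 8-group cap (512
 * entries) is an arbitrary limit for this sketch.
 */
static __rte_unused int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
		uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	if (nb_queues == 0 ||
	    reta_size > RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		/* Each group covers RTE_ETH_RETA_GROUP_SIZE entries. */
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta
			[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
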
rte_eth_rss_conf *rss_conf) 4137 { 4138 struct rte_eth_dev *dev; 4139 4140 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4141 dev = &rte_eth_devices[port_id]; 4142 4143 if (rss_conf == NULL) { 4144 RTE_ETHDEV_LOG(ERR, 4145 "Cannot get ethdev port %u RSS hash config to NULL\n", 4146 port_id); 4147 return -EINVAL; 4148 } 4149 4150 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4151 return -ENOTSUP; 4152 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4153 rss_conf)); 4154 } 4155 4156 int 4157 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4158 struct rte_eth_udp_tunnel *udp_tunnel) 4159 { 4160 struct rte_eth_dev *dev; 4161 4162 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4163 dev = &rte_eth_devices[port_id]; 4164 4165 if (udp_tunnel == NULL) { 4166 RTE_ETHDEV_LOG(ERR, 4167 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4168 port_id); 4169 return -EINVAL; 4170 } 4171 4172 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4173 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4174 return -EINVAL; 4175 } 4176 4177 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4178 return -ENOTSUP; 4179 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4180 udp_tunnel)); 4181 } 4182 4183 int 4184 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4185 struct rte_eth_udp_tunnel *udp_tunnel) 4186 { 4187 struct rte_eth_dev *dev; 4188 4189 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4190 dev = &rte_eth_devices[port_id]; 4191 4192 if (udp_tunnel == NULL) { 4193 RTE_ETHDEV_LOG(ERR, 4194 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4195 port_id); 4196 return -EINVAL; 4197 } 4198 4199 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4200 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4201 return -EINVAL; 4202 } 4203 4204 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4205 return -ENOTSUP; 4206 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4207 udp_tunnel)); 4208 } 4209 4210 int 4211 rte_eth_led_on(uint16_t port_id) 4212 { 4213 struct rte_eth_dev *dev; 4214 4215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4216 dev = &rte_eth_devices[port_id]; 4217 4218 if (*dev->dev_ops->dev_led_on == NULL) 4219 return -ENOTSUP; 4220 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4221 } 4222 4223 int 4224 rte_eth_led_off(uint16_t port_id) 4225 { 4226 struct rte_eth_dev *dev; 4227 4228 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4229 dev = &rte_eth_devices[port_id]; 4230 4231 if (*dev->dev_ops->dev_led_off == NULL) 4232 return -ENOTSUP; 4233 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4234 } 4235 4236 int 4237 rte_eth_fec_get_capability(uint16_t port_id, 4238 struct rte_eth_fec_capa *speed_fec_capa, 4239 unsigned int num) 4240 { 4241 struct rte_eth_dev *dev; 4242 int ret; 4243 4244 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4245 dev = &rte_eth_devices[port_id]; 4246 4247 if (speed_fec_capa == NULL && num > 0) { 4248 RTE_ETHDEV_LOG(ERR, 4249 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4250 port_id); 4251 return -EINVAL; 4252 } 4253 4254 if (*dev->dev_ops->fec_get_capability == NULL) 4255 return -ENOTSUP; 4256 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4257 4258 return ret; 4259 } 4260 4261 int 4262 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4263 { 4264 struct rte_eth_dev *dev; 4265 4266 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4267 dev = &rte_eth_devices[port_id]; 4268 4269 if 
(fec_capa == NULL) { 4270 RTE_ETHDEV_LOG(ERR, 4271 "Cannot get ethdev port %u current FEC mode to NULL\n", 4272 port_id); 4273 return -EINVAL; 4274 } 4275 4276 if (*dev->dev_ops->fec_get == NULL) 4277 return -ENOTSUP; 4278 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4279 } 4280 4281 int 4282 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4283 { 4284 struct rte_eth_dev *dev; 4285 4286 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4287 dev = &rte_eth_devices[port_id]; 4288 4289 if (*dev->dev_ops->fec_set == NULL) 4290 return -ENOTSUP; 4291 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4292 } 4293 4294 /* 4295 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4296 * an empty spot. 4297 */ 4298 static int 4299 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4300 { 4301 struct rte_eth_dev_info dev_info; 4302 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4303 unsigned i; 4304 int ret; 4305 4306 ret = rte_eth_dev_info_get(port_id, &dev_info); 4307 if (ret != 0) 4308 return -1; 4309 4310 for (i = 0; i < dev_info.max_mac_addrs; i++) 4311 if (memcmp(addr, &dev->data->mac_addrs[i], 4312 RTE_ETHER_ADDR_LEN) == 0) 4313 return i; 4314 4315 return -1; 4316 } 4317 4318 static const struct rte_ether_addr null_mac_addr; 4319 4320 int 4321 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4322 uint32_t pool) 4323 { 4324 struct rte_eth_dev *dev; 4325 int index; 4326 uint64_t pool_mask; 4327 int ret; 4328 4329 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4330 dev = &rte_eth_devices[port_id]; 4331 4332 if (addr == NULL) { 4333 RTE_ETHDEV_LOG(ERR, 4334 "Cannot add ethdev port %u MAC address from NULL address\n", 4335 port_id); 4336 return -EINVAL; 4337 } 4338 4339 if (*dev->dev_ops->mac_addr_add == NULL) 4340 return -ENOTSUP; 4341 4342 if (rte_is_zero_ether_addr(addr)) { 4343 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4344 port_id); 4345 return -EINVAL; 4346 } 4347 if (pool >= RTE_ETH_64_POOLS) { 4348 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4349 return -EINVAL; 4350 } 4351 4352 index = eth_dev_get_mac_addr_index(port_id, addr); 4353 if (index < 0) { 4354 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4355 if (index < 0) { 4356 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4357 port_id); 4358 return -ENOSPC; 4359 } 4360 } else { 4361 pool_mask = dev->data->mac_pool_sel[index]; 4362 4363 /* If both the MAC address and pool are already present, do nothing */ 4364 if (pool_mask & RTE_BIT64(pool)) 4365 return 0; 4366 } 4367 4368 /* Update NIC */ 4369 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4370 4371 if (ret == 0) { 4372 /* Update address in NIC data structure */ 4373 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4374 4375 /* Update pool bitmap in NIC data structure */ 4376 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4377 } 4378 4379 return eth_err(port_id, ret); 4380 } 4381 4382 int 4383 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4384 { 4385 struct rte_eth_dev *dev; 4386 int index; 4387 4388 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4389 dev = &rte_eth_devices[port_id]; 4390 4391 if (addr == NULL) { 4392 RTE_ETHDEV_LOG(ERR, 4393 "Cannot remove ethdev port %u MAC address from NULL address\n", 4394 port_id); 4395 return -EINVAL; 4396 } 4397 4398 if (*dev->dev_ops->mac_addr_remove == NULL) 4399 return -ENOTSUP; 4400 4401
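/*
 * Note on slot usage: index 0 of dev->data->mac_addrs always holds the
 * port's default MAC address; it can be replaced with
 * rte_eth_dev_default_mac_addr_set() but never removed, which is why the
 * lookup below treats index 0 specially.
 */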
index = eth_dev_get_mac_addr_index(port_id, addr); 4402 if (index == 0) { 4403 RTE_ETHDEV_LOG(ERR, 4404 "Port %u: Cannot remove default MAC address\n", 4405 port_id); 4406 return -EADDRINUSE; 4407 } else if (index < 0) 4408 return 0; /* Do nothing if address wasn't found */ 4409 4410 /* Update NIC */ 4411 (*dev->dev_ops->mac_addr_remove)(dev, index); 4412 4413 /* Update address in NIC data structure */ 4414 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4415 4416 /* reset pool bitmap */ 4417 dev->data->mac_pool_sel[index] = 0; 4418 4419 return 0; 4420 } 4421 4422 int 4423 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4424 { 4425 struct rte_eth_dev *dev; 4426 int ret; 4427 4428 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4429 dev = &rte_eth_devices[port_id]; 4430 4431 if (addr == NULL) { 4432 RTE_ETHDEV_LOG(ERR, 4433 "Cannot set ethdev port %u default MAC address from NULL address\n", 4434 port_id); 4435 return -EINVAL; 4436 } 4437 4438 if (!rte_is_valid_assigned_ether_addr(addr)) 4439 return -EINVAL; 4440 4441 if (*dev->dev_ops->mac_addr_set == NULL) 4442 return -ENOTSUP; 4443 4444 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4445 if (ret < 0) 4446 return ret; 4447 4448 /* Update default address in NIC data structure */ 4449 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4450 4451 return 0; 4452 } 4453 4454 4455 /* 4456 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4457 * an empty spot. 4458 */ 4459 static int 4460 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4461 const struct rte_ether_addr *addr) 4462 { 4463 struct rte_eth_dev_info dev_info; 4464 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4465 unsigned i; 4466 int ret; 4467 4468 ret = rte_eth_dev_info_get(port_id, &dev_info); 4469 if (ret != 0) 4470 return -1; 4471 4472 if (!dev->data->hash_mac_addrs) 4473 return -1; 4474 4475 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4476 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4477 RTE_ETHER_ADDR_LEN) == 0) 4478 return i; 4479 4480 return -1; 4481 } 4482 4483 int 4484 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4485 uint8_t on) 4486 { 4487 int index; 4488 int ret; 4489 struct rte_eth_dev *dev; 4490 4491 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4492 dev = &rte_eth_devices[port_id]; 4493 4494 if (addr == NULL) { 4495 RTE_ETHDEV_LOG(ERR, 4496 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4497 port_id); 4498 return -EINVAL; 4499 } 4500 4501 if (rte_is_zero_ether_addr(addr)) { 4502 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4503 port_id); 4504 return -EINVAL; 4505 } 4506 4507 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4508 /* Check if it's already there, and do nothing */ 4509 if ((index >= 0) && on) 4510 return 0; 4511 4512 if (index < 0) { 4513 if (!on) { 4514 RTE_ETHDEV_LOG(ERR, 4515 "Port %u: the MAC address was not set in UTA\n", 4516 port_id); 4517 return -EINVAL; 4518 } 4519 4520 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4521 if (index < 0) { 4522 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4523 port_id); 4524 return -ENOSPC; 4525 } 4526 } 4527 4528 if (*dev->dev_ops->uc_hash_table_set == NULL) 4529 return -ENOTSUP; 4530 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4531 if (ret == 0) { 4532 /* Update address in NIC data structure */ 4533 if (on) 4534 rte_ether_addr_copy(addr, 4535 &dev->data->hash_mac_addrs[index]); 4536 else 
4537 rte_ether_addr_copy(&null_mac_addr, 4538 &dev->data->hash_mac_addrs[index]); 4539 } 4540 4541 return eth_err(port_id, ret); 4542 } 4543 4544 int 4545 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4546 { 4547 struct rte_eth_dev *dev; 4548 4549 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4550 dev = &rte_eth_devices[port_id]; 4551 4552 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 4553 return -ENOTSUP; 4554 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4555 on)); 4556 } 4557 4558 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4559 uint32_t tx_rate) 4560 { 4561 struct rte_eth_dev *dev; 4562 struct rte_eth_dev_info dev_info; 4563 struct rte_eth_link link; 4564 int ret; 4565 4566 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4567 dev = &rte_eth_devices[port_id]; 4568 4569 ret = rte_eth_dev_info_get(port_id, &dev_info); 4570 if (ret != 0) 4571 return ret; 4572 4573 link = dev->data->dev_link; 4574 4575 if (queue_idx >= dev_info.max_tx_queues) { 4576 RTE_ETHDEV_LOG(ERR, 4577 "Set queue rate limit: port %u: invalid queue ID=%u\n", 4578 port_id, queue_idx); 4579 return -EINVAL; 4580 } 4581 4582 if (tx_rate > link.link_speed) { 4583 RTE_ETHDEV_LOG(ERR, 4584 "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n", 4585 tx_rate, link.link_speed); 4586 return -EINVAL; 4587 } 4588 4589 if (*dev->dev_ops->set_queue_rate_limit == NULL) 4590 return -ENOTSUP; 4591 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4592 queue_idx, tx_rate)); 4593 } 4594 4595 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 4596 uint8_t avail_thresh) 4597 { 4598 struct rte_eth_dev *dev; 4599 4600 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4601 dev = &rte_eth_devices[port_id]; 4602 4603 if (queue_id >= dev->data->nb_rx_queues) { 4604 RTE_ETHDEV_LOG(ERR, 4605 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 4606 port_id, queue_id); 4607 return -EINVAL; 4608 } 4609 4610 if (avail_thresh > 99) { 4611 RTE_ETHDEV_LOG(ERR, 4612 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 4613 port_id); 4614 return -EINVAL; 4615 } 4616 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 4617 return -ENOTSUP; 4618 return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 4619 queue_id, avail_thresh)); 4620 } 4621 4622 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 4623 uint8_t *avail_thresh) 4624 { 4625 struct rte_eth_dev *dev; 4626 4627 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4628 dev = &rte_eth_devices[port_id]; 4629 4630 if (queue_id == NULL) 4631 return -EINVAL; 4632 if (*queue_id >= dev->data->nb_rx_queues) 4633 *queue_id = 0; 4634 4635 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 4636 return -ENOTSUP; 4637 return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 4638 queue_id, avail_thresh)); 4639 } 4640 4641 RTE_INIT(eth_dev_init_fp_ops) 4642 { 4643 uint32_t i; 4644 4645 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4646 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4647 } 4648 4649 RTE_INIT(eth_dev_init_cb_lists) 4650 { 4651 uint16_t i; 4652 4653 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4654 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4655 } 4656 4657 int 4658 rte_eth_dev_callback_register(uint16_t port_id, 4659 enum rte_eth_event_type event, 4660 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4661 { 4662 struct rte_eth_dev *dev; 4663 struct rte_eth_dev_callback *user_cb; 4664 uint16_t next_port; 4665
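/*
 * RTE_ETH_ALL is accepted as a wildcard: the range computed below then
 * covers every possible port slot, so the callback also takes effect for
 * ports that are attached after this call.
 */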
uint16_t last_port; 4666 4667 if (cb_fn == NULL) { 4668 RTE_ETHDEV_LOG(ERR, 4669 "Cannot register ethdev port %u callback from NULL\n", 4670 port_id); 4671 return -EINVAL; 4672 } 4673 4674 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4675 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4676 return -EINVAL; 4677 } 4678 4679 if (port_id == RTE_ETH_ALL) { 4680 next_port = 0; 4681 last_port = RTE_MAX_ETHPORTS - 1; 4682 } else { 4683 next_port = last_port = port_id; 4684 } 4685 4686 rte_spinlock_lock(&eth_dev_cb_lock); 4687 4688 do { 4689 dev = &rte_eth_devices[next_port]; 4690 4691 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4692 if (user_cb->cb_fn == cb_fn && 4693 user_cb->cb_arg == cb_arg && 4694 user_cb->event == event) { 4695 break; 4696 } 4697 } 4698 4699 /* create a new callback. */ 4700 if (user_cb == NULL) { 4701 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4702 sizeof(struct rte_eth_dev_callback), 0); 4703 if (user_cb != NULL) { 4704 user_cb->cb_fn = cb_fn; 4705 user_cb->cb_arg = cb_arg; 4706 user_cb->event = event; 4707 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4708 user_cb, next); 4709 } else { 4710 rte_spinlock_unlock(&eth_dev_cb_lock); 4711 rte_eth_dev_callback_unregister(port_id, event, 4712 cb_fn, cb_arg); 4713 return -ENOMEM; 4714 } 4715 4716 } 4717 } while (++next_port <= last_port); 4718 4719 rte_spinlock_unlock(&eth_dev_cb_lock); 4720 return 0; 4721 } 4722 4723 int 4724 rte_eth_dev_callback_unregister(uint16_t port_id, 4725 enum rte_eth_event_type event, 4726 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4727 { 4728 int ret; 4729 struct rte_eth_dev *dev; 4730 struct rte_eth_dev_callback *cb, *next; 4731 uint16_t next_port; 4732 uint16_t last_port; 4733 4734 if (cb_fn == NULL) { 4735 RTE_ETHDEV_LOG(ERR, 4736 "Cannot unregister ethdev port %u callback from NULL\n", 4737 port_id); 4738 return -EINVAL; 4739 } 4740 4741 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4742 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4743 return -EINVAL; 4744 } 4745 4746 if (port_id == RTE_ETH_ALL) { 4747 next_port = 0; 4748 last_port = RTE_MAX_ETHPORTS - 1; 4749 } else { 4750 next_port = last_port = port_id; 4751 } 4752 4753 rte_spinlock_lock(&eth_dev_cb_lock); 4754 4755 do { 4756 dev = &rte_eth_devices[next_port]; 4757 ret = 0; 4758 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4759 cb = next) { 4760 4761 next = TAILQ_NEXT(cb, next); 4762 4763 if (cb->cb_fn != cb_fn || cb->event != event || 4764 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4765 continue; 4766 4767 /* 4768 * if this callback is not executing right now, 4769 * then remove it.
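* Otherwise it stays on the list and -EAGAIN is returned, so the
* caller can retry after the callback has finished executing.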
4770 */ 4771 if (cb->active == 0) { 4772 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4773 rte_free(cb); 4774 } else { 4775 ret = -EAGAIN; 4776 } 4777 } 4778 } while (++next_port <= last_port); 4779 4780 rte_spinlock_unlock(&eth_dev_cb_lock); 4781 return ret; 4782 } 4783 4784 int 4785 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4786 { 4787 uint32_t vec; 4788 struct rte_eth_dev *dev; 4789 struct rte_intr_handle *intr_handle; 4790 uint16_t qid; 4791 int rc; 4792 4793 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4794 dev = &rte_eth_devices[port_id]; 4795 4796 if (!dev->intr_handle) { 4797 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4798 return -ENOTSUP; 4799 } 4800 4801 intr_handle = dev->intr_handle; 4802 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4803 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4804 return -EPERM; 4805 } 4806 4807 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4808 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4809 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4810 if (rc && rc != -EEXIST) { 4811 RTE_ETHDEV_LOG(ERR, 4812 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4813 port_id, qid, op, epfd, vec); 4814 } 4815 } 4816 4817 return 0; 4818 } 4819 4820 int 4821 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4822 { 4823 struct rte_intr_handle *intr_handle; 4824 struct rte_eth_dev *dev; 4825 unsigned int efd_idx; 4826 uint32_t vec; 4827 int fd; 4828 4829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4830 dev = &rte_eth_devices[port_id]; 4831 4832 if (queue_id >= dev->data->nb_rx_queues) { 4833 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4834 return -1; 4835 } 4836 4837 if (!dev->intr_handle) { 4838 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4839 return -1; 4840 } 4841 4842 intr_handle = dev->intr_handle; 4843 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4844 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4845 return -1; 4846 } 4847 4848 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4849 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4850 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4851 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4852 4853 return fd; 4854 } 4855 4856 int 4857 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4858 int epfd, int op, void *data) 4859 { 4860 uint32_t vec; 4861 struct rte_eth_dev *dev; 4862 struct rte_intr_handle *intr_handle; 4863 int rc; 4864 4865 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4866 dev = &rte_eth_devices[port_id]; 4867 4868 if (queue_id >= dev->data->nb_rx_queues) { 4869 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4870 return -EINVAL; 4871 } 4872 4873 if (!dev->intr_handle) { 4874 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4875 return -ENOTSUP; 4876 } 4877 4878 intr_handle = dev->intr_handle; 4879 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4880 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4881 return -EPERM; 4882 } 4883 4884 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4885 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4886 if (rc && rc != -EEXIST) { 4887 RTE_ETHDEV_LOG(ERR, 4888 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4889 port_id, queue_id, op, epfd, vec); 4890 return rc; 4891 } 4892 4893 return 0; 4894 } 4895 4896 int 4897 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4898 uint16_t queue_id) 4899 { 4900 struct rte_eth_dev *dev; 4901 int ret; 4902 4903 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4904 dev = &rte_eth_devices[port_id]; 4905 4906 ret = eth_dev_validate_rx_queue(dev, queue_id); 4907 if (ret != 0) 4908 return ret; 4909 4910 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 4911 return -ENOTSUP; 4912 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 4913 } 4914 4915 int 4916 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4917 uint16_t queue_id) 4918 { 4919 struct rte_eth_dev *dev; 4920 int ret; 4921 4922 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4923 dev = &rte_eth_devices[port_id]; 4924 4925 ret = eth_dev_validate_rx_queue(dev, queue_id); 4926 if (ret != 0) 4927 return ret; 4928 4929 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 4930 return -ENOTSUP; 4931 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 4932 } 4933 4934 4935 const struct rte_eth_rxtx_callback * 4936 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4937 rte_rx_callback_fn fn, void *user_param) 4938 { 4939 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4940 rte_errno = ENOTSUP; 4941 return NULL; 4942 #endif 4943 struct rte_eth_dev *dev; 4944 4945 /* check input parameters */ 4946 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4947 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4948 rte_errno = EINVAL; 4949 return NULL; 4950 } 4951 dev = &rte_eth_devices[port_id]; 4952 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4953 rte_errno = EINVAL; 4954 return NULL; 4955 } 4956 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4957 4958 if (cb == NULL) { 4959 rte_errno = ENOMEM; 4960 return NULL; 4961 } 4962 4963 cb->fn.rx = fn; 4964 cb->param = user_param; 4965 4966 rte_spinlock_lock(&eth_dev_rx_cb_lock); 4967 /* Add the callbacks in fifo order. */ 4968 struct rte_eth_rxtx_callback *tail = 4969 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4970 4971 if (!tail) { 4972 /* Stores to cb->fn and cb->param should complete before 4973 * cb is visible to data plane.
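* The __ATOMIC_RELEASE store just below provides that guarantee: a
* data-plane thread that observes the new pointer also observes the
* fully initialized cb->fn and cb->param fields.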
4974 */ 4975 __atomic_store_n( 4976 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4977 cb, __ATOMIC_RELEASE); 4978 4979 } else { 4980 while (tail->next) 4981 tail = tail->next; 4982 /* Stores to cb->fn and cb->param should complete before 4983 * cb is visible to data plane. 4984 */ 4985 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4986 } 4987 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 4988 4989 return cb; 4990 } 4991 4992 const struct rte_eth_rxtx_callback * 4993 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 4994 rte_rx_callback_fn fn, void *user_param) 4995 { 4996 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4997 rte_errno = ENOTSUP; 4998 return NULL; 4999 #endif 5000 /* check input parameters */ 5001 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5002 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5003 rte_errno = EINVAL; 5004 return NULL; 5005 } 5006 5007 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5008 5009 if (cb == NULL) { 5010 rte_errno = ENOMEM; 5011 return NULL; 5012 } 5013 5014 cb->fn.rx = fn; 5015 cb->param = user_param; 5016 5017 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5018 /* Add the callbacks at first position */ 5019 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5020 /* Stores to cb->fn, cb->param and cb->next should complete before 5021 * cb is visible to data plane threads. 5022 */ 5023 __atomic_store_n( 5024 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5025 cb, __ATOMIC_RELEASE); 5026 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5027 5028 return cb; 5029 } 5030 5031 const struct rte_eth_rxtx_callback * 5032 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5033 rte_tx_callback_fn fn, void *user_param) 5034 { 5035 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5036 rte_errno = ENOTSUP; 5037 return NULL; 5038 #endif 5039 struct rte_eth_dev *dev; 5040 5041 /* check input parameters */ 5042 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5043 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5044 rte_errno = EINVAL; 5045 return NULL; 5046 } 5047 5048 dev = &rte_eth_devices[port_id]; 5049 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5050 rte_errno = EINVAL; 5051 return NULL; 5052 } 5053 5054 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5055 5056 if (cb == NULL) { 5057 rte_errno = ENOMEM; 5058 return NULL; 5059 } 5060 5061 cb->fn.tx = fn; 5062 cb->param = user_param; 5063 5064 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5065 /* Add the callbacks in fifo order. */ 5066 struct rte_eth_rxtx_callback *tail = 5067 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5068 5069 if (!tail) { 5070 /* Stores to cb->fn and cb->param should complete before 5071 * cb is visible to data plane. 5072 */ 5073 __atomic_store_n( 5074 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5075 cb, __ATOMIC_RELEASE); 5076 5077 } else { 5078 while (tail->next) 5079 tail = tail->next; 5080 /* Stores to cb->fn and cb->param should complete before 5081 * cb is visible to data plane. 5082 */ 5083 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5084 } 5085 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5086 5087 return cb; 5088 } 5089 5090 int 5091 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5092 const struct rte_eth_rxtx_callback *user_cb) 5093 { 5094 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5095 return -ENOTSUP; 5096 #endif 5097 /* Check input parameters.
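* Note that removal only unlinks the callback from the queue's list;
* the memory is intentionally not freed here because a data-plane
* thread may still be executing it. The caller must free it once it is
* certain no burst function is still using the callback.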
*/ 5098 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5099 if (user_cb == NULL || 5100 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5101 return -EINVAL; 5102 5103 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5104 struct rte_eth_rxtx_callback *cb; 5105 struct rte_eth_rxtx_callback **prev_cb; 5106 int ret = -EINVAL; 5107 5108 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5109 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5110 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5111 cb = *prev_cb; 5112 if (cb == user_cb) { 5113 /* Remove the user cb from the callback list. */ 5114 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5115 ret = 0; 5116 break; 5117 } 5118 } 5119 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5120 5121 return ret; 5122 } 5123 5124 int 5125 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5126 const struct rte_eth_rxtx_callback *user_cb) 5127 { 5128 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5129 return -ENOTSUP; 5130 #endif 5131 /* Check input parameters. */ 5132 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5133 if (user_cb == NULL || 5134 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5135 return -EINVAL; 5136 5137 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5138 int ret = -EINVAL; 5139 struct rte_eth_rxtx_callback *cb; 5140 struct rte_eth_rxtx_callback **prev_cb; 5141 5142 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5143 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5144 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5145 cb = *prev_cb; 5146 if (cb == user_cb) { 5147 /* Remove the user cb from the callback list. */ 5148 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5149 ret = 0; 5150 break; 5151 } 5152 } 5153 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5154 5155 return ret; 5156 } 5157 5158 int 5159 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5160 struct rte_eth_rxq_info *qinfo) 5161 { 5162 struct rte_eth_dev *dev; 5163 5164 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5165 dev = &rte_eth_devices[port_id]; 5166 5167 if (queue_id >= dev->data->nb_rx_queues) { 5168 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5169 return -EINVAL; 5170 } 5171 5172 if (qinfo == NULL) { 5173 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5174 port_id, queue_id); 5175 return -EINVAL; 5176 } 5177 5178 if (dev->data->rx_queues == NULL || 5179 dev->data->rx_queues[queue_id] == NULL) { 5180 RTE_ETHDEV_LOG(ERR, 5181 "Rx queue %"PRIu16" of device with port_id=%" 5182 PRIu16" has not been setup\n", 5183 queue_id, port_id); 5184 return -EINVAL; 5185 } 5186 5187 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5188 RTE_ETHDEV_LOG(INFO, 5189 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5190 queue_id, port_id); 5191 return -EINVAL; 5192 } 5193 5194 if (*dev->dev_ops->rxq_info_get == NULL) 5195 return -ENOTSUP; 5196 5197 memset(qinfo, 0, sizeof(*qinfo)); 5198 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5199 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5200 5201 return 0; 5202 } 5203 5204 int 5205 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5206 struct rte_eth_txq_info *qinfo) 5207 { 5208 struct rte_eth_dev *dev; 5209 5210 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5211 dev = &rte_eth_devices[port_id]; 5212 5213 if (queue_id >= dev->data->nb_tx_queues) { 5214 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5215 return -EINVAL; 5216 } 5217 5218 if (qinfo == NULL) { 5219 RTE_ETHDEV_LOG(ERR, "Cannot get
ethdev port %u Tx queue %u info to NULL\n", 5220 port_id, queue_id); 5221 return -EINVAL; 5222 } 5223 5224 if (dev->data->tx_queues == NULL || 5225 dev->data->tx_queues[queue_id] == NULL) { 5226 RTE_ETHDEV_LOG(ERR, 5227 "Tx queue %"PRIu16" of device with port_id=%" 5228 PRIu16" has not been setup\n", 5229 queue_id, port_id); 5230 return -EINVAL; 5231 } 5232 5233 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5234 RTE_ETHDEV_LOG(INFO, 5235 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5236 queue_id, port_id); 5237 return -EINVAL; 5238 } 5239 5240 if (*dev->dev_ops->txq_info_get == NULL) 5241 return -ENOTSUP; 5242 5243 memset(qinfo, 0, sizeof(*qinfo)); 5244 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5245 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5246 5247 return 0; 5248 } 5249 5250 int 5251 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5252 struct rte_eth_burst_mode *mode) 5253 { 5254 struct rte_eth_dev *dev; 5255 5256 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5257 dev = &rte_eth_devices[port_id]; 5258 5259 if (queue_id >= dev->data->nb_rx_queues) { 5260 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5261 return -EINVAL; 5262 } 5263 5264 if (mode == NULL) { 5265 RTE_ETHDEV_LOG(ERR, 5266 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5267 port_id, queue_id); 5268 return -EINVAL; 5269 } 5270 5271 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5272 return -ENOTSUP; 5273 memset(mode, 0, sizeof(*mode)); 5274 return eth_err(port_id, 5275 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5276 } 5277 5278 int 5279 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5280 struct rte_eth_burst_mode *mode) 5281 { 5282 struct rte_eth_dev *dev; 5283 5284 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5285 dev = &rte_eth_devices[port_id]; 5286 5287 if (queue_id >= dev->data->nb_tx_queues) { 5288 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5289 return -EINVAL; 5290 } 5291 5292 if (mode == NULL) { 5293 RTE_ETHDEV_LOG(ERR, 5294 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5295 port_id, queue_id); 5296 return -EINVAL; 5297 } 5298 5299 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5300 return -ENOTSUP; 5301 memset(mode, 0, sizeof(*mode)); 5302 return eth_err(port_id, 5303 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5304 } 5305 5306 int 5307 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5308 struct rte_power_monitor_cond *pmc) 5309 { 5310 struct rte_eth_dev *dev; 5311 5312 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5313 dev = &rte_eth_devices[port_id]; 5314 5315 if (queue_id >= dev->data->nb_rx_queues) { 5316 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5317 return -EINVAL; 5318 } 5319 5320 if (pmc == NULL) { 5321 RTE_ETHDEV_LOG(ERR, 5322 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5323 port_id, queue_id); 5324 return -EINVAL; 5325 } 5326 5327 if (*dev->dev_ops->get_monitor_addr == NULL) 5328 return -ENOTSUP; 5329 return eth_err(port_id, 5330 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5331 } 5332 5333 int 5334 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5335 struct rte_ether_addr *mc_addr_set, 5336 uint32_t nb_mc_addr) 5337 { 5338 struct rte_eth_dev *dev; 5339 5340 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5341 dev = &rte_eth_devices[port_id]; 5342 5343 if (*dev->dev_ops->set_mc_addr_list == NULL) 5344 return -ENOTSUP; 5345 return 
eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5346 mc_addr_set, nb_mc_addr)); 5347 } 5348 5349 int 5350 rte_eth_timesync_enable(uint16_t port_id) 5351 { 5352 struct rte_eth_dev *dev; 5353 5354 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5355 dev = &rte_eth_devices[port_id]; 5356 5357 if (*dev->dev_ops->timesync_enable == NULL) 5358 return -ENOTSUP; 5359 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5360 } 5361 5362 int 5363 rte_eth_timesync_disable(uint16_t port_id) 5364 { 5365 struct rte_eth_dev *dev; 5366 5367 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5368 dev = &rte_eth_devices[port_id]; 5369 5370 if (*dev->dev_ops->timesync_disable == NULL) 5371 return -ENOTSUP; 5372 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5373 } 5374 5375 int 5376 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5377 uint32_t flags) 5378 { 5379 struct rte_eth_dev *dev; 5380 5381 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5382 dev = &rte_eth_devices[port_id]; 5383 5384 if (timestamp == NULL) { 5385 RTE_ETHDEV_LOG(ERR, 5386 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5387 port_id); 5388 return -EINVAL; 5389 } 5390 5391 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 5392 return -ENOTSUP; 5393 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5394 (dev, timestamp, flags)); 5395 } 5396 5397 int 5398 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5399 struct timespec *timestamp) 5400 { 5401 struct rte_eth_dev *dev; 5402 5403 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5404 dev = &rte_eth_devices[port_id]; 5405 5406 if (timestamp == NULL) { 5407 RTE_ETHDEV_LOG(ERR, 5408 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5409 port_id); 5410 return -EINVAL; 5411 } 5412 5413 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 5414 return -ENOTSUP; 5415 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5416 (dev, timestamp)); 5417 } 5418 5419 int 5420 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5421 { 5422 struct rte_eth_dev *dev; 5423 5424 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5425 dev = &rte_eth_devices[port_id]; 5426 5427 if (*dev->dev_ops->timesync_adjust_time == NULL) 5428 return -ENOTSUP; 5429 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5430 } 5431 5432 int 5433 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5434 { 5435 struct rte_eth_dev *dev; 5436 5437 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5438 dev = &rte_eth_devices[port_id]; 5439 5440 if (timestamp == NULL) { 5441 RTE_ETHDEV_LOG(ERR, 5442 "Cannot read ethdev port %u timesync time to NULL\n", 5443 port_id); 5444 return -EINVAL; 5445 } 5446 5447 if (*dev->dev_ops->timesync_read_time == NULL) 5448 return -ENOTSUP; 5449 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5450 timestamp)); 5451 } 5452 5453 int 5454 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5455 { 5456 struct rte_eth_dev *dev; 5457 5458 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5459 dev = &rte_eth_devices[port_id]; 5460 5461 if (timestamp == NULL) { 5462 RTE_ETHDEV_LOG(ERR, 5463 "Cannot write ethdev port %u timesync from NULL time\n", 5464 port_id); 5465 return -EINVAL; 5466 } 5467 5468 if (*dev->dev_ops->timesync_write_time == NULL) 5469 return -ENOTSUP; 5470 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5471 timestamp)); 5472 } 5473 5474 int 5475 
rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5476 { 5477 struct rte_eth_dev *dev; 5478 5479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5480 dev = &rte_eth_devices[port_id]; 5481 5482 if (clock == NULL) { 5483 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5484 port_id); 5485 return -EINVAL; 5486 } 5487 5488 if (*dev->dev_ops->read_clock == NULL) 5489 return -ENOTSUP; 5490 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5491 } 5492 5493 int 5494 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5495 { 5496 struct rte_eth_dev *dev; 5497 5498 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5499 dev = &rte_eth_devices[port_id]; 5500 5501 if (info == NULL) { 5502 RTE_ETHDEV_LOG(ERR, 5503 "Cannot get ethdev port %u register info to NULL\n", 5504 port_id); 5505 return -EINVAL; 5506 } 5507 5508 if (*dev->dev_ops->get_reg == NULL) 5509 return -ENOTSUP; 5510 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5511 } 5512 5513 int 5514 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5515 { 5516 struct rte_eth_dev *dev; 5517 5518 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5519 dev = &rte_eth_devices[port_id]; 5520 5521 if (*dev->dev_ops->get_eeprom_length == NULL) 5522 return -ENOTSUP; 5523 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5524 } 5525 5526 int 5527 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5528 { 5529 struct rte_eth_dev *dev; 5530 5531 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5532 dev = &rte_eth_devices[port_id]; 5533 5534 if (info == NULL) { 5535 RTE_ETHDEV_LOG(ERR, 5536 "Cannot get ethdev port %u EEPROM info to NULL\n", 5537 port_id); 5538 return -EINVAL; 5539 } 5540 5541 if (*dev->dev_ops->get_eeprom == NULL) 5542 return -ENOTSUP; 5543 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5544 } 5545 5546 int 5547 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5548 { 5549 struct rte_eth_dev *dev; 5550 5551 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5552 dev = &rte_eth_devices[port_id]; 5553 5554 if (info == NULL) { 5555 RTE_ETHDEV_LOG(ERR, 5556 "Cannot set ethdev port %u EEPROM from NULL info\n", 5557 port_id); 5558 return -EINVAL; 5559 } 5560 5561 if (*dev->dev_ops->set_eeprom == NULL) 5562 return -ENOTSUP; 5563 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5564 } 5565 5566 int 5567 rte_eth_dev_get_module_info(uint16_t port_id, 5568 struct rte_eth_dev_module_info *modinfo) 5569 { 5570 struct rte_eth_dev *dev; 5571 5572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5573 dev = &rte_eth_devices[port_id]; 5574 5575 if (modinfo == NULL) { 5576 RTE_ETHDEV_LOG(ERR, 5577 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5578 port_id); 5579 return -EINVAL; 5580 } 5581 5582 if (*dev->dev_ops->get_module_info == NULL) 5583 return -ENOTSUP; 5584 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5585 } 5586 5587 int 5588 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5589 struct rte_dev_eeprom_info *info) 5590 { 5591 struct rte_eth_dev *dev; 5592 5593 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5594 dev = &rte_eth_devices[port_id]; 5595 5596 if (info == NULL) { 5597 RTE_ETHDEV_LOG(ERR, 5598 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5599 port_id); 5600 return -EINVAL; 5601 } 5602 5603 if (info->data == NULL) { 5604 RTE_ETHDEV_LOG(ERR, 5605 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5606 port_id); 5607 return -EINVAL; 
5608 } 5609 5610 if (info->length == 0) { 5611 RTE_ETHDEV_LOG(ERR, 5612 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5613 port_id); 5614 return -EINVAL; 5615 } 5616 5617 if (*dev->dev_ops->get_module_eeprom == NULL) 5618 return -ENOTSUP; 5619 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5620 } 5621 5622 int 5623 rte_eth_dev_get_dcb_info(uint16_t port_id, 5624 struct rte_eth_dcb_info *dcb_info) 5625 { 5626 struct rte_eth_dev *dev; 5627 5628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5629 dev = &rte_eth_devices[port_id]; 5630 5631 if (dcb_info == NULL) { 5632 RTE_ETHDEV_LOG(ERR, 5633 "Cannot get ethdev port %u DCB info to NULL\n", 5634 port_id); 5635 return -EINVAL; 5636 } 5637 5638 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5639 5640 if (*dev->dev_ops->get_dcb_info == NULL) 5641 return -ENOTSUP; 5642 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5643 } 5644 5645 static void 5646 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5647 const struct rte_eth_desc_lim *desc_lim) 5648 { 5649 if (desc_lim->nb_align != 0) 5650 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5651 5652 if (desc_lim->nb_max != 0) 5653 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5654 5655 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5656 } 5657 5658 int 5659 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5660 uint16_t *nb_rx_desc, 5661 uint16_t *nb_tx_desc) 5662 { 5663 struct rte_eth_dev_info dev_info; 5664 int ret; 5665 5666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5667 5668 ret = rte_eth_dev_info_get(port_id, &dev_info); 5669 if (ret != 0) 5670 return ret; 5671 5672 if (nb_rx_desc != NULL) 5673 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5674 5675 if (nb_tx_desc != NULL) 5676 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5677 5678 return 0; 5679 } 5680 5681 int 5682 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5683 struct rte_eth_hairpin_cap *cap) 5684 { 5685 struct rte_eth_dev *dev; 5686 5687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5688 dev = &rte_eth_devices[port_id]; 5689 5690 if (cap == NULL) { 5691 RTE_ETHDEV_LOG(ERR, 5692 "Cannot get ethdev port %u hairpin capability to NULL\n", 5693 port_id); 5694 return -EINVAL; 5695 } 5696 5697 if (*dev->dev_ops->hairpin_cap_get == NULL) 5698 return -ENOTSUP; 5699 memset(cap, 0, sizeof(*cap)); 5700 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5701 } 5702 5703 int 5704 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5705 { 5706 struct rte_eth_dev *dev; 5707 5708 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5709 dev = &rte_eth_devices[port_id]; 5710 5711 if (pool == NULL) { 5712 RTE_ETHDEV_LOG(ERR, 5713 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5714 port_id); 5715 return -EINVAL; 5716 } 5717 5718 if (*dev->dev_ops->pool_ops_supported == NULL) 5719 return 1; /* all pools are supported */ 5720 5721 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5722 } 5723 5724 static int 5725 eth_dev_handle_port_list(const char *cmd __rte_unused, 5726 const char *params __rte_unused, 5727 struct rte_tel_data *d) 5728 { 5729 int port_id; 5730 5731 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5732 RTE_ETH_FOREACH_DEV(port_id) 5733 rte_tel_data_add_array_int(d, port_id); 5734 return 0; 5735 } 5736 5737 static void 5738 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5739 const char *stat_name) 5740 { 5741 int q; 5742 struct rte_tel_data *q_data = 
rte_tel_data_alloc(); 5743 if (q_data == NULL) 5744 return; 5745 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 5746 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 5747 rte_tel_data_add_array_u64(q_data, q_stats[q]); 5748 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 5749 } 5750 5751 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 5752 5753 static int 5754 eth_dev_handle_port_stats(const char *cmd __rte_unused, 5755 const char *params, 5756 struct rte_tel_data *d) 5757 { 5758 struct rte_eth_stats stats; 5759 int port_id, ret; 5760 5761 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5762 return -1; 5763 5764 port_id = atoi(params); 5765 if (!rte_eth_dev_is_valid_port(port_id)) 5766 return -1; 5767 5768 ret = rte_eth_stats_get(port_id, &stats); 5769 if (ret < 0) 5770 return -1; 5771 5772 rte_tel_data_start_dict(d); 5773 ADD_DICT_STAT(stats, ipackets); 5774 ADD_DICT_STAT(stats, opackets); 5775 ADD_DICT_STAT(stats, ibytes); 5776 ADD_DICT_STAT(stats, obytes); 5777 ADD_DICT_STAT(stats, imissed); 5778 ADD_DICT_STAT(stats, ierrors); 5779 ADD_DICT_STAT(stats, oerrors); 5780 ADD_DICT_STAT(stats, rx_nombuf); 5781 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 5782 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 5783 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 5784 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 5785 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 5786 5787 return 0; 5788 } 5789 5790 static int 5791 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 5792 const char *params, 5793 struct rte_tel_data *d) 5794 { 5795 struct rte_eth_xstat *eth_xstats; 5796 struct rte_eth_xstat_name *xstat_names; 5797 int port_id, num_xstats; 5798 int i, ret; 5799 char *end_param; 5800 5801 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5802 return -1; 5803 5804 port_id = strtoul(params, &end_param, 0); 5805 if (*end_param != '\0') 5806 RTE_ETHDEV_LOG(NOTICE, 5807 "Extra parameters passed to ethdev telemetry command, ignoring"); 5808 if (!rte_eth_dev_is_valid_port(port_id)) 5809 return -1; 5810 5811 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 5812 if (num_xstats < 0) 5813 return -1; 5814 5815 /* use one malloc for both names and stats */ 5816 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 5817 sizeof(struct rte_eth_xstat_name)) * num_xstats); 5818 if (eth_xstats == NULL) 5819 return -1; 5820 xstat_names = (void *)&eth_xstats[num_xstats]; 5821 5822 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 5823 if (ret < 0 || ret > num_xstats) { 5824 free(eth_xstats); 5825 return -1; 5826 } 5827 5828 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 5829 if (ret < 0 || ret > num_xstats) { 5830 free(eth_xstats); 5831 return -1; 5832 } 5833 5834 rte_tel_data_start_dict(d); 5835 for (i = 0; i < num_xstats; i++) 5836 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 5837 eth_xstats[i].value); 5838 free(eth_xstats); 5839 return 0; 5840 } 5841 5842 #ifndef RTE_EXEC_ENV_WINDOWS 5843 static int 5844 eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, 5845 const char *params, 5846 struct rte_tel_data *d) 5847 { 5848 char *buf, *end_param; 5849 int port_id, ret; 5850 FILE *f; 5851 5852 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5853 return -EINVAL; 5854 5855 port_id = strtoul(params, &end_param, 0); 5856 if (*end_param != '\0') 5857 RTE_ETHDEV_LOG(NOTICE, 5858 "Extra parameters passed to ethdev telemetry
command, ignoring"); 5859 if (!rte_eth_dev_is_valid_port(port_id)) 5860 return -EINVAL; 5861 5862 buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); 5863 if (buf == NULL) 5864 return -ENOMEM; 5865 5866 f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+"); 5867 if (f == NULL) { 5868 free(buf); 5869 return -EINVAL; 5870 } 5871 5872 ret = rte_eth_dev_priv_dump(port_id, f); 5873 fclose(f); 5874 if (ret == 0) { 5875 rte_tel_data_start_dict(d); 5876 rte_tel_data_string(d, buf); 5877 } 5878 5879 free(buf); 5880 return 0; 5881 } 5882 #endif /* !RTE_EXEC_ENV_WINDOWS */ 5883 5884 static int 5885 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 5886 const char *params, 5887 struct rte_tel_data *d) 5888 { 5889 static const char *status_str = "status"; 5890 int ret, port_id; 5891 struct rte_eth_link link; 5892 char *end_param; 5893 5894 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5895 return -1; 5896 5897 port_id = strtoul(params, &end_param, 0); 5898 if (*end_param != '\0') 5899 RTE_ETHDEV_LOG(NOTICE, 5900 "Extra parameters passed to ethdev telemetry command, ignoring"); 5901 if (!rte_eth_dev_is_valid_port(port_id)) 5902 return -1; 5903 5904 ret = rte_eth_link_get_nowait(port_id, &link); 5905 if (ret < 0) 5906 return -1; 5907 5908 rte_tel_data_start_dict(d); 5909 if (!link.link_status) { 5910 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 5911 return 0; 5912 } 5913 rte_tel_data_add_dict_string(d, status_str, "UP"); 5914 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 5915 rte_tel_data_add_dict_string(d, "duplex", 5916 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 5917 "full-duplex" : "half-duplex"); 5918 return 0; 5919 } 5920 5921 static int 5922 eth_dev_handle_port_info(const char *cmd __rte_unused, 5923 const char *params, 5924 struct rte_tel_data *d) 5925 { 5926 struct rte_tel_data *rxq_state, *txq_state; 5927 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 5928 struct rte_eth_dev *eth_dev; 5929 char *end_param; 5930 int port_id, i; 5931 5932 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5933 return -1; 5934 5935 port_id = strtoul(params, &end_param, 0); 5936 if (*end_param != '\0') 5937 RTE_ETHDEV_LOG(NOTICE, 5938 "Extra parameters passed to ethdev telemetry command, ignoring"); 5939 5940 if (!rte_eth_dev_is_valid_port(port_id)) 5941 return -EINVAL; 5942 5943 eth_dev = &rte_eth_devices[port_id]; 5944 5945 rxq_state = rte_tel_data_alloc(); 5946 if (!rxq_state) 5947 return -ENOMEM; 5948 5949 txq_state = rte_tel_data_alloc(); 5950 if (!txq_state) { 5951 rte_tel_data_free(rxq_state); 5952 return -ENOMEM; 5953 } 5954 5955 rte_tel_data_start_dict(d); 5956 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 5957 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 5958 rte_tel_data_add_dict_int(d, "nb_rx_queues", 5959 eth_dev->data->nb_rx_queues); 5960 rte_tel_data_add_dict_int(d, "nb_tx_queues", 5961 eth_dev->data->nb_tx_queues); 5962 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 5963 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 5964 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 5965 eth_dev->data->min_rx_buf_size); 5966 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 5967 eth_dev->data->rx_mbuf_alloc_failed); 5968 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 5969 eth_dev->data->mac_addrs); 5970 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 5971 rte_tel_data_add_dict_int(d, "promiscuous", 5972 eth_dev->data->promiscuous); 5973 rte_tel_data_add_dict_int(d, 
"scattered_rx", 5974 eth_dev->data->scattered_rx); 5975 rte_tel_data_add_dict_int(d, "all_multicast", 5976 eth_dev->data->all_multicast); 5977 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 5978 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 5979 rte_tel_data_add_dict_int(d, "dev_configured", 5980 eth_dev->data->dev_configured); 5981 5982 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 5983 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 5984 rte_tel_data_add_array_int(rxq_state, 5985 eth_dev->data->rx_queue_state[i]); 5986 5987 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 5988 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 5989 rte_tel_data_add_array_int(txq_state, 5990 eth_dev->data->tx_queue_state[i]); 5991 5992 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 5993 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 5994 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 5995 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 5996 rte_tel_data_add_dict_int(d, "rx_offloads", 5997 eth_dev->data->dev_conf.rxmode.offloads); 5998 rte_tel_data_add_dict_int(d, "tx_offloads", 5999 eth_dev->data->dev_conf.txmode.offloads); 6000 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 6001 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 6002 6003 return 0; 6004 } 6005 6006 int 6007 rte_eth_representor_info_get(uint16_t port_id, 6008 struct rte_eth_representor_info *info) 6009 { 6010 struct rte_eth_dev *dev; 6011 6012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6013 dev = &rte_eth_devices[port_id]; 6014 6015 if (*dev->dev_ops->representor_info_get == NULL) 6016 return -ENOTSUP; 6017 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6018 } 6019 6020 int 6021 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 6022 { 6023 struct rte_eth_dev *dev; 6024 6025 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6026 dev = &rte_eth_devices[port_id]; 6027 6028 if (dev->data->dev_configured != 0) { 6029 RTE_ETHDEV_LOG(ERR, 6030 "The port (ID=%"PRIu16") is already configured\n", 6031 port_id); 6032 return -EBUSY; 6033 } 6034 6035 if (features == NULL) { 6036 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 6037 return -EINVAL; 6038 } 6039 6040 if (*dev->dev_ops->rx_metadata_negotiate == NULL) 6041 return -ENOTSUP; 6042 return eth_err(port_id, 6043 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 6044 } 6045 6046 int 6047 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 6048 struct rte_eth_ip_reassembly_params *reassembly_capa) 6049 { 6050 struct rte_eth_dev *dev; 6051 6052 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6053 dev = &rte_eth_devices[port_id]; 6054 6055 if (dev->data->dev_configured == 0) { 6056 RTE_ETHDEV_LOG(ERR, 6057 "Device with port_id=%u is not configured.\n" 6058 "Cannot get IP reassembly capability\n", 6059 port_id); 6060 return -EINVAL; 6061 } 6062 6063 if (reassembly_capa == NULL) { 6064 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 6065 return -EINVAL; 6066 } 6067 6068 if (*dev->dev_ops->ip_reassembly_capability_get == NULL) 6069 return -ENOTSUP; 6070 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6071 6072 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 6073 (dev, reassembly_capa)); 6074 } 6075 6076 int 6077 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 6078 struct rte_eth_ip_reassembly_params *conf) 6079 { 6080 struct rte_eth_dev *dev; 
6081 6082 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6083 dev = &rte_eth_devices[port_id]; 6084 6085 if (dev->data->dev_configured == 0) { 6086 RTE_ETHDEV_LOG(ERR, 6087 "Device with port_id=%u is not configured; " 6088 "cannot get IP reassembly configuration\n", 6089 port_id); 6090 return -EINVAL; 6091 } 6092 6093 if (conf == NULL) { 6094 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n"); 6095 return -EINVAL; 6096 } 6097 6098 if (*dev->dev_ops->ip_reassembly_conf_get == NULL) 6099 return -ENOTSUP; 6100 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6101 return eth_err(port_id, 6102 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 6103 } 6104 6105 int 6106 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 6107 const struct rte_eth_ip_reassembly_params *conf) 6108 { 6109 struct rte_eth_dev *dev; 6110 6111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6112 dev = &rte_eth_devices[port_id]; 6113 6114 if (dev->data->dev_configured == 0) { 6115 RTE_ETHDEV_LOG(ERR, 6116 "Device with port_id=%u is not configured; " 6117 "cannot set IP reassembly configuration\n", 6118 port_id); 6119 return -EINVAL; 6120 } 6121 6122 if (dev->data->dev_started != 0) { 6123 RTE_ETHDEV_LOG(ERR, 6124 "Device with port_id=%u is started; " 6125 "cannot configure IP reassembly params\n", 6126 port_id); 6127 return -EINVAL; 6128 } 6129 6130 if (conf == NULL) { 6131 RTE_ETHDEV_LOG(ERR, 6132 "Invalid IP reassembly configuration (NULL)\n"); 6133 return -EINVAL; 6134 } 6135 6136 if (*dev->dev_ops->ip_reassembly_conf_set == NULL) 6137 return -ENOTSUP; 6138 return eth_err(port_id, 6139 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 6140 } 6141 6142 int 6143 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 6144 { 6145 struct rte_eth_dev *dev; 6146 6147 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6148 dev = &rte_eth_devices[port_id]; 6149 6150 if (file == NULL) { 6151 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6152 return -EINVAL; 6153 } 6154 6155 if (*dev->dev_ops->eth_dev_priv_dump == NULL) 6156 return -ENOTSUP; 6157 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 6158 } 6159 6160 int 6161 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6162 uint16_t offset, uint16_t num, FILE *file) 6163 { 6164 struct rte_eth_dev *dev; 6165 6166 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6167 dev = &rte_eth_devices[port_id]; 6168 6169 if (queue_id >= dev->data->nb_rx_queues) { 6170 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6171 return -EINVAL; 6172 } 6173 6174 if (file == NULL) { 6175 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6176 return -EINVAL; 6177 } 6178 6179 if (*dev->dev_ops->eth_rx_descriptor_dump == NULL) 6180 return -ENOTSUP; 6181 6182 return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev, 6183 queue_id, offset, num, file)); 6184 } 6185 6186 int 6187 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6188 uint16_t offset, uint16_t num, FILE *file) 6189 { 6190 struct rte_eth_dev *dev; 6191 6192 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6193 dev = &rte_eth_devices[port_id]; 6194 6195 if (queue_id >= dev->data->nb_tx_queues) { 6196 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6197 return -EINVAL; 6198 } 6199 6200 if (file == NULL) { 6201 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6202 return -EINVAL; 6203 } 6204 6205 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL) 6206 return -ENOTSUP; 6207 6208 return eth_err(port_id,
(*dev->dev_ops->eth_tx_descriptor_dump)(dev, 6209 queue_id, offset, num, file)); 6210 } 6211 6212 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 6213 6214 RTE_INIT(ethdev_init_telemetry) 6215 { 6216 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 6217 "Returns list of available ethdev ports. Takes no parameters"); 6218 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 6219 "Returns the common stats for a port. Parameters: int port_id"); 6220 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 6221 "Returns the extended stats for a port. Parameters: int port_id"); 6222 #ifndef RTE_EXEC_ENV_WINDOWS 6223 rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv, 6224 "Returns the private information dump for a port. Parameters: int port_id"); 6225 #endif 6226 rte_telemetry_register_cmd("/ethdev/link_status", 6227 eth_dev_handle_port_link_status, 6228 "Returns the link status for a port. Parameters: int port_id"); 6229 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 6230 "Returns the device info for a port. Parameters: int port_id"); 6231 rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom, 6232 "Returns module EEPROM info with SFF specs. Parameters: int port_id"); 6233 } 6234
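/*
 * Usage sketch (illustrative only, not part of the library): program a
 * round-robin RSS redirection table with rte_eth_dev_rss_reta_update().
 * The helper names, the ETHDEV_USAGE_EXAMPLE guard, and the 512-entry
 * upper bound are assumptions of this example; the port must already be
 * configured with RTE_ETH_MQ_RX_RSS and have nb_queues Rx queues set up.
 */
#ifdef ETHDEV_USAGE_EXAMPLE /* hypothetical guard: examples are not built */
static int
example_rss_spread(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Bound check: this sketch only supports tables up to 512 entries. */
	if (nb_queues == 0 || dev_info.reta_size == 0 ||
			dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		/* Select every entry and assign the queues round-robin. */
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
			i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			dev_info.reta_size);
}

/*
 * Second sketch (also illustrative): react to link-state changes on all
 * ports, including ones attached later, by registering with the
 * RTE_ETH_ALL wildcard. The handler body is an assumption of this example.
 */
static int
example_on_link_change(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* Non-blocking query: safe inside an event callback. */
	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		RTE_ETHDEV_LOG(INFO, "Port %u link is %s\n", port_id,
				link.link_status ? "up" : "down");
	return 0;
}

static int
example_watch_links(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_on_link_change, NULL);
}
#endif /* ETHDEV_USAGE_EXAMPLE */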